Index: projects/clang500-import/Makefile.inc1
===================================================================
--- projects/clang500-import/Makefile.inc1	(revision 319548)
+++ projects/clang500-import/Makefile.inc1	(revision 319549)
@@ -1,2740 +1,2741 @@
# # $FreeBSD$ # # Make command line options: # -DNO_CLEANDIR run ${MAKE} clean, instead of ${MAKE} cleandir # -DNO_CLEAN do not clean at all # -DDB_FROM_SRC use the user/group databases in src/etc instead of # the system database when installing. # -DNO_SHARE do not go into share subdir # -DKERNFAST define NO_KERNEL{CONFIG,CLEAN,OBJ} # -DNO_KERNELCONFIG do not run config in ${MAKE} buildkernel # -DNO_KERNELCLEAN do not run ${MAKE} clean in ${MAKE} buildkernel # -DNO_KERNELOBJ do not run ${MAKE} obj in ${MAKE} buildkernel # -DNO_PORTSUPDATE do not update ports in ${MAKE} update # -DNO_ROOT install without using root privilege # -DNO_DOCUPDATE do not update doc in ${MAKE} update # -DWITHOUT_CTF do not run the DTrace CTF conversion tools on built objects # LOCAL_DIRS="list of dirs" to add additional dirs to the SUBDIR list # LOCAL_ITOOLS="list of tools" to add additional tools to the ITOOLS list # LOCAL_LIB_DIRS="list of dirs" to add additional dirs to libraries target # LOCAL_MTREE="list of mtree files" to process to allow local directories # to be created before files are installed # LOCAL_TOOL_DIRS="list of dirs" to add additional dirs to the build-tools # list # LOCAL_XTOOL_DIRS="list of dirs" to add additional dirs to the # cross-tools target # METALOG="path to metadata log" to write permission and ownership # when NO_ROOT is set. (default: ${DESTDIR}/METALOG) # TARGET="machine" to cross-build world for a different machine type # TARGET_ARCH= may be required when a TARGET supports multiple endians # BUILDENV_SHELL= shell to launch for the buildenv target (def:${SHELL}) # WORLD_FLAGS= additional flags to pass to make(1) during buildworld # KERNEL_FLAGS= additional flags to pass to make(1) during buildkernel # SUBDIR_OVERRIDE="list of dirs" to build rather than everything. # All libraries and includes, and some build tools will still build. # # The intended user-driven targets are: # buildworld - rebuild *everything*, including glue to help do upgrades # installworld - install everything built by "buildworld" # checkworld - run test suite on installed world # doxygen - build API documentation of the kernel # update - convenient way to update your source tree (e.g., svn/svnup) # # Standard targets (not defined here) are documented in the makefiles in # /usr/share/mk. These include: # obj depend all install clean cleandepend cleanobj .if !defined(TARGET) || !defined(TARGET_ARCH) .error "Both TARGET and TARGET_ARCH must be defined." .endif SRCDIR?= ${.CURDIR} LOCALBASE?= /usr/local # Cross toolchain changes must be in effect before bsd.compiler.mk # so that it gets the right CC, and pass CROSS_TOOLCHAIN to submakes. .if defined(CROSS_TOOLCHAIN) .include "${LOCALBASE}/share/toolchains/${CROSS_TOOLCHAIN}.mk" CROSSENV+=CROSS_TOOLCHAIN="${CROSS_TOOLCHAIN}" .endif .if defined(CROSS_TOOLCHAIN_PREFIX) CROSS_COMPILER_PREFIX?=${CROSS_TOOLCHAIN_PREFIX} .endif XCOMPILERS= CC CXX CPP .for COMPILER in ${XCOMPILERS} .if defined(CROSS_COMPILER_PREFIX) X${COMPILER}?= ${CROSS_COMPILER_PREFIX}${${COMPILER}} .else X${COMPILER}?= ${${COMPILER}} .endif .endfor # If a full path to an external cross compiler is given, don't build # a cross compiler.
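# As an illustrative sketch (the toolchain name below is hypothetical), an
# external toolchain can be selected in two ways: via a descriptor file
# installed by a port, e.g.
#   make CROSS_TOOLCHAIN=aarch64-gcc TARGET=arm64 TARGET_ARCH=aarch64 buildworld
# which pulls in ${LOCALBASE}/share/toolchains/aarch64-gcc.mk, or by giving
# XCC as a full path, which (per the test below) disables building the
# bootstrap compilers:
#   make XCC=/usr/local/bin/clang buildworld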
.if ${XCC:N${CCACHE_BIN}:M/*} MK_CLANG_BOOTSTRAP= no MK_GCC_BOOTSTRAP= no .endif MAKEOBJDIRPREFIX?= /usr/obj .if ${MACHINE} == ${TARGET} && ${MACHINE_ARCH} == ${TARGET_ARCH} && !defined(CROSS_BUILD_TESTING) OBJTREE= ${MAKEOBJDIRPREFIX} .else OBJTREE= ${MAKEOBJDIRPREFIX}/${TARGET}.${TARGET_ARCH} .endif # Pull in compiler metadata from buildworld/toolchain if possible to avoid # running CC from bsd.compiler.mk. .if make(installworld) || make(install) .-include "${OBJTREE}${.CURDIR}/compiler-metadata.mk" .endif # Pull in COMPILER_TYPE and COMPILER_FREEBSD_VERSION early. .include <bsd.compiler.mk> .include "share/mk/src.opts.mk" # Check if there is a local compiler that can serve as the external compiler. # Which compiler is expected to be used? .if ${MK_CLANG_BOOTSTRAP} == "yes" WANT_COMPILER_TYPE= clang .elif ${MK_GCC_BOOTSTRAP} == "yes" WANT_COMPILER_TYPE= gcc .else WANT_COMPILER_TYPE= .endif .if !defined(WANT_COMPILER_FREEBSD_VERSION) .if ${WANT_COMPILER_TYPE} == "clang" WANT_COMPILER_FREEBSD_VERSION_FILE= lib/clang/freebsd_cc_version.h WANT_COMPILER_FREEBSD_VERSION!= \ awk '$$2 == "FREEBSD_CC_VERSION" {printf("%d\n", $$3)}' \ ${SRCDIR}/${WANT_COMPILER_FREEBSD_VERSION_FILE} || echo unknown WANT_COMPILER_VERSION_FILE= lib/clang/include/clang/Basic/Version.inc WANT_COMPILER_VERSION!= \ awk '$$2 == "CLANG_VERSION" {split($$3, a, "."); print a[1] * 10000 + a[2] * 100 + a[3]}' \ ${SRCDIR}/${WANT_COMPILER_VERSION_FILE} || echo unknown .elif ${WANT_COMPILER_TYPE} == "gcc" WANT_COMPILER_FREEBSD_VERSION_FILE= gnu/usr.bin/cc/cc_tools/freebsd-native.h WANT_COMPILER_FREEBSD_VERSION!= \ awk '$$2 == "FBSD_CC_VER" {printf("%d\n", $$3)}' \ ${SRCDIR}/${WANT_COMPILER_FREEBSD_VERSION_FILE} || echo unknown WANT_COMPILER_VERSION_FILE= contrib/gcc/BASE-VER WANT_COMPILER_VERSION!= \ awk -F. '{print $$1 * 10000 + $$2 * 100 + $$3}' \ ${SRCDIR}/${WANT_COMPILER_VERSION_FILE} || echo unknown .endif .export WANT_COMPILER_FREEBSD_VERSION WANT_COMPILER_VERSION .endif # !defined(WANT_COMPILER_FREEBSD_VERSION) # The system compiler needs to be the same revision as the one we would # build for the bootstrap. If the expected compiler and CC differ then we # can't skip the bootstrap. # GCC cannot be used for cross-arch yet. For clang we pass -target later if # TARGET_ARCH!=MACHINE_ARCH. .if ${MK_SYSTEM_COMPILER} == "yes" && \ (${MK_CLANG_BOOTSTRAP} == "yes" || ${MK_GCC_BOOTSTRAP} == "yes") && \ !make(showconfig) && !make(native-xtools) && !make(xdev*) && \ ${WANT_COMPILER_TYPE} == ${COMPILER_TYPE} && \ (${COMPILER_TYPE} == "clang" || ${TARGET_ARCH} == ${MACHINE_ARCH}) && \ ${COMPILER_VERSION} == ${WANT_COMPILER_VERSION} && \ ${COMPILER_FREEBSD_VERSION} == ${WANT_COMPILER_FREEBSD_VERSION} # Everything matches, disable the bootstrap compiler. MK_CLANG_BOOTSTRAP= no MK_GCC_BOOTSTRAP= no USING_SYSTEM_COMPILER= yes .endif # ${WANT_COMPILER_TYPE} == ${COMPILER_TYPE} USING_SYSTEM_COMPILER?= no TEST_SYSTEM_COMPILER_VARS= \ USING_SYSTEM_COMPILER MK_SYSTEM_COMPILER \ MK_CROSS_COMPILER MK_CLANG_BOOTSTRAP MK_GCC_BOOTSTRAP \ WANT_COMPILER_TYPE WANT_COMPILER_VERSION WANT_COMPILER_VERSION_FILE \ WANT_COMPILER_FREEBSD_VERSION WANT_COMPILER_FREEBSD_VERSION_FILE \ CC COMPILER_TYPE COMPILER_FEATURES COMPILER_VERSION \ COMPILER_FREEBSD_VERSION test-system-compiler: .PHONY .for v in ${TEST_SYSTEM_COMPILER_VARS} ${_+_}@printf "%-35s= %s\n" "${v}" "${${v}}" .endfor .if ${USING_SYSTEM_COMPILER} == "yes" && \ (make(buildworld) || make(buildkernel) || make(kernel-toolchain) || \ make(toolchain) || make(_cross-tools)) .info SYSTEM_COMPILER: Determined that CC=${CC} matches the source tree.
Not bootstrapping a cross-compiler. .endif # For installworld we need to ensure that the looked-up compiler metadata is # passed along rather than trying to run cc from the restricted # STRICTTMPPATH. .if ${MK_CLANG_BOOTSTRAP} == "no" && ${MK_GCC_BOOTSTRAP} == "no" .if !defined(X_COMPILER_TYPE) CROSSENV+= COMPILER_VERSION=${COMPILER_VERSION} \ COMPILER_TYPE=${COMPILER_TYPE} \ COMPILER_FEATURES=${COMPILER_FEATURES} \ COMPILER_FREEBSD_VERSION=${COMPILER_FREEBSD_VERSION} .else CROSSENV+= COMPILER_VERSION=${X_COMPILER_VERSION} \ COMPILER_FEATURES=${X_COMPILER_FEATURES} \ COMPILER_TYPE=${X_COMPILER_TYPE} \ COMPILER_FREEBSD_VERSION=${X_COMPILER_FREEBSD_VERSION} .endif .endif # Store some compiler metadata for use in installworld where we don't # want to invoke CC at all. _COMPILER_METADATA_VARS= COMPILER_VERSION \ COMPILER_TYPE \ COMPILER_FEATURES \ COMPILER_FREEBSD_VERSION compiler-metadata.mk: .PHONY .META @: > ${.TARGET} @echo ".info Using cached compiler metadata from build at $$(hostname) on $$(date)" \ > ${.TARGET} .for v in ${_COMPILER_METADATA_VARS} @echo "${v}=${${v}}" >> ${.TARGET} .endfor @echo ".export ${_COMPILER_METADATA_VARS}" >> ${.TARGET} # Handle external binutils. .if defined(CROSS_TOOLCHAIN_PREFIX) CROSS_BINUTILS_PREFIX?=${CROSS_TOOLCHAIN_PREFIX} .endif # If we do not have a bootstrap binutils (because the in-tree one does not # support the target architecture), provide a default cross-binutils prefix. # This allows riscv64 builds, for example, to automatically use the # riscv64-binutils port or package. .if !make(showconfig) .if !empty(BROKEN_OPTIONS:MBINUTILS_BOOTSTRAP) && \ ${MK_LLD_BOOTSTRAP} == "no" && \ !defined(CROSS_BINUTILS_PREFIX) CROSS_BINUTILS_PREFIX=/usr/local/${TARGET_ARCH}-freebsd/bin/ .if !exists(${CROSS_BINUTILS_PREFIX}) .error In-tree binutils does not support the ${TARGET_ARCH} architecture. Install the ${TARGET_ARCH}-binutils port or package or set CROSS_BINUTILS_PREFIX. .endif .endif .endif XBINUTILS= AS AR LD NM OBJCOPY RANLIB SIZE STRINGS .for BINUTIL in ${XBINUTILS} .if defined(CROSS_BINUTILS_PREFIX) && \ exists(${CROSS_BINUTILS_PREFIX}${${BINUTIL}}) X${BINUTIL}?= ${CROSS_BINUTILS_PREFIX}${${BINUTIL}} .else X${BINUTIL}?= ${${BINUTIL}} .endif .endfor # We must do lib/ and libexec/ before bin/ in case of a mid-install error to # keep the user's system reasonably usable. For static->dynamic root upgrades, # we don't want to install a dynamic binary without rtld and the needed # libraries. More commonly, for dynamic root, we don't want to install a # binary that requires a newer library version that hasn't been installed yet. # This ordering is not a guarantee though. Truly guaranteeing a working # system would require fine-grained ordering of all components based # on their dependencies. .if !empty(SUBDIR_OVERRIDE) SUBDIR= ${SUBDIR_OVERRIDE} .else SUBDIR= lib libexec .if !defined(NO_ROOT) && (make(installworld) || make(install)) # Ensure libraries are installed before progressing. SUBDIR+=.WAIT .endif SUBDIR+=bin .if ${MK_CDDL} != "no" SUBDIR+=cddl .endif SUBDIR+=gnu include .if ${MK_KERBEROS} != "no" SUBDIR+=kerberos5 .endif .if ${MK_RESCUE} != "no" SUBDIR+=rescue .endif SUBDIR+=sbin .if ${MK_CRYPT} != "no" SUBDIR+=secure .endif .if !defined(NO_SHARE) SUBDIR+=share .endif SUBDIR+=sys usr.bin usr.sbin .if ${MK_TESTS} != "no" SUBDIR+= tests .endif .if ${MK_OFED} != "no" SUBDIR+=contrib/ofed .endif # Local directories are last, since it is nice to at least get the base # system rebuilt before you do them.
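# Illustrative example (directory names are hypothetical): local software
# kept inside the source tree can be hooked into the build with
#   make LOCAL_DIRS="local/mytool" LOCAL_LIB_DIRS="local/mylib" buildworld
# A listed directory is only added to SUBDIR if it contains a Makefile, as
# the loops below show.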
.for _DIR in ${LOCAL_DIRS} .if exists(${.CURDIR}/${_DIR}/Makefile) SUBDIR+= ${_DIR} .endif .endfor # Add LOCAL_LIB_DIRS, but only if they will not be picked up as a SUBDIR # of a LOCAL_DIRS directory. This allows LOCAL_DIRS=foo and # LOCAL_LIB_DIRS=foo/lib to behave as expected. .for _DIR in ${LOCAL_DIRS:M*/} ${LOCAL_DIRS:N*/:S|$|/|} _REDUNDANT_LIB_DIRS+= ${LOCAL_LIB_DIRS:M${_DIR}*} .endfor .for _DIR in ${LOCAL_LIB_DIRS} .if empty(_REDUNDANT_LIB_DIRS:M${_DIR}) && exists(${.CURDIR}/${_DIR}/Makefile) SUBDIR+= ${_DIR} .endif .endfor # We must do etc/ last as it hooks into building the man whatis file # by calling 'makedb' in share/man. This is only relevant for # install/distribute so they build the whatis file after every manpage is # installed. .if make(installworld) || make(install) SUBDIR+=.WAIT .endif SUBDIR+=etc .endif # !empty(SUBDIR_OVERRIDE) .if defined(NOCLEAN) .warning NOCLEAN option is deprecated. Use NO_CLEAN instead. NO_CLEAN= ${NOCLEAN} .endif .if defined(NO_CLEANDIR) CLEANDIR= clean cleandepend .else CLEANDIR= cleandir .endif .if defined(WORLDFAST) NO_CLEAN= t NO_OBJ= t .endif .if ${MK_META_MODE} == "yes" # If filemon is used then we can rely on the build being incremental-safe. # The .meta files will also track the build command and rebuild should # it change. .if empty(.MAKE.MODE:Mnofilemon) NO_CLEAN= t .endif .endif .if defined(NO_OBJ) || ${MK_AUTO_OBJ} == "yes" NO_OBJ= t NO_KERNELOBJ= t .endif .if !defined(NO_OBJ) _obj= obj .endif LOCAL_TOOL_DIRS?= PACKAGEDIR?= ${DESTDIR}/${DISTDIR} .if empty(SHELL:M*csh*) BUILDENV_SHELL?=${SHELL} .else BUILDENV_SHELL?=/bin/sh .endif .if !defined(SVN) || empty(SVN) . for _P in /usr/bin /usr/local/bin . for _S in svn svnlite . if exists(${_P}/${_S}) SVN= ${_P}/${_S} . endif . endfor . endfor .endif SVNFLAGS?= -r HEAD .if !defined(OSRELDATE) .if exists(/usr/include/osreldate.h) OSRELDATE!= awk '/^\#define[[:space:]]*__FreeBSD_version/ { print $$3 }' \ /usr/include/osreldate.h .else OSRELDATE= 0 .endif .export OSRELDATE .endif # Set VERSION for CTFMERGE to use via the default CTFFLAGS=-L VERSION. .if !defined(_REVISION) _REVISION!= MK_AUTO_OBJ=no ${MAKE} -C ${SRCDIR}/release -V REVISION .export _REVISION .endif .if !defined(_BRANCH) _BRANCH!= MK_AUTO_OBJ=no ${MAKE} -C ${SRCDIR}/release -V BRANCH .export _BRANCH .endif .if !defined(SRCRELDATE) SRCRELDATE!= awk '/^\#define[[:space:]]*__FreeBSD_version/ { print $$3 }' \ ${SRCDIR}/sys/sys/param.h .export SRCRELDATE .endif .if !defined(VERSION) VERSION= FreeBSD ${_REVISION}-${_BRANCH:C/-p[0-9]+$//} ${TARGET_ARCH} ${SRCRELDATE} .export VERSION .endif .if !defined(PKG_VERSION) .if ${_BRANCH:MSTABLE*} || ${_BRANCH:MCURRENT*} || ${_BRANCH:MALPHA*} TIMENOW= %Y%m%d%H%M%S EXTRA_REVISION= .s${TIMENOW:gmtime} .endif .if ${_BRANCH:M*-p*} EXTRA_REVISION= _${_BRANCH:C/.*-p([0-9]+$)/\1/} .endif PKG_VERSION= ${_REVISION}${EXTRA_REVISION} .endif KNOWN_ARCHES?= aarch64/arm64 \ amd64 \ arm \ armeb/arm \ armv6/arm \ i386 \ mips \ mipsel/mips \ mips64el/mips \ mipsn32el/mips \ mips64/mips \ mipsn32/mips \ mipshf/mips \ mipselhf/mips \ mips64elhf/mips \ mips64hf/mips \ powerpc \ powerpc64/powerpc \ powerpcspe/powerpc \ riscv64/riscv \ riscv64sf/riscv \ sparc64 .if ${TARGET} == ${TARGET_ARCH} _t= ${TARGET} .else _t= ${TARGET_ARCH}/${TARGET} .endif .for _t in ${_t} .if empty(KNOWN_ARCHES:M${_t}) .error Unknown target ${TARGET_ARCH}:${TARGET}. 
.endif .endfor .if ${TARGET} == ${MACHINE} TARGET_CPUTYPE?=${CPUTYPE} .else TARGET_CPUTYPE?= .endif .if !empty(TARGET_CPUTYPE) _TARGET_CPUTYPE=${TARGET_CPUTYPE} .else _TARGET_CPUTYPE=dummy .endif _CPUTYPE!= MK_AUTO_OBJ=no MAKEFLAGS= CPUTYPE=${_TARGET_CPUTYPE} ${MAKE} \ -f /dev/null -m ${.CURDIR}/share/mk -V CPUTYPE .if ${_CPUTYPE} != ${_TARGET_CPUTYPE} .error CPUTYPE global should be set with ?=. .endif .if make(buildworld) BUILD_ARCH!= uname -p .if ${MACHINE_ARCH} != ${BUILD_ARCH} .error To cross-build, set TARGET_ARCH. .endif .endif WORLDTMP= ${OBJTREE}${.CURDIR}/tmp BPATH= ${WORLDTMP}/legacy/usr/sbin:${WORLDTMP}/legacy/usr/bin:${WORLDTMP}/legacy/bin XPATH= ${WORLDTMP}/usr/sbin:${WORLDTMP}/usr/bin STRICTTMPPATH= ${BPATH}:${XPATH} TMPPATH= ${STRICTTMPPATH}:${PATH} # # Avoid running mktemp(1) unless actually needed. # It may not be functional, e.g., due to a new ABI # when in the middle of installing over this system. # .if make(distributeworld) || make(installworld) || make(stageworld) INSTALLTMP!= /usr/bin/mktemp -d -u -t install .endif .if make(stagekernel) || make(distributekernel) TAGS+= kernel PACKAGE= kernel .endif # # Building a world goes through the following stages # # 1.1. legacy stage [BMAKE] # This stage is responsible for creating compatibility # shims that are needed by the bootstrap-tools, # build-tools and cross-tools stages. These are generally # APIs that tools from one of those three stages need to # build that aren't present on the host. # 1.2. bootstrap-tools stage [BMAKE] # This stage is responsible for creating programs that # are needed for backward compatibility reasons. They # are not built as cross-tools. # 2. build-tools stage [TMAKE] # This stage is responsible for creating the object # tree and building any tools that are needed during # the build process. Some programs are listed during # this phase because they build binaries that generate # files needed to build the programs themselves. This stage also # builds the 'build-tools' target rather than 'all'. # 3. cross-tools stage [XMAKE] # This stage is responsible for creating any tools that # are needed for building the system. A cross-compiler is one # of them. This differs from build tools in two ways: # 1. the 'all' target is built rather than 'build-tools' # 2. these tools are installed into TMPPATH for stage 4. # 4. world stage [WMAKE] # This stage actually builds the world. # 5. install stage (optional) [IMAKE] # This stage installs a previously built world. # BOOTSTRAPPING?= 0 # Keep these in sync -- see below for special case exception MINIMUM_SUPPORTED_OSREL?= 900044 MINIMUM_SUPPORTED_REL?= 9.1 # Common environment for world related stages CROSSENV+= MAKEOBJDIRPREFIX=${OBJTREE} \ MACHINE_ARCH=${TARGET_ARCH} \ MACHINE=${TARGET} \ CPUTYPE=${TARGET_CPUTYPE} .if ${MK_META_MODE} != "no" # Don't rebuild build-tools targets during normal build.
CROSSENV+= BUILD_TOOLS_META=.NOMETA .endif .if defined(TARGET_CFLAGS) CROSSENV+= ${TARGET_CFLAGS} .endif # bootstrap-tools stage BMAKEENV= INSTALL="sh ${.CURDIR}/tools/install.sh" \ TOOLS_PREFIX=${WORLDTMP} \ PATH=${BPATH}:${PATH} \ WORLDTMP=${WORLDTMP} \ MAKEFLAGS="-m ${.CURDIR}/tools/build/mk ${.MAKEFLAGS}" # need to keep this in sync with targets/pseudo/bootstrap-tools/Makefile BSARGS= DESTDIR= \ BOOTSTRAPPING=${OSRELDATE} \ SSP_CFLAGS= \ MK_HTML=no NO_LINT=yes MK_MAN=no \ -DNO_PIC MK_PROFILE=no -DNO_SHARED \ -DNO_CPU_CFLAGS MK_WARNS=no MK_CTF=no \ MK_CLANG_EXTRAS=no MK_CLANG_FULL=no \ MK_LLDB=no MK_TESTS=no \ MK_INCLUDES=yes BMAKE= MAKEOBJDIRPREFIX=${WORLDTMP} \ ${BMAKEENV} ${MAKE} ${WORLD_FLAGS} -f Makefile.inc1 \ ${BSARGS} # build-tools stage TMAKE= MAKEOBJDIRPREFIX=${OBJTREE} \ ${BMAKEENV} ${MAKE} ${WORLD_FLAGS} -f Makefile.inc1 \ TARGET=${TARGET} TARGET_ARCH=${TARGET_ARCH} \ DESTDIR= \ BOOTSTRAPPING=${OSRELDATE} \ SSP_CFLAGS= \ -DNO_LINT \ -DNO_CPU_CFLAGS MK_WARNS=no MK_CTF=no \ MK_CLANG_EXTRAS=no MK_CLANG_FULL=no \ MK_LLDB=no MK_TESTS=no # cross-tools stage XMAKE= TOOLS_PREFIX=${WORLDTMP} ${BMAKE} \ TARGET=${TARGET} TARGET_ARCH=${TARGET_ARCH} \ MK_GDB=no MK_TESTS=no # kernel-tools stage KTMAKEENV= INSTALL="sh ${.CURDIR}/tools/install.sh" \ PATH=${BPATH}:${PATH} \ WORLDTMP=${WORLDTMP} KTMAKE= TOOLS_PREFIX=${WORLDTMP} MAKEOBJDIRPREFIX=${WORLDTMP} \ ${KTMAKEENV} ${MAKE} ${WORLD_FLAGS} -f Makefile.inc1 \ DESTDIR= \ BOOTSTRAPPING=${OSRELDATE} \ SSP_CFLAGS= \ MK_HTML=no -DNO_LINT MK_MAN=no \ -DNO_PIC MK_PROFILE=no -DNO_SHARED \ -DNO_CPU_CFLAGS MK_WARNS=no MK_CTF=no # world stage WMAKEENV= ${CROSSENV} \ INSTALL="sh ${.CURDIR}/tools/install.sh" \ PATH=${TMPPATH} # make hierarchy HMAKE= PATH=${TMPPATH} ${MAKE} LOCAL_MTREE=${LOCAL_MTREE:Q} .if defined(NO_ROOT) HMAKE+= PATH=${TMPPATH} METALOG=${METALOG} -DNO_ROOT .endif CROSSENV+= CC="${XCC} ${XCFLAGS}" CXX="${XCXX} ${XCXXFLAGS} ${XCFLAGS}" \ CPP="${XCPP} ${XCFLAGS}" \ AS="${XAS}" AR="${XAR}" LD="${XLD}" LLVM_LINK="${XLLVM_LINK}" \ NM=${XNM} OBJCOPY="${XOBJCOPY}" \ RANLIB=${XRANLIB} STRINGS=${XSTRINGS} \ SIZE="${XSIZE}" .if defined(CROSS_BINUTILS_PREFIX) && exists(${CROSS_BINUTILS_PREFIX}) # In the case of xdev-build tools, CROSS_BINUTILS_PREFIX won't be a # directory, but the compiler will look in the right place for its # tools so we don't need to tell it where to look. BFLAGS+= -B${CROSS_BINUTILS_PREFIX} .endif # The internal bootstrap compiler has a default sysroot set by TOOLS_PREFIX # and target set by TARGET/TARGET_ARCH. However, there are several reasons to # always pass an explicit --sysroot and -target. # - External compiler needs sysroot and target flags. # - External ld needs sysroot. # - To be clear about the use of a sysroot when using the internal compiler. # - Easier debugging. # - Allowing WITH_SYSTEM_COMPILER+WITH_META_MODE to work together, since # the build command would otherwise flip-flop between the external and # internal compiler. # - Allow using lld which has no support for default paths. .if !defined(CROSS_BINUTILS_PREFIX) || !exists(${CROSS_BINUTILS_PREFIX}) BFLAGS+= -B${WORLDTMP}/usr/bin .endif .if ${TARGET} == "arm" .if ${TARGET_ARCH:Marmv6*} != "" && ${TARGET_CPUTYPE:M*soft*} == "" TARGET_ABI= gnueabihf .else TARGET_ABI= gnueabi .endif .endif .if ${WANT_COMPILER_TYPE} == gcc || \ (defined(X_COMPILER_TYPE) && ${X_COMPILER_TYPE} == gcc) # GCC requires -isystem and -L when using a cross-compiler.
--sysroot # won't set header path and -L is used to ensure the base library path # is added before the port PREFIX library path. XCFLAGS+= -isystem ${WORLDTMP}/usr/include -L${WORLDTMP}/usr/lib # GCC requires -B to find /usr/lib/crti.o when using a cross-compiler # combined with --sysroot. XCFLAGS+= -B${WORLDTMP}/usr/lib # Force using libc++ for external GCC. # XXX: This should be checking MK_GNUCXX == no .if ${X_COMPILER_VERSION} >= 40800 XCXXFLAGS+= -isystem ${WORLDTMP}/usr/include/c++/v1 -std=c++11 \ -nostdinc++ .endif .elif ${WANT_COMPILER_TYPE} == clang || \ (defined(X_COMPILER_TYPE) && ${X_COMPILER_TYPE} == clang) TARGET_ABI?= unknown TARGET_TRIPLE?= ${TARGET_ARCH:C/amd64/x86_64/}-${TARGET_ABI}-freebsd12.0 XCFLAGS+= -target ${TARGET_TRIPLE} .endif XCFLAGS+= --sysroot=${WORLDTMP} .if !empty(BFLAGS) XCFLAGS+= ${BFLAGS} .endif .if ${MK_LIB32} != "no" && (${TARGET_ARCH} == "amd64" || \ ${TARGET_ARCH} == "powerpc64" || ${TARGET_ARCH:Mmips64*} != "") LIBCOMPAT= 32 .include "Makefile.libcompat" .elif ${MK_LIBSOFT} != "no" && ${TARGET_ARCH} == "armv6" LIBCOMPAT= SOFT .include "Makefile.libcompat" .endif WMAKE= ${WMAKEENV} ${MAKE} ${WORLD_FLAGS} -f Makefile.inc1 DESTDIR=${WORLDTMP} IMAKEENV= ${CROSSENV} IMAKE= ${IMAKEENV} ${MAKE} -f Makefile.inc1 \ ${IMAKE_INSTALL} ${IMAKE_MTREE} .if empty(.MAKEFLAGS:M-n) IMAKEENV+= PATH=${STRICTTMPPATH}:${INSTALLTMP} \ LD_LIBRARY_PATH=${INSTALLTMP} \ PATH_LOCALE=${INSTALLTMP}/locale IMAKE+= __MAKE_SHELL=${INSTALLTMP}/sh .else IMAKEENV+= PATH=${TMPPATH}:${INSTALLTMP} .endif .if defined(DB_FROM_SRC) INSTALLFLAGS+= -N ${.CURDIR}/etc MTREEFLAGS+= -N ${.CURDIR}/etc .endif _INSTALL_DDIR= ${DESTDIR}/${DISTDIR} INSTALL_DDIR= ${_INSTALL_DDIR:S://:/:g:C:/$::} .if defined(NO_ROOT) METALOG?= ${DESTDIR}/${DISTDIR}/METALOG IMAKE+= -DNO_ROOT METALOG=${METALOG} INSTALLFLAGS+= -U -M ${METALOG} -D ${INSTALL_DDIR} MTREEFLAGS+= -W .endif .if defined(BUILD_PKGS) INSTALLFLAGS+= -h sha256 .endif .if defined(DB_FROM_SRC) || defined(NO_ROOT) IMAKE_INSTALL= INSTALL="install ${INSTALLFLAGS}" IMAKE_MTREE= MTREE_CMD="mtree ${MTREEFLAGS}" .endif # kernel stage KMAKEENV= ${WMAKEENV} KMAKE= ${KMAKEENV} ${MAKE} ${.MAKEFLAGS} ${KERNEL_FLAGS} KERNEL=${INSTKERNNAME} # # buildworld # # Attempt to rebuild the entire system, with reasonable chance of # success, regardless of how old your existing system is. # _worldtmp: .PHONY .if ${.CURDIR:C/[^,]//g} != "" # The m4 build of sendmail files doesn't like it if ',' is used # anywhere in the path of its files. @echo @echo "*** Error: path to source tree contains a comma ','" @echo false .endif @echo @echo "--------------------------------------------------------------" @echo ">>> Rebuilding the temporary build tree" @echo "--------------------------------------------------------------" .if !defined(NO_CLEAN) rm -rf ${WORLDTMP} .if defined(LIBCOMPAT) rm -rf ${LIBCOMPATTMP} .endif .else rm -rf ${WORLDTMP}/legacy/usr/include .endif # Dependencies cannot cope with certain source tree changes, particularly # with respect to removing source files and replacing generated files. # Handle these cases here in an ad-hoc fashion.
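# The cleanup hooks below share one shape (a sketch only; the path and file
# names here are hypothetical, while the dated entries that follow are the
# real instances):
# .if exists(${OBJTREE}${.CURDIR}/some/dir/.depend.stale_file.o)
#	@rm -f ${OBJTREE}${.CURDIR}/some/dir/.depend.stale_file.*
# .endif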
# 20160829 remove stale dependencies for ptrace stub, rewritten in C # in r305012 .for f in ptrace .if exists(${OBJTREE}${.CURDIR}/lib/libc/.depend.${f}.o) @if egrep -q '/${f}.[sS]' \ ${OBJTREE}${.CURDIR}/lib/libc/.depend.${f}.o; then \ echo Removing stale dependencies for ${f} syscall wrappers; \ rm -f ${OBJTREE}${.CURDIR}/lib/libc/.depend.${f}.* \ ${OBJTREE}${.CURDIR}/world32/${.CURDIR}/lib/libc/.depend.${f}.*; \ fi .endif .endfor # 20170523 remove stale generated asm files for functions which are no longer # syscalls after r302092 (pipe) and r318736 (others) .for f in getdents lstat mknod pipe stat .if exists(${OBJTREE}${.CURDIR}/lib/libc/${f}.s) || \ exists(${OBJTREE}${.CURDIR}/lib/libc/${f}.S) @echo Removing stale generated ${f} syscall files @rm -f ${OBJTREE}${.CURDIR}/lib/libc/${f}.* \ ${OBJTREE}${.CURDIR}/lib/libc/.depend.${f}.* \ ${OBJTREE}${.CURDIR}/world32/${.CURDIR}/lib/libc/${f}.* \ ${OBJTREE}${.CURDIR}/world32/${.CURDIR}/lib/libc/.depend.${f}.* .endif .endfor .for _dir in \ lib lib/casper usr legacy/bin legacy/usr mkdir -p ${WORLDTMP}/${_dir} .endfor mtree -deU -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${WORLDTMP}/legacy/usr >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.include.dist \ -p ${WORLDTMP}/legacy/usr/include >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${WORLDTMP}/usr >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.include.dist \ -p ${WORLDTMP}/usr/include >/dev/null ln -sf ${.CURDIR}/sys ${WORLDTMP} .if ${MK_DEBUG_FILES} != "no" # We could instead disable debug files for these build stages mtree -deU -f ${.CURDIR}/etc/mtree/BSD.debug.dist \ -p ${WORLDTMP}/legacy/usr/lib >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.debug.dist \ -p ${WORLDTMP}/usr/lib >/dev/null .endif .if defined(LIBCOMPAT) mtree -deU -f ${.CURDIR}/etc/mtree/BSD.lib${libcompat}.dist \ -p ${WORLDTMP}/usr >/dev/null .if ${MK_DEBUG_FILES} != "no" mtree -deU -f ${.CURDIR}/etc/mtree/BSD.lib${libcompat}.dist \ -p ${WORLDTMP}/legacy/usr/lib/debug/usr >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.lib${libcompat}.dist \ -p ${WORLDTMP}/usr/lib/debug/usr >/dev/null .endif .endif .if ${MK_TESTS} != "no" mkdir -p ${WORLDTMP}${TESTSBASE} mtree -deU -f ${.CURDIR}/etc/mtree/BSD.tests.dist \ -p ${WORLDTMP}${TESTSBASE} >/dev/null .if ${MK_DEBUG_FILES} != "no" mkdir -p ${WORLDTMP}/usr/lib/debug/${TESTSBASE} mtree -deU -f ${.CURDIR}/etc/mtree/BSD.tests.dist \ -p ${WORLDTMP}/usr/lib/debug/${TESTSBASE} >/dev/null .endif .endif .for _mtree in ${LOCAL_MTREE} mtree -deU -f ${.CURDIR}/${_mtree} -p ${WORLDTMP} > /dev/null .endfor _legacy: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 1.1: legacy release compatibility shims" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${BMAKE} legacy _bootstrap-tools: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 1.2: bootstrap tools" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${BMAKE} bootstrap-tools _cleanobj: .if !defined(NO_CLEAN) @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.1: cleaning up the object tree" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${WMAKE} ${CLEANDIR} .if defined(LIBCOMPAT) ${_+_}cd ${.CURDIR}; ${LIBCOMPATWMAKE} -f Makefile.inc1 ${CLEANDIR} .endif .endif _obj: @echo @echo "--------------------------------------------------------------" @echo ">>> 
stage 2.2: rebuilding the object tree" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${WMAKE} obj _build-tools: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.3: build tools" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${TMAKE} build-tools _cross-tools: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 3: cross tools" @echo "--------------------------------------------------------------" @rm -f ${.OBJDIR}/compiler-metadata.mk ${_+_}cd ${.CURDIR}; ${XMAKE} cross-tools ${_+_}cd ${.CURDIR}; ${XMAKE} kernel-tools _compiler-metadata: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 3.1: recording compiler metadata" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${WMAKE} compiler-metadata.mk _includes: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 4.1: building includes" @echo "--------------------------------------------------------------" # Special handling for SUBDIR_OVERRIDE in buildworld as they most likely need # headers from default SUBDIR. Do SUBDIR_OVERRIDE includes last. ${_+_}cd ${.CURDIR}; ${WMAKE} SUBDIR_OVERRIDE= SHARED=symlinks \ MK_INCLUDES=yes includes .if !empty(SUBDIR_OVERRIDE) && make(buildworld) ${_+_}cd ${.CURDIR}; ${WMAKE} MK_INCLUDES=yes SHARED=symlinks includes .endif _libraries: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 4.2: building libraries" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; \ ${WMAKE} -DNO_FSCHG MK_HTML=no -DNO_LINT MK_MAN=no \ MK_PROFILE=no MK_TESTS=no MK_TESTS_SUPPORT=${MK_TESTS} libraries everything: .PHONY @echo @echo "--------------------------------------------------------------" @echo ">>> stage 4.3: building everything" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; _PARALLEL_SUBDIR_OK=1 ${WMAKE} all WMAKE_TGTS= .if !defined(WORLDFAST) WMAKE_TGTS+= _worldtmp _legacy .if empty(SUBDIR_OVERRIDE) WMAKE_TGTS+= _bootstrap-tools .endif WMAKE_TGTS+= _cleanobj .if !defined(NO_OBJ) WMAKE_TGTS+= _obj .endif WMAKE_TGTS+= _build-tools _cross-tools WMAKE_TGTS+= _compiler-metadata WMAKE_TGTS+= _includes .endif .if !defined(NO_LIBS) WMAKE_TGTS+= _libraries .endif WMAKE_TGTS+= everything .if defined(LIBCOMPAT) && empty(SUBDIR_OVERRIDE) WMAKE_TGTS+= build${libcompat} .endif buildworld: buildworld_prologue ${WMAKE_TGTS} buildworld_epilogue .PHONY .ORDER: buildworld_prologue ${WMAKE_TGTS} buildworld_epilogue buildworld_prologue: .PHONY @echo "--------------------------------------------------------------" @echo ">>> World build started on `LC_ALL=C date`" @echo "--------------------------------------------------------------" buildworld_epilogue: .PHONY @echo @echo "--------------------------------------------------------------" @echo ">>> World build completed on `LC_ALL=C date`" @echo "--------------------------------------------------------------" # # We need to have this as a target because the indirection between Makefile # and Makefile.inc1 causes the correct PATH to be used, rather than a # modification of the current environment's PATH. In addition, we need # to quote multiword values. 
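# Illustrative use (a sketch; because multiword values are quoted, eval is
# needed to re-parse them): run a single command under the world-build
# environment:
#   eval "env $(make TARGET=arm64 TARGET_ARCH=aarch64 buildenvvars) cc --version"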
# buildenvvars: .PHONY @echo ${WMAKEENV:Q} ${.MAKE.EXPORTED:@v@$v=\"${$v}\"@} .if ${.TARGETS:Mbuildenv} .if ${.MAKEFLAGS:M-j} .error The buildenv target is incompatible with -j .endif .endif BUILDENV_DIR?= ${.CURDIR} buildenv: .PHONY @echo Entering world for ${TARGET_ARCH}:${TARGET} .if ${BUILDENV_SHELL:M*zsh*} @echo For ZSH you must run: export CPUTYPE=${TARGET_CPUTYPE} .endif @cd ${BUILDENV_DIR} && env ${WMAKEENV} BUILDENV=1 ${BUILDENV_SHELL} \ || true TOOLCHAIN_TGTS= ${WMAKE_TGTS:Neverything:Nbuild${libcompat}} toolchain: ${TOOLCHAIN_TGTS} .PHONY kernel-toolchain: ${TOOLCHAIN_TGTS:N_includes:N_libraries} .PHONY # # installcheck # # Checks to be sure system is ready for installworld/installkernel. # installcheck: _installcheck_world _installcheck_kernel .PHONY _installcheck_world: .PHONY _installcheck_kernel: .PHONY # # Require DESTDIR to be set if installing for a different architecture or # using the user/group database in the source tree. # .if ${TARGET_ARCH} != ${MACHINE_ARCH} || ${TARGET} != ${MACHINE} || \ defined(DB_FROM_SRC) .if !make(distributeworld) _installcheck_world: __installcheck_DESTDIR _installcheck_kernel: __installcheck_DESTDIR __installcheck_DESTDIR: .PHONY .if !defined(DESTDIR) || empty(DESTDIR) @echo "ERROR: Please set DESTDIR!"; \ false .endif .endif .endif .if !defined(DB_FROM_SRC) # # Check for missing UIDs/GIDs. # CHECK_UIDS= auditdistd CHECK_GIDS= audit .if ${MK_SENDMAIL} != "no" CHECK_UIDS+= smmsp CHECK_GIDS+= smmsp .endif .if ${MK_PF} != "no" CHECK_UIDS+= proxy CHECK_GIDS+= proxy authpf .endif .if ${MK_UNBOUND} != "no" CHECK_UIDS+= unbound CHECK_GIDS+= unbound .endif _installcheck_world: __installcheck_UGID __installcheck_UGID: .PHONY .for uid in ${CHECK_UIDS} @if ! `id -u ${uid} >/dev/null 2>&1`; then \ echo "ERROR: Required ${uid} user is missing, see /usr/src/UPDATING."; \ false; \ fi .endfor .for gid in ${CHECK_GIDS} @if ! `find / -prune -group ${gid} >/dev/null 2>&1`; then \ echo "ERROR: Required ${gid} group is missing, see /usr/src/UPDATING."; \ false; \ fi .endfor .endif # # If installing over the running system (DESTDIR is / or unset) and the install # includes rescue, try running rescue from the objdir as a sanity check. If # rescue is not functional (e.g., because it depends on a system call not # supported by the currently running kernel), abort the installation. # .if !make(distributeworld) && ${MK_RESCUE} != "no" && \ (empty(DESTDIR) || ${DESTDIR} == "/") && empty(BYPASS_INSTALLCHECK_SH) _installcheck_world: __installcheck_sh_check __installcheck_sh_check: .PHONY @if [ "`${OBJTREE}${.CURDIR}/rescue/rescue/rescue sh -c 'echo OK'`" != \ OK ]; then \ echo "rescue/sh check failed, installation aborted" >&2; \ false; \ fi .endif # # Required install tools to be saved in a scratch dir for safety. # .if ${MK_ZONEINFO} != "no" _zoneinfo= zic tzsetup .endif ITOOLS= [ awk cap_mkdb cat chflags chmod chown cmp cp \ date echo egrep find grep id install ${_install-info} \ ln make mkdir mtree mv pwd_mkdb \ rm sed services_mkdb sh strip sysctl test true uname wc ${_zoneinfo} \ ${LOCAL_ITOOLS} # Needed for share/man .if ${MK_MAN_UTILS} != "no" ITOOLS+=makewhatis .endif # # distributeworld # # Distributes everything compiled by a `buildworld'. # # installworld # # Installs everything compiled by a 'buildworld'. 
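# Typical invocations, as a sketch (paths are illustrative):
#   make installworld                # onto the running system
#   make DESTDIR=/mnt installworld   # onto a system mounted at /mnt
#   make -DNO_ROOT DESTDIR=/tmp/stage installworld
# The last form runs unprivileged; ownership and modes are recorded in the
# METALOG file instead of being applied directly.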
# # Non-base distributions produced by the base system EXTRA_DISTRIBUTIONS= doc .if defined(LIBCOMPAT) EXTRA_DISTRIBUTIONS+= lib${libcompat} .endif .if ${MK_TESTS} != "no" EXTRA_DISTRIBUTIONS+= tests .endif DEBUG_DISTRIBUTIONS= .if ${MK_DEBUG_FILES} != "no" DEBUG_DISTRIBUTIONS+= base ${EXTRA_DISTRIBUTIONS:S,doc,,:S,tests,,} .endif MTREE_MAGIC?= mtree 2.0 distributeworld installworld stageworld: _installcheck_world .PHONY mkdir -p ${INSTALLTMP} progs=$$(for prog in ${ITOOLS}; do \ if progpath=`which $$prog`; then \ echo $$progpath; \ else \ echo "Required tool $$prog not found in PATH." >&2; \ exit 1; \ fi; \ done); \ libs=$$(ldd -f "%o %p\n" -f "%o %p\n" $$progs 2>/dev/null | sort -u | \ while read line; do \ set -- $$line; \ if [ "$$2 $$3" != "not found" ]; then \ echo $$2; \ else \ echo "Required library $$1 not found." >&2; \ exit 1; \ fi; \ done); \ cp $$libs $$progs ${INSTALLTMP} cp -R $${PATH_LOCALE:-"/usr/share/locale"} ${INSTALLTMP}/locale .if defined(NO_ROOT) -mkdir -p ${METALOG:H} echo "#${MTREE_MAGIC}" > ${METALOG} .endif .if make(distributeworld) .for dist in ${EXTRA_DISTRIBUTIONS} -mkdir ${DESTDIR}/${DISTDIR}/${dist} mtree -deU -f ${.CURDIR}/etc/mtree/BSD.root.dist \ -p ${DESTDIR}/${DISTDIR}/${dist} >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}/usr >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.include.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}/usr/include >/dev/null .if ${MK_DEBUG_FILES} != "no" mtree -deU -f ${.CURDIR}/etc/mtree/BSD.debug.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}/usr/lib >/dev/null .endif .if defined(LIBCOMPAT) mtree -deU -f ${.CURDIR}/etc/mtree/BSD.lib${libcompat}.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}/usr >/dev/null .if ${MK_DEBUG_FILES} != "no" mtree -deU -f ${.CURDIR}/etc/mtree/BSD.lib${libcompat}.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}/usr/lib/debug/usr >/dev/null .endif .endif .if ${MK_TESTS} != "no" && ${dist} == "tests" -mkdir -p ${DESTDIR}/${DISTDIR}/${dist}${TESTSBASE} mtree -deU -f ${.CURDIR}/etc/mtree/BSD.tests.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}${TESTSBASE} >/dev/null .if ${MK_DEBUG_FILES} != "no" mtree -deU -f ${.CURDIR}/etc/mtree/BSD.tests.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}/usr/lib/debug/${TESTSBASE} >/dev/null .endif .endif .if defined(NO_ROOT) ${IMAKEENV} mtree -C -f ${.CURDIR}/etc/mtree/BSD.root.dist | \ sed -e 's#^\./#./${dist}/#' >> ${METALOG} ${IMAKEENV} mtree -C -f ${.CURDIR}/etc/mtree/BSD.usr.dist | \ sed -e 's#^\./#./${dist}/usr/#' >> ${METALOG} ${IMAKEENV} mtree -C -f ${.CURDIR}/etc/mtree/BSD.include.dist | \ sed -e 's#^\./#./${dist}/usr/include/#' >> ${METALOG} .if defined(LIBCOMPAT) ${IMAKEENV} mtree -C -f ${.CURDIR}/etc/mtree/BSD.lib${libcompat}.dist | \ sed -e 's#^\./#./${dist}/usr/#' >> ${METALOG} .endif .endif .endfor -mkdir ${DESTDIR}/${DISTDIR}/base ${_+_}cd ${.CURDIR}/etc; ${CROSSENV} PATH=${TMPPATH} ${MAKE} \ METALOG=${METALOG} ${IMAKE_INSTALL} ${IMAKE_MTREE} \ DISTBASE=/base DESTDIR=${DESTDIR}/${DISTDIR}/base \ LOCAL_MTREE=${LOCAL_MTREE:Q} distrib-dirs .endif ${_+_}cd ${.CURDIR}; ${IMAKE} re${.TARGET:S/world$//}; \ ${IMAKEENV} rm -rf ${INSTALLTMP} .if make(distributeworld) .for dist in ${EXTRA_DISTRIBUTIONS} find ${DESTDIR}/${DISTDIR}/${dist} -mindepth 1 -type d -empty -delete .endfor .if defined(NO_ROOT) .for dist in base ${EXTRA_DISTRIBUTIONS} @# For each file that exists in this dist, print the corresponding @# line from the METALOG. This relies on the fact that @# a line containing only the filename will sort immediately before @# the relevant mtree line. 
cd ${DESTDIR}/${DISTDIR}; \ find ./${dist} | sort -u ${METALOG} - | \ awk 'BEGIN { print "#${MTREE_MAGIC}" } !/ type=/ { file = $$1 } / type=/ { if ($$1 == file) { sub(/^\.\/${dist}\//, "./"); print } }' > \ ${DESTDIR}/${DISTDIR}/${dist}.meta .endfor .for dist in ${DEBUG_DISTRIBUTIONS} @# For each file that exists in this dist, print the corresponding @# line from the METALOG. This relies on the fact that @# a line containing only the filename will sort immediately before @# the relevant mtree line. cd ${DESTDIR}/${DISTDIR}; \ find ./${dist}/usr/lib/debug | sort -u ${METALOG} - | \ awk 'BEGIN { print "#${MTREE_MAGIC}" } !/ type=/ { file = $$1 } / type=/ { if ($$1 == file) { sub(/^\.\/${dist}\//, "./"); print } }' > \ ${DESTDIR}/${DISTDIR}/${dist}.debug.meta .endfor .endif .endif packageworld: .PHONY .for dist in base ${EXTRA_DISTRIBUTIONS} .if defined(NO_ROOT) ${_+_}cd ${DESTDIR}/${DISTDIR}/${dist}; \ tar cvf - --exclude usr/lib/debug \ @${DESTDIR}/${DISTDIR}/${dist}.meta | \ ${XZ_CMD} > ${PACKAGEDIR}/${dist}.txz .else ${_+_}cd ${DESTDIR}/${DISTDIR}/${dist}; \ tar cvf - --exclude usr/lib/debug . | \ ${XZ_CMD} > ${PACKAGEDIR}/${dist}.txz .endif .endfor .for dist in ${DEBUG_DISTRIBUTIONS} . if defined(NO_ROOT) ${_+_}cd ${DESTDIR}/${DISTDIR}/${dist}; \ tar cvf - @${DESTDIR}/${DISTDIR}/${dist}.debug.meta | \ ${XZ_CMD} > ${PACKAGEDIR}/${dist}-dbg.txz . else ${_+_}cd ${DESTDIR}/${DISTDIR}/${dist}; \ tar cvLf - usr/lib/debug | \ ${XZ_CMD} > ${PACKAGEDIR}/${dist}-dbg.txz . endif .endfor # # reinstall # # If you have a build server, you can NFS mount the source and obj directories # and do a 'make reinstall' on the *client* to install new binaries from the # most recent server build. # restage reinstall: .MAKE .PHONY @echo "--------------------------------------------------------------" @echo ">>> Making hierarchy" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 \ LOCAL_MTREE=${LOCAL_MTREE:Q} hierarchy .if make(restage) @echo "--------------------------------------------------------------" @echo ">>> Making distribution" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 \ LOCAL_MTREE=${LOCAL_MTREE:Q} distribution .endif @echo @echo "--------------------------------------------------------------" @echo ">>> Installing everything" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 install .if defined(LIBCOMPAT) ${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 install${libcompat} .endif redistribute: .MAKE .PHONY @echo "--------------------------------------------------------------" @echo ">>> Distributing everything" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 distribute .if defined(LIBCOMPAT) ${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 distribute${libcompat} \ DISTRIBUTION=lib${libcompat} .endif distrib-dirs distribution: .MAKE .PHONY ${_+_}cd ${.CURDIR}/etc; ${CROSSENV} PATH=${TMPPATH} ${MAKE} \ ${IMAKE_INSTALL} ${IMAKE_MTREE} METALOG=${METALOG} ${.TARGET} .if make(distribution) ${_+_}cd ${.CURDIR}; ${CROSSENV} PATH=${TMPPATH} \ ${MAKE} -f Makefile.inc1 ${IMAKE_INSTALL} \ METALOG=${METALOG} MK_TESTS=no installconfig .endif # # buildkernel and installkernel # # Which kernels to build and/or install is specified by setting # KERNCONF. If not defined a GENERIC kernel is built/installed. 
# Only the existing (depending on TARGET) config files are used # for building kernels and only the first of these is designated # as the one being installed. # # Note that we have to use TARGET instead of TARGET_ARCH when # we're in kernel-land. Since only TARGET_ARCH is (expected to be) # set to cross-build, we have to make sure TARGET is set # properly. .if defined(KERNFAST) NO_KERNELCLEAN= t NO_KERNELCONFIG= t NO_KERNELOBJ= t # Shortcut for KERNCONF=Blah -DKERNFAST is now KERNFAST=Blah .if !defined(KERNCONF) && ${KERNFAST} != "1" KERNCONF=${KERNFAST} .endif .endif .if ${TARGET_ARCH} == "powerpc64" KERNCONF?= GENERIC64 .else KERNCONF?= GENERIC .endif INSTKERNNAME?= kernel KERNSRCDIR?= ${.CURDIR}/sys KRNLCONFDIR= ${KERNSRCDIR}/${TARGET}/conf KRNLOBJDIR= ${OBJTREE}${KERNSRCDIR} KERNCONFDIR?= ${KRNLCONFDIR} BUILDKERNELS= INSTALLKERNEL= .if defined(NO_INSTALLKERNEL) # All of the BUILDKERNELS loops start at index 1. BUILDKERNELS+= dummy .endif .for _kernel in ${KERNCONF} .if exists(${KERNCONFDIR}/${_kernel}) BUILDKERNELS+= ${_kernel} .if empty(INSTALLKERNEL) && !defined(NO_INSTALLKERNEL) INSTALLKERNEL= ${_kernel} .endif .endif .endfor ${WMAKE_TGTS:N_worldtmp:Nbuild${libcompat}} ${.ALLTARGETS:M_*:N_worldtmp}: .MAKE .PHONY # # buildkernel # # Builds all kernels defined by BUILDKERNELS. # buildkernel: .MAKE .PHONY .if empty(BUILDKERNELS:Ndummy) @echo "ERROR: Missing kernel configuration file(s) (${KERNCONF})."; \ false .endif @echo .for _kernel in ${BUILDKERNELS:Ndummy} @echo "--------------------------------------------------------------" @echo ">>> Kernel build for ${_kernel} started on `LC_ALL=C date`" @echo "--------------------------------------------------------------" @echo "===> ${_kernel}" mkdir -p ${KRNLOBJDIR} .if !defined(NO_KERNELCONFIG) @echo @echo "--------------------------------------------------------------" @echo ">>> stage 1: configuring the kernel" @echo "--------------------------------------------------------------" cd ${KRNLCONFDIR}; \ PATH=${TMPPATH} \ config ${CONFIGARGS} -d ${KRNLOBJDIR}/${_kernel} \ -I '${KERNCONFDIR}' '${KERNCONFDIR}/${_kernel}' .endif .if !defined(NO_CLEAN) && !defined(NO_KERNELCLEAN) @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.1: cleaning up the object tree" @echo "--------------------------------------------------------------" ${_+_}cd ${KRNLOBJDIR}/${_kernel}; ${KMAKE} ${CLEANDIR} .endif .if !defined(NO_KERNELOBJ) @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.2: rebuilding the object tree" @echo "--------------------------------------------------------------" ${_+_}cd ${KRNLOBJDIR}/${_kernel}; ${KMAKE} obj .endif @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.3: build tools" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${KTMAKE} kernel-tools @echo @echo "--------------------------------------------------------------" @echo ">>> stage 3.1: building everything" @echo "--------------------------------------------------------------" ${_+_}cd ${KRNLOBJDIR}/${_kernel}; ${KMAKE} all -DNO_MODULES_OBJ @echo "--------------------------------------------------------------" @echo ">>> Kernel build for ${_kernel} completed on `LC_ALL=C date`" @echo "--------------------------------------------------------------" .endfor NO_INSTALLEXTRAKERNELS?= yes # # installkernel, etc.
# # Install the kernel defined by INSTALLKERNEL # installkernel installkernel.debug \ reinstallkernel reinstallkernel.debug: _installcheck_kernel .PHONY .if !defined(NO_INSTALLKERNEL) .if empty(INSTALLKERNEL) @echo "ERROR: No kernel \"${KERNCONF}\" to install."; \ false .endif @echo "--------------------------------------------------------------" @echo ">>> Installing kernel ${INSTALLKERNEL}" @echo "--------------------------------------------------------------" cd ${KRNLOBJDIR}/${INSTALLKERNEL}; \ ${CROSSENV} PATH=${TMPPATH} \ ${MAKE} ${IMAKE_INSTALL} KERNEL=${INSTKERNNAME} ${.TARGET:S/kernel//} .endif .if ${BUILDKERNELS:[#]} > 1 && ${NO_INSTALLEXTRAKERNELS} != "yes" .for _kernel in ${BUILDKERNELS:[2..-1]} @echo "--------------------------------------------------------------" @echo ">>> Installing kernel ${_kernel}" @echo "--------------------------------------------------------------" cd ${KRNLOBJDIR}/${_kernel}; \ ${CROSSENV} PATH=${TMPPATH} \ ${MAKE} ${IMAKE_INSTALL} KERNEL=${INSTKERNNAME}.${_kernel} ${.TARGET:S/kernel//} .endfor .endif distributekernel distributekernel.debug: .PHONY .if !defined(NO_INSTALLKERNEL) .if empty(INSTALLKERNEL) @echo "ERROR: No kernel \"${KERNCONF}\" to install."; \ false .endif mkdir -p ${DESTDIR}/${DISTDIR} .if defined(NO_ROOT) @echo "#${MTREE_MAGIC}" > ${DESTDIR}/${DISTDIR}/kernel.premeta .endif cd ${KRNLOBJDIR}/${INSTALLKERNEL}; \ ${IMAKEENV} ${IMAKE_INSTALL:S/METALOG/kernel.premeta/} \ ${IMAKE_MTREE} PATH=${TMPPATH} ${MAKE} KERNEL=${INSTKERNNAME} \ DESTDIR=${INSTALL_DDIR}/kernel \ ${.TARGET:S/distributekernel/install/} .if defined(NO_ROOT) @sed -e 's|^./kernel|.|' ${DESTDIR}/${DISTDIR}/kernel.premeta > \ ${DESTDIR}/${DISTDIR}/kernel.meta .endif .endif .if ${BUILDKERNELS:[#]} > 1 && ${NO_INSTALLEXTRAKERNELS} != "yes" .for _kernel in ${BUILDKERNELS:[2..-1]} .if defined(NO_ROOT) @echo "#${MTREE_MAGIC}" > ${DESTDIR}/${DISTDIR}/kernel.${_kernel}.premeta .endif cd ${KRNLOBJDIR}/${_kernel}; \ ${IMAKEENV} ${IMAKE_INSTALL:S/METALOG/kernel.${_kernel}.premeta/} \ ${IMAKE_MTREE} PATH=${TMPPATH} ${MAKE} \ KERNEL=${INSTKERNNAME}.${_kernel} \ DESTDIR=${INSTALL_DDIR}/kernel.${_kernel} \ ${.TARGET:S/distributekernel/install/} .if defined(NO_ROOT) @sed -e "s|^./kernel.${_kernel}|.|" \ ${DESTDIR}/${DISTDIR}/kernel.${_kernel}.premeta > \ ${DESTDIR}/${DISTDIR}/kernel.${_kernel}.meta .endif .endfor .endif packagekernel: .PHONY .if defined(NO_ROOT) .if !defined(NO_INSTALLKERNEL) cd ${DESTDIR}/${DISTDIR}/kernel; \ tar cvf - --exclude '*.debug' \ @${DESTDIR}/${DISTDIR}/kernel.meta | \ ${XZ_CMD} > ${PACKAGEDIR}/kernel.txz .endif cd ${DESTDIR}/${DISTDIR}/kernel; \ tar cvf - --include '*/*/*.debug' \ @${DESTDIR}/${DISTDIR}/kernel.meta | \ ${XZ_CMD} > ${DESTDIR}/${DISTDIR}/kernel-dbg.txz .if ${BUILDKERNELS:[#]} > 1 && ${NO_INSTALLEXTRAKERNELS} != "yes" .for _kernel in ${BUILDKERNELS:[2..-1]} cd ${DESTDIR}/${DISTDIR}/kernel.${_kernel}; \ tar cvf - --exclude '*.debug' \ @${DESTDIR}/${DISTDIR}/kernel.${_kernel}.meta | \ ${XZ_CMD} > ${PACKAGEDIR}/kernel.${_kernel}.txz cd ${DESTDIR}/${DISTDIR}/kernel.${_kernel}; \ tar cvf - --include '*/*/*.debug' \ @${DESTDIR}/${DISTDIR}/kernel.${_kernel}.meta | \ ${XZ_CMD} > ${DESTDIR}/${DISTDIR}/kernel.${_kernel}-dbg.txz .endfor .endif .else .if !defined(NO_INSTALLKERNEL) cd ${DESTDIR}/${DISTDIR}/kernel; \ tar cvf - --exclude '*.debug' . | \ ${XZ_CMD} > ${PACKAGEDIR}/kernel.txz .endif cd ${DESTDIR}/${DISTDIR}/kernel; \ tar cvf - --include '*/*/*.debug' $$(eval find .) 
| \ ${XZ_CMD} > ${DESTDIR}/${DISTDIR}/kernel-dbg.txz .if ${BUILDKERNELS:[#]} > 1 && ${NO_INSTALLEXTRAKERNELS} != "yes" .for _kernel in ${BUILDKERNELS:[2..-1]} cd ${DESTDIR}/${DISTDIR}/kernel.${_kernel}; \ tar cvf - --exclude '*.debug' . | \ ${XZ_CMD} > ${PACKAGEDIR}/kernel.${_kernel}.txz cd ${DESTDIR}/${DISTDIR}/kernel.${_kernel}; \ tar cvf - --include '*/*/*.debug' $$(eval find .) | \ ${XZ_CMD} > ${DESTDIR}/${DISTDIR}/kernel.${_kernel}-dbg.txz .endfor .endif .endif stagekernel: .PHONY ${_+_}${MAKE} -C ${.CURDIR} ${.MAKEFLAGS} distributekernel PORTSDIR?= /usr/ports WSTAGEDIR?= ${MAKEOBJDIRPREFIX}${.CURDIR}/${TARGET}.${TARGET_ARCH}/worldstage KSTAGEDIR?= ${MAKEOBJDIRPREFIX}${.CURDIR}/${TARGET}.${TARGET_ARCH}/kernelstage REPODIR?= ${MAKEOBJDIRPREFIX}${.CURDIR}/repo PKGSIGNKEY?= # empty .ORDER: stage-packages create-packages .ORDER: create-packages create-world-packages .ORDER: create-packages create-kernel-packages .ORDER: create-packages sign-packages _pkgbootstrap: .PHONY .if !exists(${LOCALBASE}/sbin/pkg) @env ASSUME_ALWAYS_YES=YES pkg bootstrap .endif packages: .PHONY ${_+_}${MAKE} -C ${.CURDIR} PKG_VERSION=${PKG_VERSION} real-packages package-pkg: .PHONY rm -rf /tmp/ports.${TARGET} || : env ${WMAKEENV:Q} SRCDIR=${.CURDIR} PORTSDIR=${PORTSDIR} REVISION=${_REVISION} \ PKG_CMD=${PKG_CMD} PKG_VERSION=${PKG_VERSION} REPODIR=${REPODIR} \ WSTAGEDIR=${WSTAGEDIR} \ sh ${.CURDIR}/release/scripts/make-pkg-package.sh real-packages: stage-packages create-packages sign-packages .PHONY stage-packages: .PHONY @mkdir -p ${REPODIR} ${WSTAGEDIR} ${KSTAGEDIR} ${_+_}@cd ${.CURDIR}; \ ${MAKE} DESTDIR=${WSTAGEDIR} -DNO_ROOT -B stageworld ; \ ${MAKE} DESTDIR=${KSTAGEDIR} -DNO_ROOT -B stagekernel create-packages: _pkgbootstrap .PHONY @mkdir -p ${REPODIR} ${_+_}@cd ${.CURDIR}; \ ${MAKE} DESTDIR=${WSTAGEDIR} \ PKG_VERSION=${PKG_VERSION} create-world-packages ; \ ${MAKE} DESTDIR=${KSTAGEDIR} \ PKG_VERSION=${PKG_VERSION} DISTDIR=kernel \ create-kernel-packages create-world-packages: _pkgbootstrap .PHONY @rm -f ${WSTAGEDIR}/*.plist 2>/dev/null || : @cd ${WSTAGEDIR} ; \ awk -f ${SRCDIR}/release/scripts/mtree-to-plist.awk \ ${WSTAGEDIR}/METALOG @for plist in ${WSTAGEDIR}/*.plist; do \ plist=$${plist##*/} ; \ pkgname=$${plist%.plist} ; \ sh ${SRCDIR}/release/packages/generate-ucl.sh -o $${pkgname} \ -s ${SRCDIR} -u ${WSTAGEDIR}/$${pkgname}.ucl ; \ done @for plist in ${WSTAGEDIR}/*.plist; do \ plist=$${plist##*/} ; \ pkgname=$${plist%.plist} ; \ awk -F\" ' \ /^name/ { printf("===> Creating %s-", $$2); next } \ /^version/ { print $$2; next } \ ' ${WSTAGEDIR}/$${pkgname}.ucl ; \ ${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh -o ALLOW_BASE_SHLIBS=yes \ create -M ${WSTAGEDIR}/$${pkgname}.ucl \ -p ${WSTAGEDIR}/$${pkgname}.plist \ -r ${WSTAGEDIR} \ -o ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI)/${PKG_VERSION} ; \ done create-kernel-packages: _pkgbootstrap .PHONY .if exists(${KSTAGEDIR}/kernel.meta) .for flavor in "" -debug @cd ${KSTAGEDIR}/${DISTDIR} ; \ awk -f ${SRCDIR}/release/scripts/mtree-to-plist.awk \ -v kernel=yes -v _kernconf=${INSTALLKERNEL} \ ${KSTAGEDIR}/kernel.meta ; \ cap_arg=`cd ${SRCDIR}/etc ; ${MAKE} -VCAP_MKDB_ENDIAN` ; \ pwd_arg=`cd ${SRCDIR}/etc ; ${MAKE} -VPWD_MKDB_ENDIAN` ; \ sed -e "s/%VERSION%/${PKG_VERSION}/" \ -e "s/%PKGNAME%/kernel-${INSTALLKERNEL:tl}${flavor}/" \ -e "s/%COMMENT%/FreeBSD ${INSTALLKERNEL} kernel ${flavor}/" \ -e "s/%DESC%/FreeBSD ${INSTALLKERNEL} kernel ${flavor}/" \ -e "s/%CAP_MKDB_ENDIAN%/$${cap_arg}/g" \ -e "s/%PWD_MKDB_ENDIAN%/$${pwd_arg}/g" \ 
${SRCDIR}/release/packages/kernel.ucl \ > ${KSTAGEDIR}/${DISTDIR}/kernel.${INSTALLKERNEL}${flavor}.ucl ; \ awk -F\" ' \ /name/ { printf("===> Creating %s-", $$2); next } \ /version/ {print $$2; next } ' \ ${KSTAGEDIR}/${DISTDIR}/kernel.${INSTALLKERNEL}${flavor}.ucl ; \ ${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh -o ALLOW_BASE_SHLIBS=yes \ create -M ${KSTAGEDIR}/${DISTDIR}/kernel.${INSTALLKERNEL}${flavor}.ucl \ -p ${KSTAGEDIR}/${DISTDIR}/kernel.${INSTALLKERNEL}${flavor}.plist \ -r ${KSTAGEDIR}/${DISTDIR} \ -o ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI)/${PKG_VERSION} .endfor .endif .if ${BUILDKERNELS:[#]} > 1 && ${NO_INSTALLEXTRAKERNELS} != "yes" .for _kernel in ${BUILDKERNELS:[2..-1]} .if exists(${KSTAGEDIR}/kernel.${_kernel}.meta) .for flavor in "" -debug @cd ${KSTAGEDIR}/kernel.${_kernel} ; \ awk -f ${SRCDIR}/release/scripts/mtree-to-plist.awk \ -v kernel=yes -v _kernconf=${_kernel} \ ${KSTAGEDIR}/kernel.${_kernel}.meta ; \ cap_arg=`cd ${SRCDIR}/etc ; ${MAKE} -VCAP_MKDB_ENDIAN` ; \ pwd_arg=`cd ${SRCDIR}/etc ; ${MAKE} -VPWD_MKDB_ENDIAN` ; \ sed -e "s/%VERSION%/${PKG_VERSION}/" \ -e "s/%PKGNAME%/kernel-${_kernel:tl}${flavor}/" \ -e "s/%COMMENT%/FreeBSD ${_kernel} kernel ${flavor}/" \ -e "s/%DESC%/FreeBSD ${_kernel} kernel ${flavor}/" \ -e "s/%CAP_MKDB_ENDIAN%/$${cap_arg}/g" \ -e "s/%PWD_MKDB_ENDIAN%/$${pwd_arg}/g" \ ${SRCDIR}/release/packages/kernel.ucl \ > ${KSTAGEDIR}/kernel.${_kernel}/kernel.${_kernel}${flavor}.ucl ; \ awk -F\" ' \ /name/ { printf("===> Creating %s-", $$2); next } \ /version/ {print $$2; next } ' \ ${KSTAGEDIR}/kernel.${_kernel}/kernel.${_kernel}${flavor}.ucl ; \ ${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh -o ALLOW_BASE_SHLIBS=yes \ create -M ${KSTAGEDIR}/kernel.${_kernel}/kernel.${_kernel}${flavor}.ucl \ -p ${KSTAGEDIR}/kernel.${_kernel}/kernel.${_kernel}${flavor}.plist \ -r ${KSTAGEDIR}/kernel.${_kernel} \ -o ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI)/${PKG_VERSION} .endfor .endif .endfor .endif sign-packages: _pkgbootstrap .PHONY @[ -L "${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI)/latest" ] && \ unlink ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI)/latest ; \ ${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh repo \ -o ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI)/${PKG_VERSION} \ ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI)/${PKG_VERSION} \ ${PKGSIGNKEY} ; \ cd ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI); \ ln -s ${PKG_VERSION} latest # # # checkworld # # Run test suite on installed world. # checkworld: .PHONY @if [ ! -x "${LOCALBASE}/bin/kyua" ]; then \ echo "You need kyua (devel/kyua) to run the test suite." | /usr/bin/fmt; \ exit 1; \ fi ${_+_}PATH="$$PATH:${LOCALBASE}/bin" kyua test -k ${TESTSBASE}/Kyuafile # # # doxygen # # Build the API documentation with doxygen # doxygen: .PHONY @if [ ! -x "${LOCALBASE}/bin/doxygen" ]; then \ echo "You need doxygen (devel/doxygen) to generate the API documentation of the kernel." | /usr/bin/fmt; \ exit 1; \ fi ${_+_}cd ${.CURDIR}/tools/kerneldoc/subsys; ${MAKE} obj all # # update # # Update the source tree(s), by running svn/svnup to update to the # latest copy. 
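# Illustrative usage: defining SVN_UPDATE makes the update target run
# ${SVN} update ${SVNFLAGS} (default: -r HEAD) in ${.CURDIR}:
#   make SVN_UPDATE=yes update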
# update: .PHONY .if defined(SVN_UPDATE) @echo "--------------------------------------------------------------" @echo ">>> Updating ${.CURDIR} using Subversion" @echo "--------------------------------------------------------------" @(cd ${.CURDIR}; ${SVN} update ${SVNFLAGS}) .endif # # ------------------------------------------------------------------------ # # From here onwards are utility targets used by the 'make world' and # related targets. If your 'world' breaks, you may want to try to fix # the problem and manually run the following targets to attempt to # complete the build. Beware, this is *not* guaranteed to work, you # need to have a pretty good grip on the current state of the system # to attempt to manually finish it. If in doubt, 'make world' again. # # # legacy: Build compatibility shims for the next three targets. This is a # minimal set of tools and shims necessary to compensate for older systems # which don't have the APIs required by the targets built in bootstrap-tools, # build-tools or cross-tools. # # ELF Tool Chain libraries are needed for ELF tools and dtrace tools. # r296685 fix cross-endian objcopy .if ${BOOTSTRAPPING} < 1100102 _elftoolchain_libs= lib/libelf lib/libdwarf .endif legacy: .PHONY # Temporary special case for automatically detecting the clang compiler issue # Note: 9.x didn't have FreeBSD_version bumps often enough, so you may need to # set BOOTSTRAPPING to 0 if your stable/9 tree post-dates r286035 but is before # the version bump in r296219 (from July 29, 2015 -> Feb 29, 2016). .if ${BOOTSTRAPPING} != 0 && \ ${WANT_COMPILER_TYPE} == "clang" && ${COMPILER_TYPE} == "clang" && ${COMPILER_VERSION} < 30601 .if ${BOOTSTRAPPING} > 10000000 && ${BOOTSTRAPPING} < 1002501 @echo "ERROR: Source upgrades from stable/10 prior to r286033 are not supported."; false .elif ${BOOTSTRAPPING} > 9000000 && ${BOOTSTRAPPING} < 903509 @echo "ERROR: Source upgrades from stable/9 prior to r286035 are not supported."; false .endif .endif .if ${BOOTSTRAPPING} < ${MINIMUM_SUPPORTED_OSREL} && ${BOOTSTRAPPING} != 0 @echo "ERROR: Source upgrades from versions prior to ${MINIMUM_SUPPORTED_REL} are not supported."; \ false .endif .for _tool in tools/build ${_elftoolchain_libs} ${_+_}@${ECHODIR} "===> ${_tool} (obj,includes,all,install)"; \ cd ${.CURDIR}/${_tool}; \ if [ -z "${NO_OBJ}" ]; then ${MAKE} DIRPRFX=${_tool}/ obj; fi; \ ${MAKE} DIRPRFX=${_tool}/ DESTDIR=${MAKEOBJDIRPREFIX}/legacy includes; \ ${MAKE} DIRPRFX=${_tool}/ MK_INCLUDES=no all; \ ${MAKE} DIRPRFX=${_tool}/ MK_INCLUDES=no \ DESTDIR=${MAKEOBJDIRPREFIX}/legacy install .endfor # # bootstrap-tools: Build tools needed for compatibility. These are binaries # that are used to build other binaries in the system. However, the focus of # these binaries is usually quite narrow. Bootstrap tools use the host's # compiler and libraries, augmented by -legacy.
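# As a sketch of the pattern used throughout this section, a tool is only
# bootstrapped on hosts whose __FreeBSD_version predates the feature the
# tool depends on, for example (mirroring the nmtree case below):
# .if ${BOOTSTRAPPING} < 1100093
# _nmtree=	usr.sbin/nmtree
# .endif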
# _bt= _bootstrap-tools .if ${MK_GAMES} != "no" _strfile= usr.bin/fortune/strfile .endif .if ${MK_GCC} != "no" && ${MK_CXX} != "no" _gperf= gnu/usr.bin/gperf .endif .if ${MK_VT} != "no" _vtfontcvt= usr.bin/vtfontcvt .endif .if ${BOOTSTRAPPING} < 1000033 _libopenbsd= lib/libopenbsd _m4= usr.bin/m4 _lex= usr.bin/lex ${_bt}-usr.bin/m4: ${_bt}-lib/libopenbsd ${_bt}-usr.bin/lex: ${_bt}-usr.bin/m4 .endif # r245440 mtree -N support added # r313404 requires sha384.h for libnetbsd, added to libmd in r292782 .if ${BOOTSTRAPPING} < 1100093 _nmtree= lib/libmd \ lib/libnetbsd \ usr.sbin/nmtree ${_bt}-lib/libnetbsd: ${_bt}-lib/libmd ${_bt}-usr.sbin/nmtree: ${_bt}-lib/libnetbsd .endif # r246097: log the addition of login.conf.db, passwd, pwd.db, and spwd.db with cat -l .if ${BOOTSTRAPPING} < 1000027 _cat= bin/cat .endif # r277259 crunchide: Correct 64-bit section header offset # r281674 crunchide: always include both 32- and 64-bit ELF support .if ${BOOTSTRAPPING} < 1100078 _crunchide= usr.sbin/crunch/crunchide .endif # r285986 crunchgen: use STRIPBIN rather than STRIP # 1100113: Support MK_AUTO_OBJ # 1200006: META_MODE fixes .if ${BOOTSTRAPPING} < 1100078 || \ (${MK_AUTO_OBJ} == "yes" && ${BOOTSTRAPPING} < 1100114) || \ (${MK_META_MODE} == "yes" && ${BOOTSTRAPPING} < 1200006) _crunchgen= usr.sbin/crunch/crunchgen .endif # r296926 -P keymap search path, MFC to stable/10 in r298297 .if ${BOOTSTRAPPING} < 1003501 || \ (${BOOTSTRAPPING} >= 1100000 && ${BOOTSTRAPPING} < 1100103) _kbdcontrol= usr.sbin/kbdcontrol .endif _yacc= lib/liby \ usr.bin/yacc ${_bt}-usr.bin/yacc: ${_bt}-lib/liby .if ${MK_BSNMP} != "no" _gensnmptree= usr.sbin/bsnmpd/gensnmptree .endif # We need to build tblgen when we're building clang or lld, either as # bootstrap tools, or as part of the normal build. .if ${MK_CLANG_BOOTSTRAP} != "no" || ${MK_CLANG} != "no" || \ ${MK_LLD_BOOTSTRAP} != "no" || ${MK_LLD} != "no" _clang_tblgen= \ lib/clang/libllvmminimal \ usr.bin/clang/llvm-tblgen \ usr.bin/clang/clang-tblgen ${_bt}-usr.bin/clang/clang-tblgen: ${_bt}-lib/clang/libllvmminimal ${_bt}-usr.bin/clang/llvm-tblgen: ${_bt}-lib/clang/libllvmminimal .endif # Default to building the GPL DTC, but build the BSDL one if users explicitly # request it. _dtc= usr.bin/dtc .if ${MK_GPL_DTC} != "no" _dtc= gnu/usr.bin/dtc .endif .if ${MK_KERBEROS} != "no" _kerberos5_bootstrap_tools= \ kerberos5/tools/make-roken \ kerberos5/lib/libroken \ kerberos5/lib/libvers \ kerberos5/tools/asn1_compile \ kerberos5/tools/slc \ usr.bin/compile_et .ORDER: ${_kerberos5_bootstrap_tools:C/^/${_bt}-/g} .endif # r283777 makewhatis(1) replaced with mandoc version which builds a database. _libopenbsd?= lib/libopenbsd _makewhatis= usr.bin/mandoc ${_bt}-usr.bin/mandoc: ${_bt}-lib/libopenbsd bootstrap-tools: .PHONY # Please document (add comment) why something is in 'bootstrap-tools'. # Try to bound the building of each bootstrap tool to just the # FreeBSD versions that need the tool built at this stage of the build.
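# A sketch of the usual pattern; usr.bin/exampletool, the revision number,
# and the version bound below are all hypothetical:
#   # rNNNNNN exampletool: grew an option the build now depends on
#   .if ${BOOTSTRAPPING} < 1200050
#   _exampletool=	usr.bin/exampletool
#   .endif
# ${_exampletool} would then be added to the .for list below.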
.for _tool in \ ${_clang_tblgen} \ ${_kerberos5_bootstrap_tools} \ ${_strfile} \ ${_gperf} \ ${_dtc} \ ${_cat} \ ${_kbdcontrol} \ usr.bin/lorder \ ${_libopenbsd} \ ${_makewhatis} \ usr.bin/rpcgen \ ${_yacc} \ ${_m4} \ ${_lex} \ usr.bin/xinstall \ ${_gensnmptree} \ usr.sbin/config \ ${_crunchide} \ ${_crunchgen} \ ${_nmtree} \ ${_vtfontcvt} \ usr.bin/localedef ${_bt}-${_tool}: .PHONY .MAKE ${_+_}@${ECHODIR} "===> ${_tool} (obj,all,install)"; \ cd ${.CURDIR}/${_tool}; \ if [ -z "${NO_OBJ}" ]; then ${MAKE} DIRPRFX=${_tool}/ obj; fi; \ ${MAKE} DIRPRFX=${_tool}/ all; \ ${MAKE} DIRPRFX=${_tool}/ DESTDIR=${MAKEOBJDIRPREFIX}/legacy install bootstrap-tools: ${_bt}-${_tool} .endfor # # build-tools: Build special purpose build tools # .if !defined(NO_SHARE) _share= share/syscons/scrnmaps .endif .if ${MK_GCC} != "no" _gcc_tools= gnu/usr.bin/cc/cc_tools .endif .if ${MK_RESCUE} != "no" # rescue includes programs that have build-tools targets _rescue=rescue/rescue .endif .for _tool in \ bin/csh \ bin/sh \ ${LOCAL_TOOL_DIRS} \ lib/ncurses/ncurses \ lib/ncurses/ncursesw \ ${_rescue} \ ${_share} \ usr.bin/awk \ lib/libmagic \ usr.bin/mkesdb_static \ usr.bin/mkcsmapper_static \ usr.bin/vi/catalog build-tools_${_tool}: .PHONY ${_+_}@${ECHODIR} "===> ${_tool} (obj,build-tools)"; \ cd ${.CURDIR}/${_tool}; \ if [ -z "${NO_OBJ}" ]; then ${MAKE} DIRPRFX=${_tool}/ obj; fi; \ ${MAKE} DIRPRFX=${_tool}/ build-tools build-tools: build-tools_${_tool} .endfor .for _tool in \ ${_gcc_tools} build-tools_${_tool}: .PHONY ${_+_}@${ECHODIR} "===> ${_tool} (obj,all)"; \ cd ${.CURDIR}/${_tool}; \ if [ -z "${NO_OBJ}" ]; then ${MAKE} DIRPRFX=${_tool}/ obj; fi; \ ${MAKE} DIRPRFX=${_tool}/ all build-tools: build-tools_${_tool} .endfor # # kernel-tools: Build kernel-building tools # kernel-tools: .PHONY mkdir -p ${MAKEOBJDIRPREFIX}/usr mtree -deU -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${MAKEOBJDIRPREFIX}/usr >/dev/null # # cross-tools: All the tools needed to build the rest of the system after # we get done with the earlier stages. It is the last set of tools needed # to begin building the target binaries. # .if ${TARGET_ARCH} != ${MACHINE_ARCH} .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386" _btxld= usr.sbin/btxld .endif .endif # Rebuild ctfconvert and ctfmerge to avoid difficult-to-diagnose failures # resulting from missing bug fixes or ELF Toolchain updates. .if ${MK_CDDL} != "no" _dtrace_tools= cddl/lib/libctf cddl/usr.bin/ctfconvert \ cddl/usr.bin/ctfmerge .endif # If we're given an XAS, don't build binutils. .if ${XAS:M/*} == "" .if ${MK_BINUTILS_BOOTSTRAP} != "no" _binutils= gnu/usr.bin/binutils .endif .if ${MK_ELFTOOLCHAIN_BOOTSTRAP} != "no" _elftctools= lib/libelftc \ lib/libpe \ usr.bin/elfcopy \ usr.bin/nm \ usr.bin/size \ usr.bin/strings # These are not required by the build, but can be useful for developers who # cross-build on a FreeBSD 10 host: _elftctools+= usr.bin/addr2line .endif .elif ${TARGET_ARCH} != ${MACHINE_ARCH} && ${MK_ELFTOOLCHAIN_BOOTSTRAP} != "no" # If cross-building with an external binutils we still need to build strip for # the target (for at least crunchide). 
_elftctools= lib/libelftc \ lib/libpe \ usr.bin/elfcopy .endif .if ${MK_CLANG_BOOTSTRAP} != "no" _clang= usr.bin/clang .endif .if ${MK_LLD_BOOTSTRAP} != "no" _lld= usr.bin/clang/lld .endif .if ${MK_CLANG_BOOTSTRAP} != "no" || ${MK_LLD_BOOTSTRAP} != "no" _clang_libs= lib/clang .endif .if ${MK_GCC_BOOTSTRAP} != "no" _gcc= gnu/usr.bin/cc .endif .if ${MK_USB} != "no" _usb_tools= sys/boot/usb/tools .endif cross-tools: .MAKE .PHONY .for _tool in \ ${LOCAL_XTOOL_DIRS} \ ${_clang_libs} \ ${_clang} \ ${_lld} \ ${_binutils} \ ${_elftctools} \ ${_dtrace_tools} \ ${_gcc} \ ${_btxld} \ ${_usb_tools} ${_+_}@${ECHODIR} "===> ${_tool} (obj,all,install)"; \ cd ${.CURDIR}/${_tool}; \ if [ -z "${NO_OBJ}" ]; then ${MAKE} DIRPRFX=${_tool}/ obj; fi; \ ${MAKE} DIRPRFX=${_tool}/ all; \ ${MAKE} DIRPRFX=${_tool}/ DESTDIR=${MAKEOBJDIRPREFIX} install .endfor NXBDESTDIR= ${OBJTREE}/nxb-bin NXBENV= MAKEOBJDIRPREFIX=${OBJTREE}/nxb \ TOOLS_PREFIX= \ INSTALL="sh ${.CURDIR}/tools/install.sh" \ PATH=${PATH}:${OBJTREE}/gperf_for_gcc/usr/bin NXBMAKE= ${NXBENV} ${MAKE} \ LLVM_TBLGEN=${NXBDESTDIR}/usr/bin/llvm-tblgen \ CLANG_TBLGEN=${NXBDESTDIR}/usr/bin/clang-tblgen \ MACHINE=${TARGET} MACHINE_ARCH=${TARGET_ARCH} \ MK_GDB=no MK_TESTS=no \ SSP_CFLAGS= \ MK_HTML=no NO_LINT=yes MK_MAN=no \ -DNO_PIC MK_PROFILE=no -DNO_SHARED \ -DNO_CPU_CFLAGS MK_WARNS=no MK_CTF=no \ MK_CLANG_EXTRAS=no MK_CLANG_FULL=no \ MK_LLDB=no MK_DEBUG_FILES=no # native-xtools is the current target for qemu-user cross builds of ports # via poudriere and the imgact_binmisc kernel module. # For non-clang enabled targets that are still using the in tree gcc # we must build a gperf binary for one instance of its Makefiles. On # clang-enabled systems, the gperf binary is obsolete. native-xtools: .PHONY .if ${MK_GCC_BOOTSTRAP} != "no" mkdir -p ${OBJTREE}/gperf_for_gcc/usr/bin ${_+_}@${ECHODIR} "===> ${_gperf} (obj,all,install)"; \ cd ${.CURDIR}/${_gperf}; \ if [ -z "${NO_OBJ}" ]; then ${NXBMAKE} DIRPRFX=${_gperf}/ obj; fi; \ ${NXBMAKE} DIRPRFX=${_gperf}/ all; \ ${NXBMAKE} DIRPRFX=${_gperf}/ DESTDIR=${OBJTREE}/gperf_for_gcc install .endif mkdir -p ${NXBDESTDIR}/bin ${NXBDESTDIR}/sbin ${NXBDESTDIR}/usr mtree -deU -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${NXBDESTDIR}/usr >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.include.dist \ -p ${NXBDESTDIR}/usr/include >/dev/null .if ${MK_DEBUG_FILES} != "no" mtree -deU -f ${.CURDIR}/etc/mtree/BSD.debug.dist \ -p ${NXBDESTDIR}/usr/lib >/dev/null .endif .for _tool in \ bin/cat \ bin/chmod \ bin/cp \ bin/csh \ bin/echo \ bin/expr \ bin/hostname \ bin/ln \ bin/ls \ bin/mkdir \ bin/mv \ bin/ps \ bin/realpath \ bin/rm \ bin/rmdir \ bin/sh \ bin/sleep \ ${_clang_tblgen} \ usr.bin/ar \ ${_binutils} \ ${_elftctools} \ ${_gcc} \ ${_gcc_tools} \ ${_clang_libs} \ ${_clang} \ + ${_lld} \ sbin/md5 \ sbin/sysctl \ usr.bin/diff \ usr.bin/awk \ usr.bin/basename \ usr.bin/bmake \ usr.bin/bzip2 \ usr.bin/cmp \ usr.bin/dirname \ usr.bin/env \ usr.bin/fetch \ usr.bin/find \ usr.bin/grep \ usr.bin/gzip \ usr.bin/id \ usr.bin/lex \ usr.bin/limits \ usr.bin/lorder \ usr.bin/mktemp \ usr.bin/mt \ usr.bin/patch \ usr.bin/readelf \ usr.bin/sed \ usr.bin/sort \ usr.bin/tar \ usr.bin/touch \ usr.bin/tr \ usr.bin/true \ usr.bin/uniq \ usr.bin/unzip \ usr.bin/xargs \ usr.bin/xinstall \ usr.bin/xz \ usr.bin/yacc \ usr.sbin/chown ${_+_}@${ECHODIR} "===> ${_tool} (obj,all,install)"; \ cd ${.CURDIR}/${_tool}; \ if [ -z "${NO_OBJ}" ]; then ${NXBMAKE} DIRPRFX=${_tool}/ obj; fi; \ ${NXBMAKE} DIRPRFX=${_tool}/ all; \ ${NXBMAKE} DIRPRFX=${_tool}/ 
DESTDIR=${NXBDESTDIR} install .endfor # # hierarchy - ensure that all the needed directories are present # hierarchy hier: .MAKE .PHONY ${_+_}cd ${.CURDIR}/etc; ${HMAKE} distrib-dirs # # libraries - build all libraries, and install them under ${DESTDIR}. # # The list of libraries with dependents (${_prebuild_libs}) and their # interdependencies (__L) are built automatically by the # ${.CURDIR}/tools/make_libdeps.sh script. # libraries: .MAKE .PHONY ${_+_}cd ${.CURDIR}; \ ${MAKE} -f Makefile.inc1 _prereq_libs; \ ${MAKE} -f Makefile.inc1 _startup_libs; \ ${MAKE} -f Makefile.inc1 _prebuild_libs; \ ${MAKE} -f Makefile.inc1 _generic_libs # # static libgcc.a prerequisite for shared libc # _prereq_libs= lib/libcompiler_rt .if ${MK_SSP} != "no" _prereq_libs+= gnu/lib/libssp/libssp_nonshared .endif # These dependencies are not automatically generated: # # gnu/lib/csu, gnu/lib/libgcc, lib/csu and lib/libc must be built before # all shared libraries for ELF. # _startup_libs= gnu/lib/csu _startup_libs+= lib/csu _startup_libs+= lib/libcompiler_rt _startup_libs+= lib/libc _startup_libs+= lib/libc_nonshared .if ${MK_LIBCPLUSPLUS} != "no" _startup_libs+= lib/libcxxrt .endif .if ${MK_LLVM_LIBUNWIND} != "no" _prereq_libs+= lib/libgcc_eh lib/libgcc_s _startup_libs+= lib/libgcc_eh lib/libgcc_s lib/libgcc_s__L: lib/libc__L lib/libgcc_s__L: lib/libc_nonshared__L .if ${MK_LIBCPLUSPLUS} != "no" lib/libcxxrt__L: lib/libgcc_s__L .endif .else # MK_LLVM_LIBUNWIND == no _prereq_libs+= gnu/lib/libgcc _startup_libs+= gnu/lib/libgcc gnu/lib/libgcc__L: lib/libc__L gnu/lib/libgcc__L: lib/libc_nonshared__L .if ${MK_LIBCPLUSPLUS} != "no" lib/libcxxrt__L: gnu/lib/libgcc__L .endif .endif _prebuild_libs= ${_kerberos5_lib_libasn1} \ ${_kerberos5_lib_libhdb} \ ${_kerberos5_lib_libheimbase} \ ${_kerberos5_lib_libheimntlm} \ ${_libsqlite3} \ ${_kerberos5_lib_libheimipcc} \ ${_kerberos5_lib_libhx509} ${_kerberos5_lib_libkrb5} \ ${_kerberos5_lib_libroken} \ ${_kerberos5_lib_libwind} \ lib/libbz2 ${_libcom_err} lib/libcrypt \ lib/libelf lib/libexpat \ lib/libfigpar \ ${_lib_libgssapi} \ lib/libkiconv lib/libkvm lib/liblzma lib/libmd lib/libnv \ ${_lib_casper} \ lib/ncurses/ncurses lib/ncurses/ncursesw \ lib/libopie lib/libpam/libpam ${_lib_libthr} \ ${_lib_libradius} lib/libsbuf lib/libtacplus \ lib/libgeom \ ${_cddl_lib_libumem} ${_cddl_lib_libnvpair} \ ${_cddl_lib_libuutil} \ ${_cddl_lib_libavl} \ ${_cddl_lib_libzfs_core} \ ${_cddl_lib_libctf} \ lib/libutil lib/libpjdlog ${_lib_libypclnt} lib/libz lib/msun \ ${_secure_lib_libcrypto} ${_lib_libldns} \ ${_secure_lib_libssh} ${_secure_lib_libssl} .if ${MK_GNUCXX} != "no" _prebuild_libs+= gnu/lib/libstdc++ gnu/lib/libsupc++ gnu/lib/libstdc++__L: lib/msun__L gnu/lib/libsupc++__L: gnu/lib/libstdc++__L .endif .if ${MK_DIALOG} != "no" _prebuild_libs+= gnu/lib/libdialog gnu/lib/libdialog__L: lib/msun__L lib/ncurses/ncursesw__L .endif .if ${MK_LIBCPLUSPLUS} != "no" _prebuild_libs+= lib/libc++ .endif lib/libgeom__L: lib/libexpat__L lib/libkvm__L: lib/libelf__L .if ${MK_LIBTHR} != "no" _lib_libthr= lib/libthr .endif .if ${MK_RADIUS_SUPPORT} != "no" _lib_libradius= lib/libradius .endif .if ${MK_OFED} != "no" _ofed_lib= contrib/ofed/usr.lib _prebuild_libs+= contrib/ofed/usr.lib/libosmcomp _prebuild_libs+= contrib/ofed/usr.lib/libopensm _prebuild_libs+= contrib/ofed/usr.lib/libibcommon _prebuild_libs+= contrib/ofed/usr.lib/libibverbs _prebuild_libs+= contrib/ofed/usr.lib/libibumad contrib/ofed/usr.lib/libopensm__L: lib/libthr__L contrib/ofed/usr.lib/libosmcomp__L: lib/libthr__L 
contrib/ofed/usr.lib/libibumad__L: contrib/ofed/usr.lib/libibcommon__L .endif .if ${MK_CASPER} != "no" _lib_casper= lib/libcasper .endif lib/libpjdlog__L: lib/libutil__L lib/libcasper__L: lib/libnv__L lib/liblzma__L: lib/libthr__L _generic_libs= ${_cddl_lib} gnu/lib ${_kerberos5_lib} lib ${_secure_lib} usr.bin/lex/lib ${_ofed_lib} .for _DIR in ${LOCAL_LIB_DIRS} .if exists(${.CURDIR}/${_DIR}/Makefile) && empty(_generic_libs:M${_DIR}) _generic_libs+= ${_DIR} .endif .endfor lib/libopie__L lib/libtacplus__L: lib/libmd__L .if ${MK_CDDL} != "no" _cddl_lib_libumem= cddl/lib/libumem _cddl_lib_libnvpair= cddl/lib/libnvpair _cddl_lib_libavl= cddl/lib/libavl _cddl_lib_libuutil= cddl/lib/libuutil _cddl_lib_libzfs_core= cddl/lib/libzfs_core _cddl_lib_libctf= cddl/lib/libctf _cddl_lib= cddl/lib cddl/lib/libzfs_core__L: cddl/lib/libnvpair__L cddl/lib/libzfs__L: lib/libgeom__L cddl/lib/libctf__L: lib/libz__L .endif # cddl/lib/libdtrace requires lib/libproc and lib/librtld_db; it's only built # on select architectures though (see cddl/lib/Makefile) .if ${MACHINE_CPUARCH} != "sparc64" _prebuild_libs+= lib/libprocstat lib/libproc lib/librtld_db lib/libprocstat__L: lib/libelf__L lib/libkvm__L lib/libutil__L lib/libproc__L: lib/libprocstat__L lib/librtld_db__L: lib/libprocstat__L .endif .if ${MK_CRYPT} != "no" .if ${MK_OPENSSL} != "no" _secure_lib_libcrypto= secure/lib/libcrypto _secure_lib_libssl= secure/lib/libssl lib/libradius__L secure/lib/libssl__L: secure/lib/libcrypto__L .if ${MK_LDNS} != "no" _lib_libldns= lib/libldns lib/libldns__L: secure/lib/libcrypto__L .endif .if ${MK_OPENSSH} != "no" _secure_lib_libssh= secure/lib/libssh secure/lib/libssh__L: lib/libz__L secure/lib/libcrypto__L lib/libcrypt__L .if ${MK_LDNS} != "no" secure/lib/libssh__L: lib/libldns__L .endif .if ${MK_GSSAPI} != "no" && ${MK_KERBEROS_SUPPORT} != "no" secure/lib/libssh__L: lib/libgssapi__L kerberos5/lib/libkrb5__L \ kerberos5/lib/libhx509__L kerberos5/lib/libasn1__L lib/libcom_err__L \ lib/libmd__L kerberos5/lib/libroken__L .endif .endif .endif _secure_lib= secure/lib .endif .if ${MK_KERBEROS} != "no" kerberos5/lib/libasn1__L: lib/libcom_err__L kerberos5/lib/libroken__L kerberos5/lib/libhdb__L: kerberos5/lib/libasn1__L lib/libcom_err__L \ kerberos5/lib/libkrb5__L kerberos5/lib/libroken__L \ kerberos5/lib/libwind__L lib/libsqlite3__L kerberos5/lib/libheimntlm__L: secure/lib/libcrypto__L kerberos5/lib/libkrb5__L \ kerberos5/lib/libroken__L lib/libcom_err__L kerberos5/lib/libhx509__L: kerberos5/lib/libasn1__L lib/libcom_err__L \ secure/lib/libcrypto__L kerberos5/lib/libroken__L kerberos5/lib/libwind__L kerberos5/lib/libkrb5__L: kerberos5/lib/libasn1__L lib/libcom_err__L \ lib/libcrypt__L secure/lib/libcrypto__L kerberos5/lib/libhx509__L \ kerberos5/lib/libroken__L kerberos5/lib/libwind__L \ kerberos5/lib/libheimbase__L kerberos5/lib/libheimipcc__L kerberos5/lib/libroken__L: lib/libcrypt__L kerberos5/lib/libwind__L: kerberos5/lib/libroken__L lib/libcom_err__L kerberos5/lib/libheimbase__L: lib/libthr__L kerberos5/lib/libheimipcc__L: kerberos5/lib/libroken__L kerberos5/lib/libheimbase__L lib/libthr__L .endif lib/libsqlite3__L: lib/libthr__L .if ${MK_GSSAPI} != "no" _lib_libgssapi= lib/libgssapi .endif .if ${MK_KERBEROS} != "no" _kerberos5_lib= kerberos5/lib _kerberos5_lib_libasn1= kerberos5/lib/libasn1 _kerberos5_lib_libhdb= kerberos5/lib/libhdb _kerberos5_lib_libheimbase= kerberos5/lib/libheimbase _kerberos5_lib_libkrb5= kerberos5/lib/libkrb5 _kerberos5_lib_libhx509= kerberos5/lib/libhx509 _kerberos5_lib_libroken= 
kerberos5/lib/libroken _kerberos5_lib_libheimntlm= kerberos5/lib/libheimntlm _libsqlite3= lib/libsqlite3 _kerberos5_lib_libheimipcc= kerberos5/lib/libheimipcc _kerberos5_lib_libwind= kerberos5/lib/libwind _libcom_err= lib/libcom_err .endif .if ${MK_NIS} != "no" _lib_libypclnt= lib/libypclnt .endif .if ${MK_OPENSSL} == "no" lib/libradius__L: lib/libmd__L .endif lib/libproc__L: \ ${_cddl_lib_libctf:D${_cddl_lib_libctf}__L} lib/libelf__L lib/librtld_db__L lib/libutil__L .if ${MK_CXX} != "no" .if ${MK_LIBCPLUSPLUS} != "no" lib/libproc__L: lib/libcxxrt__L .else # This implies MK_GNUCXX != "no"; see lib/libproc lib/libproc__L: gnu/lib/libsupc++__L .endif .endif .for _lib in ${_prereq_libs} ${_lib}__PL: .PHONY .MAKE .if exists(${.CURDIR}/${_lib}) ${_+_}@${ECHODIR} "===> ${_lib} (obj,all,install)"; \ cd ${.CURDIR}/${_lib}; \ if [ -z "${NO_OBJ}" ]; then ${MAKE} MK_TESTS=no DIRPRFX=${_lib}/ obj; fi; \ ${MAKE} MK_TESTS=no MK_PROFILE=no -DNO_PIC \ DIRPRFX=${_lib}/ all; \ ${MAKE} MK_TESTS=no MK_PROFILE=no -DNO_PIC \ DIRPRFX=${_lib}/ install .endif .endfor .for _lib in ${_startup_libs} ${_prebuild_libs} ${_generic_libs} ${_lib}__L: .PHONY .MAKE .if exists(${.CURDIR}/${_lib}) ${_+_}@${ECHODIR} "===> ${_lib} (obj,all,install)"; \ cd ${.CURDIR}/${_lib}; \ if [ -z "${NO_OBJ}" ]; then ${MAKE} MK_TESTS=no DIRPRFX=${_lib}/ obj; fi; \ ${MAKE} MK_TESTS=no DIRPRFX=${_lib}/ all; \ ${MAKE} MK_TESTS=no DIRPRFX=${_lib}/ install .endif .endfor _prereq_libs: ${_prereq_libs:S/$/__PL/} _startup_libs: ${_startup_libs:S/$/__L/} _prebuild_libs: ${_prebuild_libs:S/$/__L/} _generic_libs: ${_generic_libs:S/$/__L/} # Enable SUBDIR_PARALLEL when not calling 'make all', or when called from # 'everything' with _PARALLEL_SUBDIR_OK set. This is because it is unlikely # that running 'make all' from the top-level, especially with a SUBDIR_OVERRIDE # or LOCAL_DIRS set, will have a reliable build if SUBDIRs are built in # parallel. This is safe for the world stage of buildworld though since it has # already built libraries in a proper order and installed includes into # WORLDTMP. Special handling is done for SUBDIR ordering for 'install*' to # avoid trashing a system if it crashes mid-install. .if !make(all) || defined(_PARALLEL_SUBDIR_OK) SUBDIR_PARALLEL= .endif .include <bsd.subdir.mk> .if make(check-old) || make(check-old-dirs) || \ make(check-old-files) || make(check-old-libs) || \ make(delete-old) || make(delete-old-dirs) || \ make(delete-old-files) || make(delete-old-libs) # # check for / delete old files section # .include "ObsoleteFiles.inc" OLD_LIBS_MESSAGE="Please be sure no application still uses those libraries, \ else you can not start such an application. Consult UPDATING for more \ information regarding how to cope with the removal/revision bump of a \ specific library." .if !defined(BATCH_DELETE_OLD_FILES) RM_I=-i .else RM_I=-v .endif delete-old-files: .PHONY @echo ">>> Removing old files (only deletes safe to delete libs)" # Ask for every old file if the user really wants to remove it. # It's annoying, but better safe than sorry. # NB: We cannot pass the list of OLD_FILES as a parameter because the # argument list will get too long. Using .for/.endfor make "loops" will make # the Makefile parser segfault.
@exec 3<&0; \ cd ${.CURDIR}; \ ${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_FILES -V "OLD_FILES:Musr/share/*.gz:R" | xargs -n1 | \ while read file; do \ if [ -f "${DESTDIR}/$${file}" -o -L "${DESTDIR}/$${file}" ]; then \ chflags noschg "${DESTDIR}/$${file}" 2>/dev/null || true; \ rm ${RM_I} "${DESTDIR}/$${file}" <&3; \ fi; \ for ext in debug symbols; do \ if ! [ -e "${DESTDIR}/$${file}" ] && [ -f \ "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" ]; then \ rm ${RM_I} "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" \ <&3; \ fi; \ done; \ done # Remove catpages without corresponding manpages. @exec 3<&0; \ find ${DESTDIR}/usr/share/man/cat* ! -type d | \ sed -ep -e's:${DESTDIR}/usr/share/man/cat:${DESTDIR}/usr/share/man/man:' | \ while read catpage; do \ read manpage; \ if [ ! -e "$${manpage}" ]; then \ rm ${RM_I} $${catpage} <&3; \ fi; \ done @echo ">>> Old files removed" check-old-files: .PHONY @echo ">>> Checking for old files" @cd ${.CURDIR}; \ ${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_FILES -V "OLD_FILES:Musr/share/*.gz:R" | xargs -n1 | \ while read file; do \ if [ -f "${DESTDIR}/$${file}" -o -L "${DESTDIR}/$${file}" ]; then \ echo "${DESTDIR}/$${file}"; \ fi; \ for ext in debug symbols; do \ if [ -f "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" ]; then \ echo "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}"; \ fi; \ done; \ done # Check for catpages without corresponding manpages. @find ${DESTDIR}/usr/share/man/cat* ! -type d | \ sed -ep -e's:${DESTDIR}/usr/share/man/cat:${DESTDIR}/usr/share/man/man:' | \ while read catpage; do \ read manpage; \ if [ ! -e "$${manpage}" ]; then \ echo $${catpage}; \ fi; \ done delete-old-libs: .PHONY @echo ">>> Removing old libraries" @echo "${OLD_LIBS_MESSAGE}" | fmt @exec 3<&0; \ cd ${.CURDIR}; \ ${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_LIBS | xargs -n1 | \ while read file; do \ if [ -f "${DESTDIR}/$${file}" -o -L "${DESTDIR}/$${file}" ]; then \ chflags noschg "${DESTDIR}/$${file}" 2>/dev/null || true; \ rm ${RM_I} "${DESTDIR}/$${file}" <&3; \ fi; \ for ext in debug symbols; do \ if ! 
[ -e "${DESTDIR}/$${file}" ] && [ -f \ "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" ]; then \ rm ${RM_I} "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" \ <&3; \ fi; \ done; \ done @echo ">>> Old libraries removed" check-old-libs: .PHONY @echo ">>> Checking for old libraries" @cd ${.CURDIR}; \ ${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_LIBS | xargs -n1 | \ while read file; do \ if [ -f "${DESTDIR}/$${file}" -o -L "${DESTDIR}/$${file}" ]; then \ echo "${DESTDIR}/$${file}"; \ fi; \ for ext in debug symbols; do \ if [ -f "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" ]; then \ echo "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}"; \ fi; \ done; \ done delete-old-dirs: .PHONY @echo ">>> Removing old directories" @cd ${.CURDIR}; \ ${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_DIRS | xargs -n1 | sort -r | \ while read dir; do \ if [ -d "${DESTDIR}/$${dir}" ]; then \ rmdir -v "${DESTDIR}/$${dir}" || true; \ elif [ -L "${DESTDIR}/$${dir}" ]; then \ echo "${DESTDIR}/$${dir} is a link, please remove everything manually."; \ fi; \ if [ -d "${DESTDIR}${DEBUGDIR}/$${dir}" ]; then \ rmdir -v "${DESTDIR}${DEBUGDIR}/$${dir}" || true; \ elif [ -L "${DESTDIR}${DEBUGDIR}/$${dir}" ]; then \ echo "${DESTDIR}${DEBUGDIR}/$${dir} is a link, please remove everything manually."; \ fi; \ done @echo ">>> Old directories removed" check-old-dirs: .PHONY @echo ">>> Checking for old directories" @cd ${.CURDIR}; \ ${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_DIRS | xargs -n1 | \ while read dir; do \ if [ -d "${DESTDIR}/$${dir}" ]; then \ echo "${DESTDIR}/$${dir}"; \ elif [ -L "${DESTDIR}/$${dir}" ]; then \ echo "${DESTDIR}/$${dir} is a link, please remove everything manually."; \ fi; \ if [ -d "${DESTDIR}${DEBUGDIR}/$${dir}" ]; then \ echo "${DESTDIR}${DEBUGDIR}/$${dir}"; \ elif [ -L "${DESTDIR}${DEBUGDIR}/$${dir}" ]; then \ echo "${DESTDIR}${DEBUGDIR}/$${dir} is a link, please remove everything manually."; \ fi; \ done delete-old: delete-old-files delete-old-dirs .PHONY @echo "To remove old libraries run '${MAKE_CMD} delete-old-libs'." check-old: check-old-files check-old-libs check-old-dirs .PHONY @echo "To remove old files and directories run '${MAKE_CMD} delete-old'." @echo "To remove old libraries run '${MAKE_CMD} delete-old-libs'." .endif # # showconfig - show build configuration. # showconfig: .PHONY @(${MAKE} -n -f ${.CURDIR}/sys/conf/kern.opts.mk -V dummy -dg1; \ ${MAKE} -n -f ${.CURDIR}/share/mk/src.opts.mk -V dummy -dg1) 2>&1 | grep ^MK_ | sort -u .if !empty(KRNLOBJDIR) && !empty(KERNCONF) DTBOUTPUTPATH= ${KRNLOBJDIR}/${KERNCONF}/ .if !defined(FDT_DTS_FILE) || empty(FDT_DTS_FILE) .if exists(${KERNCONFDIR}/${KERNCONF}) FDT_DTS_FILE!= awk 'BEGIN {FS="="} /^makeoptions[[:space:]]+FDT_DTS_FILE/ {print $$2}' \ '${KERNCONFDIR}/${KERNCONF}' ; echo .endif .endif .endif .if !defined(DTBOUTPUTPATH) || !exists(${DTBOUTPUTPATH}) DTBOUTPUTPATH= ${.CURDIR} .endif # # Build 'standalone' Device Tree Blob # builddtb: .PHONY @PATH=${TMPPATH} MACHINE=${TARGET} \ ${.CURDIR}/sys/tools/fdt/make_dtb.sh ${.CURDIR}/sys \ "${FDT_DTS_FILE}" ${DTBOUTPUTPATH} ############### # cleanworld # In the following, the first 'rm' in a series will usually remove all # files and directories. If it does not, then there are probably some # files with file flags set, so this unsets them and tries the 'rm' a # second time. There are situations where this target will be cleaning # some directories via more than one method, but that duplication is # needed to correctly handle all the possible situations. 
Removing all # files without file flags set in the first 'rm' instance saves time, # because 'chflags' will need to operate on fewer files afterwards. # # It is expected that BW_CANONICALOBJDIR == the CANONICALOBJDIR as would be # created by bsd.obj.mk, except that we don't want to .include that file # in this makefile. # BW_CANONICALOBJDIR:=${OBJTREE}${.CURDIR} cleanworld: .PHONY .if exists(${BW_CANONICALOBJDIR}/) -rm -rf ${BW_CANONICALOBJDIR}/* -chflags -R 0 ${BW_CANONICALOBJDIR} rm -rf ${BW_CANONICALOBJDIR}/* .endif .if ${.CURDIR} == ${.OBJDIR} || ${.CURDIR}/obj == ${.OBJDIR} # To be safe in this case, fall back to a 'make cleandir' ${_+_}@cd ${.CURDIR}; ${MAKE} cleandir .endif .if defined(TARGET) && defined(TARGET_ARCH) .if ${TARGET} == ${MACHINE} && ${TARGET_ARCH} == ${MACHINE_ARCH} XDEV_CPUTYPE?=${CPUTYPE} .else XDEV_CPUTYPE?=${TARGET_CPUTYPE} .endif NOFUN=-DNO_FSCHG MK_HTML=no -DNO_LINT \ MK_MAN=no MK_NLS=no MK_PROFILE=no \ MK_KERBEROS=no MK_RESCUE=no MK_TESTS=no MK_WARNS=no \ TARGET=${TARGET} TARGET_ARCH=${TARGET_ARCH} \ CPUTYPE=${XDEV_CPUTYPE} XDDIR=${TARGET_ARCH}-freebsd XDTP?=/usr/${XDDIR} .if ${XDTP:N/*} .error XDTP variable should be an absolute path .endif CDBENV=MAKEOBJDIRPREFIX=${MAKEOBJDIRPREFIX}/${XDDIR} \ INSTALL="sh ${.CURDIR}/tools/install.sh" CDENV= ${CDBENV} \ TOOLS_PREFIX=${XDTP} .if ${WANT_COMPILER_TYPE} == gcc || \ (defined(X_COMPILER_TYPE) && ${X_COMPILER_TYPE} == gcc) # GCC requires -isystem and -L when using a cross-compiler. --sysroot # won't set header path and -L is used to ensure the base library path # is added before the port PREFIX library path. CD2CFLAGS+= -isystem ${XDDESTDIR}/usr/include -L${XDDESTDIR}/usr/lib # GCC requires -B to find /usr/lib/crti.o when using a cross-compiler # combined with --sysroot. CD2CFLAGS+= -B${XDDESTDIR}/usr/lib # Force using libc++ for external GCC. # XXX: This should be checking MK_GNUCXX == no .if ${X_COMPILER_VERSION} >= 40800 CD2CXXFLAGS+= -isystem ${XDDESTDIR}/usr/include/c++/v1 -std=c++11 \ -nostdinc++ .endif .endif CD2CFLAGS+= --sysroot=${XDDESTDIR}/ CD2ENV=${CDENV} CC="${CC} ${CD2CFLAGS}" CXX="${CXX} ${CD2CXXFLAGS} ${CD2CFLAGS}" \ CPP="${CPP} ${CD2CFLAGS}" \ MACHINE=${TARGET} MACHINE_ARCH=${TARGET_ARCH} CDTMP= ${MAKEOBJDIRPREFIX}/${XDDIR}/${.CURDIR}/tmp CDMAKE=${CDENV} PATH=${CDTMP}/usr/bin:${PATH} ${MAKE} ${NOFUN} CD2MAKE=${CD2ENV} PATH=${CDTMP}/usr/bin:${XDDESTDIR}/usr/bin:${PATH} ${MAKE} ${NOFUN} .if ${MK_META_MODE} != "no" # Don't rebuild build-tools targets during normal build. 
CD2MAKE+= BUILD_TOOLS_META=.NOMETA .endif XDDESTDIR=${DESTDIR}/${XDTP} .if !defined(OSREL) OSREL!= uname -r | sed -e 's/[-(].*//' .endif .ORDER: xdev-build xdev-install xdev-links xdev: xdev-build xdev-install .PHONY .ORDER: _xb-worldtmp _xb-bootstrap-tools _xb-build-tools _xb-cross-tools xdev-build: _xb-worldtmp _xb-bootstrap-tools _xb-build-tools _xb-cross-tools .PHONY _xb-worldtmp: .PHONY mkdir -p ${CDTMP}/usr mtree -deU -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${CDTMP}/usr >/dev/null _xb-bootstrap-tools: .PHONY .for _tool in \ ${_clang_tblgen} \ ${_gperf} \ ${_yacc} ${_+_}@${ECHODIR} "===> ${_tool} (obj,all,install)"; \ cd ${.CURDIR}/${_tool}; \ if [ -z "${NO_OBJ}" ]; then ${CDMAKE} DIRPRFX=${_tool}/ obj; fi; \ ${CDMAKE} DIRPRFX=${_tool}/ all; \ ${CDMAKE} DIRPRFX=${_tool}/ DESTDIR=${CDTMP} install .endfor _xb-build-tools: .PHONY ${_+_}@cd ${.CURDIR}; \ ${CDBENV} ${MAKE} -f Makefile.inc1 ${NOFUN} build-tools _xb-cross-tools: .PHONY .for _tool in \ ${_binutils} \ ${_elftctools} \ usr.bin/ar \ ${_clang_libs} \ ${_clang} \ ${_gcc} ${_+_}@${ECHODIR} "===> xdev ${_tool} (obj,all)"; \ cd ${.CURDIR}/${_tool}; \ if [ -z "${NO_OBJ}" ]; then ${CDMAKE} DIRPRFX=${_tool}/ obj; fi; \ ${CDMAKE} DIRPRFX=${_tool}/ all .endfor _xi-mtree: .PHONY ${_+_}@${ECHODIR} "mtree populating ${XDDESTDIR}" mkdir -p ${XDDESTDIR} mtree -deU -f ${.CURDIR}/etc/mtree/BSD.root.dist \ -p ${XDDESTDIR} >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${XDDESTDIR}/usr >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.include.dist \ -p ${XDDESTDIR}/usr/include >/dev/null .if defined(LIBCOMPAT) mtree -deU -f ${.CURDIR}/etc/mtree/BSD.lib${libcompat}.dist \ -p ${XDDESTDIR}/usr >/dev/null .endif .if ${MK_TESTS} != "no" mkdir -p ${XDDESTDIR}${TESTSBASE} mtree -deU -f ${.CURDIR}/etc/mtree/BSD.tests.dist \ -p ${XDDESTDIR}${TESTSBASE} >/dev/null .endif .ORDER: xdev-build _xi-mtree _xi-cross-tools _xi-includes _xi-libraries xdev-install: xdev-build _xi-mtree _xi-cross-tools _xi-includes _xi-libraries .PHONY _xi-cross-tools: .PHONY @echo "_xi-cross-tools" .for _tool in \ ${_binutils} \ ${_elftctools} \ usr.bin/ar \ ${_clang_libs} \ ${_clang} \ ${_gcc} ${_+_}@${ECHODIR} "===> xdev ${_tool} (install)"; \ cd ${.CURDIR}/${_tool}; \ ${CDMAKE} DIRPRFX=${_tool}/ install DESTDIR=${XDDESTDIR} .endfor _xi-includes: .PHONY ${_+_}cd ${.CURDIR}; ${CD2MAKE} -f Makefile.inc1 includes \ DESTDIR=${XDDESTDIR} _xi-libraries: .PHONY ${_+_}cd ${.CURDIR}; ${CD2MAKE} -f Makefile.inc1 libraries \ DESTDIR=${XDDESTDIR} xdev-links: .PHONY ${_+_}cd ${XDDESTDIR}/usr/bin; \ mkdir -p ../../../../usr/bin; \ for i in *; do \ ln -sf ../../${XDTP}/usr/bin/$$i \ ../../../../usr/bin/${XDDIR}-$$i; \ ln -sf ../../${XDTP}/usr/bin/$$i \ ../../../../usr/bin/${XDDIR}${OSREL}-$$i; \ done .else xdev xdev-build xdev-install xdev-links: .PHONY @echo "*** Error: Both TARGET and TARGET_ARCH must be defined for \"${.TARGET}\" target" .endif Index: projects/clang500-import/ObsoleteFiles.inc =================================================================== --- projects/clang500-import/ObsoleteFiles.inc (revision 319548) +++ projects/clang500-import/ObsoleteFiles.inc (revision 319549) @@ -1,9081 +1,9083 @@ # # $FreeBSD$ # # This file lists old files (OLD_FILES), libraries (OLD_LIBS) and # directories (OLD_DIRS) which should get removed at an update. Recently # removed entries first (with the date as a comment). Dynamic libraries are # special cased (OLD_LIBS). 
Static libraries or the generic links to # the dynamic libraries (lib*.so) should (if you don't know why to make an # exception, make this a "must") be viewed as normal files (OLD_FILES). # # In case of a complete directory hierarchy the sorting is in depth first # order. # # The file is partitioned: OLD_FILES first, then OLD_LIBS and OLD_DIRS last. # # Before you commit changes to this file please check if any entries in # tools/build/mk/OptionalObsoleteFiles.inc can be removed. The following # command tells which files are listed more than once regardless of some # architecture specific conditionals, so you can not blindly trust the # output: # ( grep '+=' /usr/src/ObsoleteFiles.inc | sort -u ; \ # grep '+=' /usr/src/tools/build/mk/OptionalObsoleteFiles.inc | sort -u) | \ # sort | uniq -d # # To find regular duplicates not dependent on optional components, you can # also use something that will not give you false positives, e.g.: # for t in `make -V TARGETS universe`; do # __MAKE_CONF=/dev/null make -f Makefile.inc1 TARGET=$t \ # -V OLD_FILES -V OLD_LIBS -V OLD_DIRS check-old | \ # xargs -n1 | sort | uniq -d; # done # # For optional components, you can use the following to see if some entries # in OptionalObsoleteFiles.inc have been obsoleted by ObsoleteFiles.inc # for o in tools/build/options/WITH*; do # __MAKE_CONF=/dev/null make -f Makefile.inc1 -D${o##*/} \ # -V OLD_FILES -V OLD_LIBS -V OLD_DIRS check-old | \ # xargs -n1 | sort | uniq -d; # done # 2017mmdd: new clang import which bumps version from 4.0.0 to 5.0.0. OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/allocator_interface.h OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/asan_interface.h OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/common_interface_defs.h OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/coverage_interface.h OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/dfsan_interface.h OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/esan_interface.h OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/linux_syscall_hooks.h OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/lsan_interface.h OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/msan_interface.h OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/tsan_interface_atomic.h OLD_DIRS+=usr/lib/clang/4.0.0/include/sanitizer OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_builtin_vars.h OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_cmath.h OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_complex_builtins.h OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_intrinsics.h OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_math_forward_declares.h OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_runtime_wrapper.h OLD_FILES+=usr/lib/clang/4.0.0/include/__stddef_max_align_t.h OLD_FILES+=usr/lib/clang/4.0.0/include/__wmmintrin_aes.h OLD_FILES+=usr/lib/clang/4.0.0/include/__wmmintrin_pclmul.h OLD_FILES+=usr/lib/clang/4.0.0/include/adxintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/altivec.h OLD_FILES+=usr/lib/clang/4.0.0/include/ammintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/arm_acle.h OLD_FILES+=usr/lib/clang/4.0.0/include/arm_neon.h OLD_FILES+=usr/lib/clang/4.0.0/include/armintr.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx2intrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512bwintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512cdintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512dqintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512erintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512fintrin.h 
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512ifmaintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512ifmavlintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512pfintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vbmiintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vbmivlintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vlbwintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vlcdintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vldqintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vlintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avxintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/bmi2intrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/bmiintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/clflushoptintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/cpuid.h OLD_FILES+=usr/lib/clang/4.0.0/include/emmintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/f16cintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/fma4intrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/fmaintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/fxsrintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/htmintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/htmxlintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/ia32intrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/immintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/lzcntintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/mm3dnow.h OLD_FILES+=usr/lib/clang/4.0.0/include/mm_malloc.h OLD_FILES+=usr/lib/clang/4.0.0/include/mmintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/module.modulemap OLD_FILES+=usr/lib/clang/4.0.0/include/msa.h OLD_FILES+=usr/lib/clang/4.0.0/include/mwaitxintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/nmmintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/opencl-c.h OLD_FILES+=usr/lib/clang/4.0.0/include/pkuintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/pmmintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/popcntintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/prfchwintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/rdseedintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/rtmintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/s390intrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/shaintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/smmintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/tbmintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/tmmintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/vadefs.h OLD_FILES+=usr/lib/clang/4.0.0/include/vecintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/wmmintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/x86intrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/xmmintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/xopintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/xsavecintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/xsaveintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/xsaveoptintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/xsavesintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/xtestintrin.h OLD_DIRS+=usr/lib/clang/4.0.0/include OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-i386.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-i386.so OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-x86_64.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-x86_64.so OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.profile-arm.a 
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.profile-i386.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.profile-x86_64.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.safestack-i386.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.stats-i386.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.stats-x86_64.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.stats_client-i386.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a OLD_DIRS+=usr/lib/clang/4.0.0/lib/freebsd OLD_DIRS+=usr/lib/clang/4.0.0/lib OLD_DIRS+=usr/lib/clang/4.0.0 +# 20170601: remove stale manpage +OLD_FILES+=usr/share/man/man2/cap_rights_get.2.gz # 20170601: old libifconfig and libifc OLD_FILES+=usr/lib/libifc.a OLD_FILES+=usr/lib/libifc_p.a OLD_FILES+=usr/lib/libifconfig.a OLD_FILES+=usr/lib/libifconfig_p.a # 20170529: mount.conf(8) -> mount.conf(5) OLD_FILES+=usr/share/man/man8/mount.conf.8.gz # 20170525: remove misleading template OLD_FILES+=usr/share/misc/man.template # 20170525: disconnect the roff docs from the build OLD_FILES+=usr/share/doc/papers/beyond43.ascii.gz OLD_FILES+=usr/share/doc/papers/bio.ascii.gz OLD_FILES+=usr/share/doc/papers/contents.ascii.gz OLD_FILES+=usr/share/doc/papers/devfs.ascii.gz OLD_FILES+=usr/share/doc/papers/diskperf.ascii.gz OLD_FILES+=usr/share/doc/papers/fsinterface.ascii.gz OLD_FILES+=usr/share/doc/papers/hwpmc.ascii.gz OLD_FILES+=usr/share/doc/papers/jail.ascii.gz OLD_FILES+=usr/share/doc/papers/kernmalloc.ascii.gz OLD_FILES+=usr/share/doc/papers/kerntune.ascii.gz OLD_FILES+=usr/share/doc/papers/malloc.ascii.gz OLD_FILES+=usr/share/doc/papers/newvm.ascii.gz OLD_FILES+=usr/share/doc/papers/releng.ascii.gz OLD_FILES+=usr/share/doc/papers/sysperf.ascii.gz OLD_FILES+=usr/share/doc/papers/timecounter.ascii.gz OLD_FILES+=usr/share/doc/psd/01.cacm/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/02.implement/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/03.iosys/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/04.uprog/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/05.sysman/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/06.Clang/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/12.make/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/13.rcs/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/13.rcs/rcs_func.ascii.gz OLD_FILES+=usr/share/doc/psd/15.yacc/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/16.lex/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/17.m4/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/18.gprof/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/20.ipctut/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/21.ipc/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/22.rpcgen/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/23.rpc/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/24.xdr/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/25.xdrrfc/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/26.rpcrfc/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/27.nfsrfc/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/Title.ascii.gz OLD_FILES+=usr/share/doc/psd/contents.ascii.gz OLD_FILES+=usr/share/doc/smm/01.setup/paper.ascii.gz OLD_FILES+=usr/share/doc/smm/02.config/paper.ascii.gz 
OLD_FILES+=usr/share/doc/smm/03.fsck/paper.ascii.gz OLD_FILES+=usr/share/doc/smm/04.quotas/paper.ascii.gz OLD_FILES+=usr/share/doc/smm/05.fastfs/paper.ascii.gz OLD_FILES+=usr/share/doc/smm/06.nfs/paper.ascii.gz OLD_FILES+=usr/share/doc/smm/07.lpd/paper.ascii.gz OLD_FILES+=usr/share/doc/smm/08.sendmailop/paper.ascii.gz OLD_FILES+=usr/share/doc/smm/11.timedop/paper.ascii.gz OLD_FILES+=usr/share/doc/smm/12.timed/paper.ascii.gz OLD_FILES+=usr/share/doc/smm/18.net/paper.ascii.gz OLD_FILES+=usr/share/doc/smm/Title.ascii.gz OLD_FILES+=usr/share/doc/smm/contents.ascii.gz OLD_FILES+=usr/share/doc/usd/04.csh/paper.ascii.gz OLD_FILES+=usr/share/doc/usd/05.dc/paper.ascii.gz OLD_FILES+=usr/share/doc/usd/06.bc/paper.ascii.gz OLD_FILES+=usr/share/doc/usd/07.mail/paper.ascii.gz OLD_FILES+=usr/share/doc/usd/10.exref/paper.ascii.gz OLD_FILES+=usr/share/doc/usd/10.exref/summary.ascii.gz OLD_FILES+=usr/share/doc/usd/11.edit/paper.ascii.gz OLD_FILES+=usr/share/doc/usd/12.vi/paper.ascii.gz OLD_FILES+=usr/share/doc/usd/12.vi/summary.ascii.gz OLD_FILES+=usr/share/doc/usd/12.vi/viapwh.ascii.gz OLD_FILES+=usr/share/doc/usd/13.viref/paper.ascii.gz OLD_FILES+=usr/share/doc/usd/18.msdiffs/paper.ascii.gz OLD_FILES+=usr/share/doc/usd/19.memacros/paper.ascii.gz OLD_FILES+=usr/share/doc/usd/20.meref/paper.ascii.gz OLD_FILES+=usr/share/doc/usd/21.troff/paper.ascii.gz OLD_FILES+=usr/share/doc/usd/22.trofftut/paper.ascii.gz OLD_FILES+=usr/share/doc/usd/Title.ascii.gz OLD_FILES+=usr/share/doc/usd/contents.ascii.gz # 20170523: 64-bit inode support, library version bumps OLD_LIBS+=lib/libzfs.so.2 OLD_LIBS+=usr/lib/libarchive.so.6 OLD_LIBS+=usr/lib/libmilter.so.5 OLD_LIBS+=usr/lib32/libzfs.so.2 OLD_LIBS+=usr/lib32/libarchive.so.6 OLD_LIBS+=usr/lib32/libmilter.so.5 # 20170427: NATM configuration support removed OLD_FILES+=etc/rc.d/atm1 OLD_FILES+=etc/rc.d/atm2 OLD_FILES+=etc/rc.d/atm3 # 20170424: NATM support removed OLD_FILES+=rescue/atmconfig OLD_FILES+=sbin/atmconfig OLD_FILES+=usr/include/bsnmp/snmp_atm.h OLD_FILES+=usr/include/dev/utopia/idtphy.h OLD_FILES+=usr/include/dev/utopia/suni.h OLD_FILES+=usr/include/dev/utopia/utopia.h OLD_FILES+=usr/include/dev/utopia/utopia_priv.h OLD_DIRS+=usr/include/dev/utopia OLD_FILES+=usr/include/net/if_atm.h OLD_FILES+=usr/include/netgraph/atm/ng_atm.h OLD_FILES+=usr/include/netinet/if_atm.h OLD_FILES+=usr/include/netnatm/natm.h OLD_FILES+=usr/lib/debug/sbin/atmconfig.debug OLD_FILES+=usr/lib/debug/usr/lib/snmp_atm.so.6.debug OLD_FILES+=usr/lib/snmp_atm.so OLD_FILES+=usr/lib/snmp_atm.so.6 OLD_FILES+=usr/share/doc/atm/atmconfig.help OLD_FILES+=usr/share/doc/atm/atmconfig_device.help OLD_DIRS+=usr/share/doc/atm OLD_FILES+=usr/share/man/man3/snmp_atm.3.gz OLD_FILES+=usr/share/man/man4/en.4.gz OLD_FILES+=usr/share/man/man4/fatm.4.gz OLD_FILES+=usr/share/man/man4/hatm.4.gz OLD_FILES+=usr/share/man/man4/if_en.4.gz OLD_FILES+=usr/share/man/man4/if_fatm.4.gz OLD_FILES+=usr/share/man/man4/if_hatm.4.gz OLD_FILES+=usr/share/man/man4/if_patm.4.gz OLD_FILES+=usr/share/man/man4/natm.4.gz OLD_FILES+=usr/share/man/man4/natmip.4.gz OLD_FILES+=usr/share/man/man4/ng_atm.4.gz OLD_FILES+=usr/share/man/man4/patm.4.gz OLD_FILES+=usr/share/man/man4/utopia.4.gz OLD_FILES+=usr/share/man/man8/atmconfig.8.gz OLD_FILES+=usr/share/man/man9/utopia.9.gz OLD_FILES+=usr/share/snmp/defs/atm_freebsd.def OLD_FILES+=usr/share/snmp/defs/atm_tree.def OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-ATM-FREEBSD-MIB.txt OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-ATM.txt # 20170420: remove GNU diff OLD_FILES+=usr/share/man/man7/diff.7.gz # 
20170322: rename to _test to match the FreeBSD test suite name scheme OLD_FILES+=usr/tests/usr.bin/col/col OLD_FILES+=usr/tests/usr.bin/diff/diff OLD_FILES+=usr/tests/usr.bin/ident/ident OLD_FILES+=usr/tests/usr.bin/mkimg/mkimg OLD_FILES+=usr/tests/usr.bin/sdiff/sdiff OLD_FILES+=usr/tests/usr.bin/soelim/soelim OLD_FILES+=usr/tests/usr.sbin/pw/pw_config OLD_FILES+=usr/tests/usr.sbin/pw/pw_etcdir OLD_FILES+=usr/tests/usr.sbin/pw/pw_groupadd OLD_FILES+=usr/tests/usr.sbin/pw/pw_groupdel OLD_FILES+=usr/tests/usr.sbin/pw/pw_groupmod OLD_FILES+=usr/tests/usr.sbin/pw/pw_lock OLD_FILES+=usr/tests/usr.sbin/pw/pw_useradd OLD_FILES+=usr/tests/usr.sbin/pw/pw_userdel OLD_FILES+=usr/tests/usr.sbin/pw/pw_usermod OLD_FILES+=usr/tests/usr.sbin/pw/pw_usernext # 20170322: garbage collect old references to igb(4) OLD_FILES+=usr/share/man/man4/if_igb.4.gz OLD_FILES+=usr/share/man/man4/igb.4.gz # 20170319: io_test requires zh_TW.Big5 locale. OLD_FILES+=usr/tests/lib/libc/locale/io_test # 20170319: remove nls for non supported Big5* locales OLD_DIRS+=usr/share/nls/zh_HK.Big5HKSCS OLD_DIRS+=usr/share/nls/zh_TW.Big5 # 20170313: move .../sys/geom/eli/... to .../sys/geom/class/eli/... OLD_FILES+=usr/tests/sys/geom/eli/pbkdf2/pbkdf2 OLD_FILES+=usr/tests/sys/geom/eli/pbkdf2/Kyuafile OLD_FILES+=usr/tests/sys/geom/eli/Kyuafile OLD_DIRS+=usr/tests/sys/geom/eli/pbkdf2 OLD_DIRS+=usr/tests/sys/geom/eli # 20170313: sbin/ipftest and ipresend temporarily disconnected. OLD_FILES+=sbin/ipftest OLD_FILES+=sbin/ipresend # 20170311: Remove WITHOUT_MANDOCDB option OLD_FILES+=usr/share/man/man1/makewhatis.1.gz # 20170308: rename some tests OLD_FILES+=usr/tests/bin/pwait/pwait OLD_FILES+=usr/tests/usr.bin/timeout/timeout # 20170307: remove pcap-int.h OLD_FILES+=usr/include/pcap-int.h # 20170302: new libc++ import which bumps version from 3.9.1 to 4.0.0. OLD_FILES+=usr/include/c++/v1/__undef___deallocate OLD_FILES+=usr/include/c++/v1/tr1/__undef___deallocate # 20170302: new clang import which bumps version from 3.9.1 to 4.0.0. 
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/allocator_interface.h OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/asan_interface.h OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/common_interface_defs.h OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/coverage_interface.h OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/dfsan_interface.h OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/esan_interface.h OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/linux_syscall_hooks.h OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/lsan_interface.h OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/msan_interface.h OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/tsan_interface_atomic.h OLD_DIRS+=usr/lib/clang/3.9.1/include/sanitizer OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_cmath.h OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_intrinsics.h OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_math_forward_declares.h OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_runtime_wrapper.h OLD_FILES+=usr/lib/clang/3.9.1/include/__stddef_max_align_t.h OLD_FILES+=usr/lib/clang/3.9.1/include/__wmmintrin_aes.h OLD_FILES+=usr/lib/clang/3.9.1/include/__wmmintrin_pclmul.h OLD_FILES+=usr/lib/clang/3.9.1/include/adxintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/altivec.h OLD_FILES+=usr/lib/clang/3.9.1/include/ammintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/arm_acle.h OLD_FILES+=usr/lib/clang/3.9.1/include/arm_neon.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx2intrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512bwintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512cdintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512dqintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512erintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512fintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512ifmaintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512ifmavlintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512pfintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vbmiintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vbmivlintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vlbwintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vlcdintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vldqintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vlintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avxintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/bmi2intrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/bmiintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/clflushoptintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/cpuid.h OLD_FILES+=usr/lib/clang/3.9.1/include/cuda_builtin_vars.h OLD_FILES+=usr/lib/clang/3.9.1/include/emmintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/f16cintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/fma4intrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/fmaintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/fxsrintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/htmintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/htmxlintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/ia32intrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/immintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/lzcntintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/mm3dnow.h OLD_FILES+=usr/lib/clang/3.9.1/include/mm_malloc.h OLD_FILES+=usr/lib/clang/3.9.1/include/mmintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/module.modulemap OLD_FILES+=usr/lib/clang/3.9.1/include/msa.h OLD_FILES+=usr/lib/clang/3.9.1/include/mwaitxintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/nmmintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/opencl-c.h 
OLD_FILES+=usr/lib/clang/3.9.1/include/pkuintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/pmmintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/popcntintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/prfchwintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/rdseedintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/rtmintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/s390intrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/shaintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/smmintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/tbmintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/tmmintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/vadefs.h OLD_FILES+=usr/lib/clang/3.9.1/include/vecintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/wmmintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/x86intrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/xmmintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/xopintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/xsavecintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/xsaveintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/xsaveoptintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/xsavesintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/xtestintrin.h OLD_DIRS+=usr/lib/clang/3.9.1/include OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-i386.a OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-i386.so OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-preinit-i386.a OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-x86_64.a OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-x86_64.so OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.profile-arm.a OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.profile-i386.a OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.profile-x86_64.a OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.safestack-i386.a OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.safestack-x86_64.a OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats-i386.a OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats-x86_64.a OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats_client-i386.a OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats_client-x86_64.a OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a OLD_DIRS+=usr/lib/clang/3.9.1/lib/freebsd OLD_DIRS+=usr/lib/clang/3.9.1/lib OLD_DIRS+=usr/lib/clang/3.9.1 # 20170226: SVR4 compatibility removed .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386" OLD_FILES+=usr/share/man/man4/streams.4 OLD_FILES+=usr/share/man/man4/svr4.4 .endif # 20170219: OpenPAM RADULA upgrade removed the libpam tests OLD_FILES+=usr/tests/lib/libpam/Kyuafile OLD_FILES+=usr/tests/lib/libpam/t_openpam_ctype OLD_FILES+=usr/tests/lib/libpam/t_openpam_readlinev OLD_FILES+=usr/tests/lib/libpam/t_openpam_readword OLD_DIRS+=usr/tests/lib/libpam # 20170206: remove bdes(1) OLD_FILES+=usr/bin/bdes OLD_FILES+=usr/lib/debug/usr/bin/bdes.debug OLD_FILES+=usr/share/man/man1/bdes.1.gz # 20170206: merged projects/ipsec OLD_FILES+=usr/include/netinet/ip_ipsec.h OLD_FILES+=usr/include/netinet6/ip6_ipsec.h #
20170128: remove pc98 support OLD_FILES+=usr/include/dev/ic/i8251.h OLD_FILES+=usr/include/dev/ic/i8255.h OLD_FILES+=usr/include/dev/ic/rsa.h OLD_FILES+=usr/include/dev/ic/wd33c93reg.h OLD_FILES+=usr/include/sys/disk/pc98.h OLD_FILES+=usr/include/sys/diskpc98.h OLD_FILES+=usr/share/man/man4/i386/ct.4.gz OLD_FILES+=usr/share/man/man4/i386/snc.4.gz OLD_FILES+=usr/share/syscons/keymaps/jp.pc98.iso.kbd OLD_FILES+=usr/share/syscons/keymaps/jp.pc98.kbd OLD_FILES+=usr/share/vt/keymaps/jp.pc98.iso.kbd OLD_FILES+=usr/share/vt/keymaps/jp.pc98.kbd # 20170110: Four files from ggate tests consolidated into one OLD_FILES+=usr/tests/sys/geom/class/gate/1_test OLD_FILES+=usr/tests/sys/geom/class/gate/2_test OLD_FILES+=usr/tests/sys/geom/class/gate/3_test OLD_FILES+=usr/tests/sys/geom/class/gate/conf.sh # 20170103: libbsnmptools.so made into an INTERNALLIB OLD_FILES+=usr/lib/libbsnmptools.a OLD_FILES+=usr/lib/libbsnmptools_p.a OLD_LIBS+=usr/lib/libbsnmptools.so.0 OLD_LIBS+=usr/lib/libbsnmptools.so # 20170102: sysdecode_getfsstat_flags() renamed to sysdecode_getfsstat_mode() OLD_FILES+=usr/share/man/man3/sysdecode_getfsstat_flags.3.gz # 20161230: libarchive ACL pax test renamed to test_acl_pax_posix1e.tar.uu OLD_FILES+=usr/tests/lib/libarchive/test_acl_pax.tar.uu # 20161229: Three files from gnop tests consolidated into one OLD_FILES+=usr/tests/sys/geom/class/nop/1_test OLD_FILES+=usr/tests/sys/geom/class/nop/2_test OLD_FILES+=usr/tests/sys/geom/class/nop/conf.sh # 20161217: new clang import which bumps version from 3.9.0 to 3.9.1. OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/allocator_interface.h OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/asan_interface.h OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/common_interface_defs.h OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/coverage_interface.h OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/dfsan_interface.h OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/esan_interface.h OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/linux_syscall_hooks.h OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/lsan_interface.h OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/msan_interface.h OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/tsan_interface_atomic.h OLD_DIRS+=usr/lib/clang/3.9.0/include/sanitizer OLD_FILES+=usr/lib/clang/3.9.0/include/__clang_cuda_cmath.h OLD_FILES+=usr/lib/clang/3.9.0/include/__clang_cuda_intrinsics.h OLD_FILES+=usr/lib/clang/3.9.0/include/__clang_cuda_math_forward_declares.h OLD_FILES+=usr/lib/clang/3.9.0/include/__clang_cuda_runtime_wrapper.h OLD_FILES+=usr/lib/clang/3.9.0/include/__stddef_max_align_t.h OLD_FILES+=usr/lib/clang/3.9.0/include/__wmmintrin_aes.h OLD_FILES+=usr/lib/clang/3.9.0/include/__wmmintrin_pclmul.h OLD_FILES+=usr/lib/clang/3.9.0/include/adxintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/altivec.h OLD_FILES+=usr/lib/clang/3.9.0/include/ammintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/arm_acle.h OLD_FILES+=usr/lib/clang/3.9.0/include/arm_neon.h OLD_FILES+=usr/lib/clang/3.9.0/include/avx2intrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/avx512bwintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/avx512cdintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/avx512dqintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/avx512erintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/avx512fintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/avx512ifmaintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/avx512ifmavlintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/avx512pfintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vbmiintrin.h 
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vbmivlintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vlbwintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vlcdintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vldqintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vlintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/avxintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/bmi2intrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/bmiintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/clflushoptintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/cpuid.h OLD_FILES+=usr/lib/clang/3.9.0/include/cuda_builtin_vars.h OLD_FILES+=usr/lib/clang/3.9.0/include/emmintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/f16cintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/fma4intrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/fmaintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/fxsrintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/htmintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/htmxlintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/ia32intrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/immintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/lzcntintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/mm3dnow.h OLD_FILES+=usr/lib/clang/3.9.0/include/mm_malloc.h OLD_FILES+=usr/lib/clang/3.9.0/include/mmintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/module.modulemap OLD_FILES+=usr/lib/clang/3.9.0/include/mwaitxintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/nmmintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/opencl-c.h OLD_FILES+=usr/lib/clang/3.9.0/include/pkuintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/pmmintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/popcntintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/prfchwintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/rdseedintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/rtmintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/s390intrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/shaintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/smmintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/tbmintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/tmmintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/vadefs.h OLD_FILES+=usr/lib/clang/3.9.0/include/vecintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/wmmintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/x86intrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/xmmintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/xopintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/xsavecintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/xsaveintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/xsaveoptintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/xsavesintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/xtestintrin.h OLD_DIRS+=usr/lib/clang/3.9.0/include OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-i386.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-i386.so OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-preinit-i386.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-x86_64.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-x86_64.so OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.profile-arm.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.profile-i386.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.profile-x86_64.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.safestack-i386.a 
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.safestack-x86_64.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.stats-i386.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.stats-x86_64.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.stats_client-i386.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.stats_client-x86_64.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a OLD_DIRS+=usr/lib/clang/3.9.0/lib/freebsd OLD_DIRS+=usr/lib/clang/3.9.0/lib OLD_DIRS+=usr/lib/clang/3.9.0 # 20161205: libproc version bump OLD_LIBS+=usr/lib/libproc.so.3 OLD_LIBS+=usr/lib32/libproc.so.3 # 20161127: Remove vm_page_cache(9) OLD_FILES+=usr/share/man/man9/vm_page_cache.9.gz # 20161124: new clang import which bumps version from 3.8.0 to 3.9.0. OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/allocator_interface.h OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/asan_interface.h OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/common_interface_defs.h OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/coverage_interface.h OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/dfsan_interface.h OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/linux_syscall_hooks.h OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/lsan_interface.h OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/msan_interface.h OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/tsan_interface_atomic.h OLD_DIRS+=usr/lib/clang/3.8.0/include/sanitizer OLD_FILES+=usr/lib/clang/3.8.0/include/__clang_cuda_runtime_wrapper.h OLD_FILES+=usr/lib/clang/3.8.0/include/__stddef_max_align_t.h OLD_FILES+=usr/lib/clang/3.8.0/include/__wmmintrin_aes.h OLD_FILES+=usr/lib/clang/3.8.0/include/__wmmintrin_pclmul.h OLD_FILES+=usr/lib/clang/3.8.0/include/adxintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/altivec.h OLD_FILES+=usr/lib/clang/3.8.0/include/ammintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/arm_acle.h OLD_FILES+=usr/lib/clang/3.8.0/include/arm_neon.h OLD_FILES+=usr/lib/clang/3.8.0/include/avx2intrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/avx512bwintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/avx512cdintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/avx512dqintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/avx512erintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/avx512fintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/avx512vlbwintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/avx512vldqintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/avx512vlintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/avxintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/bmi2intrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/bmiintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/cpuid.h OLD_FILES+=usr/lib/clang/3.8.0/include/cuda_builtin_vars.h OLD_FILES+=usr/lib/clang/3.8.0/include/emmintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/f16cintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/fma4intrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/fmaintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/fxsrintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/htmintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/htmxlintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/ia32intrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/immintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/lzcntintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/mm3dnow.h 
OLD_FILES+=usr/lib/clang/3.8.0/include/mm_malloc.h OLD_FILES+=usr/lib/clang/3.8.0/include/mmintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/module.modulemap OLD_FILES+=usr/lib/clang/3.8.0/include/nmmintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/pkuintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/pmmintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/popcntintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/prfchwintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/rdseedintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/rtmintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/s390intrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/shaintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/smmintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/tbmintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/tmmintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/vadefs.h OLD_FILES+=usr/lib/clang/3.8.0/include/vecintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/wmmintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/x86intrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/xmmintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/xopintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/xsavecintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/xsaveintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/xsaveoptintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/xsavesintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/xtestintrin.h OLD_DIRS+=usr/lib/clang/3.8.0/include OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-i386.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-i386.so OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-preinit-i386.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-x86_64.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-x86_64.so OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.profile-arm.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.profile-i386.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.profile-x86_64.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.safestack-i386.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.safestack-x86_64.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a OLD_DIRS+=usr/lib/clang/3.8.0/lib/freebsd OLD_DIRS+=usr/lib/clang/3.8.0/lib OLD_DIRS+=usr/lib/clang/3.8.0 # 20161121: Hyper-V manuals only apply to amd64 and i386. 
.if ${TARGET_ARCH} != "amd64" && ${TARGET_ARCH} != "i386" OLD_FILES+=usr/share/man/man4/hv_kvp.4.gz OLD_FILES+=usr/share/man/man4/hv_netvsc.4.gz OLD_FILES+=usr/share/man/man4/hv_storvsc.4.gz OLD_FILES+=usr/share/man/man4/hv_utils.4.gz OLD_FILES+=usr/share/man/man4/hv_vmbus.4.gz OLD_FILES+=usr/share/man/man4/hv_vss.4.gz .endif # 20161118: Remove hv_ata_pci_disengage(4) OLD_FILES+=usr/share/man/man4/hv_ata_pci_disengage.4.gz # 20161017: urtwn(4) was merged into rtwn(4) OLD_FILES+=usr/share/man/man4/urtwn.4.gz OLD_FILES+=usr/share/man/man4/urtwnfw.4.gz # 20161015: Remove GNU rcs OLD_FILES+=usr/bin/ci OLD_FILES+=usr/bin/co OLD_FILES+=usr/bin/merge OLD_FILES+=usr/bin/rcs OLD_FILES+=usr/bin/rcsclean OLD_FILES+=usr/bin/rcsdiff OLD_FILES+=usr/bin/rcsfreeze OLD_FILES+=usr/bin/rcsmerge OLD_FILES+=usr/bin/rlog OLD_FILES+=usr/share/doc/psd/13.rcs/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/13.rcs/rcs_func.ascii.gz OLD_DIRS+=usr/share/doc/psd/13.rcs OLD_FILES+=usr/share/man/man1/ci.1.gz OLD_FILES+=usr/share/man/man1/co.1.gz OLD_FILES+=usr/share/man/man1/merge.1.gz OLD_FILES+=usr/share/man/man1/rcs.1.gz OLD_FILES+=usr/share/man/man1/rcsclean.1.gz OLD_FILES+=usr/share/man/man1/rcsdiff.1.gz OLD_FILES+=usr/share/man/man1/rcsfreeze.1.gz OLD_FILES+=usr/share/man/man1/rcsintro.1.gz OLD_FILES+=usr/share/man/man1/rcsmerge.1.gz OLD_FILES+=usr/share/man/man1/rlog.1.gz OLD_FILES+=usr/share/man/man5/rcsfile.5.gz # 20161010: remove link to removed m_getclr(9) macro OLD_FILES+=usr/share/man/man9/m_getclr.9.gz # 20161003: MK_ELFCOPY_AS_OBJCOPY option retired OLD_FILES+=usr/bin/elfcopy OLD_FILES+=usr/share/man/man1/elfcopy.1.gz # 20160906: libkqueue tests moved to /usr/tests/sys/kqueue/libkqueue OLD_FILES+=usr/tests/sys/kqueue/kqtest OLD_FILES+=usr/tests/sys/kqueue/kqueue_test # 20160903: idle page zeroing support removed OLD_FILES+=usr/share/man/man9/pmap_zero_idle.9.gz # 20160901: Remove digi(4) OLD_FILES+=usr/share/man/man4/digi.4.gz # 20160819: Remove ie(4) OLD_FILES+=usr/share/man/man4/i386/ie.4.gz # 20160819: Remove spic(4) OLD_FILES+=usr/share/man/man4/spic.4.gz # 20160819: Remove wl(4) and wlconfig(8) OLD_FILES+=usr/share/man/man4/i386/wl.4.gz OLD_FILES+=usr/sbin/wlconfig OLD_FILES+=usr/share/man/man8/i386/wlconfig.8.gz # 20160819: Remove si(4) and sicontrol(8) OLD_FILES+=usr/share/man/man4/si.4.gz OLD_FILES+=usr/sbin/sicontrol OLD_FILES+=usr/share/man/man8/sicontrol.8.gz # 20160819: Remove scd(4) OLD_FILES+=usr/share/man/man4/scd.4.gz # 20160815: Remove mcd(4) OLD_FILES+=usr/share/man/man4/mcd.4.gz # 20160805: lockmgr_waiters(9) removed OLD_FILES+=usr/share/man/man9/lockmgr_waiters.9.gz # 20160703: POSIXify locales with variants OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_COLLATE OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_CTYPE OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_MESSAGES OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_MONETARY OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_NUMERIC OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_TIME OLD_DIRS+=usr/share/locale/zh_Hant_TW.UTF-8 OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_COLLATE OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_CTYPE OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_MESSAGES OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_MONETARY OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_NUMERIC OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_TIME OLD_DIRS+=usr/share/locale/zh_Hant_TW.Big5 OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_COLLATE OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_CTYPE 
OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_MESSAGES OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_MONETARY OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_NUMERIC OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_TIME OLD_DIRS+=usr/share/locale/zh_Hant_HK.UTF-8 OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_COLLATE OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_CTYPE OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_MESSAGES OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_MONETARY OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_NUMERIC OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_TIME OLD_DIRS+=usr/share/locale/zh_Hans_CN.eucCN OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_COLLATE OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_CTYPE OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_MESSAGES OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_MONETARY OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_NUMERIC OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_TIME OLD_DIRS+=usr/share/locale/zh_Hans_CN.UTF-8 OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_COLLATE OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_CTYPE OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_MESSAGES OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_MONETARY OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_NUMERIC OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_TIME OLD_DIRS+=usr/share/locale/zh_Hans_CN.GBK OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_COLLATE OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_CTYPE OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_MESSAGES OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_MONETARY OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_NUMERIC OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_TIME OLD_DIRS+=usr/share/locale/zh_Hans_CN.GB2312 OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_COLLATE OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_CTYPE OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_MESSAGES OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_MONETARY OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_NUMERIC OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_TIME OLD_DIRS+=usr/share/locale/zh_Hans_CN.GB18030 OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_COLLATE OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_CTYPE OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_MESSAGES OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_MONETARY OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_NUMERIC OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_TIME OLD_DIRS+=usr/share/locale/sr_Latn_RS.UTF-8 OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_COLLATE OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_CTYPE OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_MESSAGES OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_MONETARY OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_NUMERIC OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_TIME OLD_DIRS+=usr/share/locale/sr_Latn_RS.ISO8859-2 OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_COLLATE OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_CTYPE OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_MESSAGES OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_MONETARY OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_NUMERIC OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_TIME OLD_DIRS+=usr/share/locale/sr_Cyrl_RS.UTF-8 OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_COLLATE OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_CTYPE OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_MESSAGES 
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_MONETARY OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_NUMERIC OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_TIME OLD_DIRS+=usr/share/locale/sr_Cyrl_RS.ISO8859-5 OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_COLLATE OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_CTYPE OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_MESSAGES OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_MONETARY OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_NUMERIC OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_TIME OLD_DIRS+=usr/share/locale/mn_Cyrl_MN.UTF-8 OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_COLLATE OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_CTYPE OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_MESSAGES OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_MONETARY OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_NUMERIC OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_TIME OLD_DIRS+=usr/share/locale/kk_Cyrl_KZ.UTF-8 # 20160608: removed pam_verbose_error OLD_LIBS+=usr/lib/libpam.so.5 OLD_LIBS+=usr/lib/pam_chroot.so.5 OLD_LIBS+=usr/lib/pam_deny.so.5 OLD_LIBS+=usr/lib/pam_echo.so.5 OLD_LIBS+=usr/lib/pam_exec.so.5 OLD_LIBS+=usr/lib/pam_ftpusers.so.5 OLD_LIBS+=usr/lib/pam_group.so.5 OLD_LIBS+=usr/lib/pam_guest.so.5 OLD_LIBS+=usr/lib/pam_krb5.so.5 OLD_LIBS+=usr/lib/pam_ksu.so.5 OLD_LIBS+=usr/lib/pam_lastlog.so.5 OLD_LIBS+=usr/lib/pam_login_access.so.5 OLD_LIBS+=usr/lib/pam_nologin.so.5 OLD_LIBS+=usr/lib/pam_opie.so.5 OLD_LIBS+=usr/lib/pam_opieaccess.so.5 OLD_LIBS+=usr/lib/pam_passwdqc.so.5 OLD_LIBS+=usr/lib/pam_permit.so.5 OLD_LIBS+=usr/lib/pam_radius.so.5 OLD_LIBS+=usr/lib/pam_rhosts.so.5 OLD_LIBS+=usr/lib/pam_rootok.so.5 OLD_LIBS+=usr/lib/pam_securetty.so.5 OLD_LIBS+=usr/lib/pam_self.so.5 OLD_LIBS+=usr/lib/pam_ssh.so.5 OLD_LIBS+=usr/lib/pam_tacplus.so.5 OLD_LIBS+=usr/lib/pam_unix.so.5 OLD_LIBS+=usr/lib32/libpam.so.5 OLD_LIBS+=usr/lib32/pam_chroot.so.5 OLD_LIBS+=usr/lib32/pam_deny.so.5 OLD_LIBS+=usr/lib32/pam_echo.so.5 OLD_LIBS+=usr/lib32/pam_exec.so.5 OLD_LIBS+=usr/lib32/pam_ftpusers.so.5 OLD_LIBS+=usr/lib32/pam_group.so.5 OLD_LIBS+=usr/lib32/pam_guest.so.5 OLD_LIBS+=usr/lib32/pam_krb5.so.5 OLD_LIBS+=usr/lib32/pam_ksu.so.5 OLD_LIBS+=usr/lib32/pam_lastlog.so.5 OLD_LIBS+=usr/lib32/pam_login_access.so.5 OLD_LIBS+=usr/lib32/pam_nologin.so.5 OLD_LIBS+=usr/lib32/pam_opie.so.5 OLD_LIBS+=usr/lib32/pam_opieaccess.so.5 OLD_LIBS+=usr/lib32/pam_passwdqc.so.5 OLD_LIBS+=usr/lib32/pam_permit.so.5 OLD_LIBS+=usr/lib32/pam_radius.so.5 OLD_LIBS+=usr/lib32/pam_rhosts.so.5 OLD_LIBS+=usr/lib32/pam_rootok.so.5 OLD_LIBS+=usr/lib32/pam_securetty.so.5 OLD_LIBS+=usr/lib32/pam_self.so.5 OLD_LIBS+=usr/lib32/pam_ssh.so.5 OLD_LIBS+=usr/lib32/pam_tacplus.so.5 OLD_LIBS+=usr/lib32/pam_unix.so.5 # 20160523: remove extraneous ALTQ files OLD_FILES+=usr/include/altq/altq_codel.h OLD_FILES+=usr/include/altq/altq_fairq.h # 20160519: remove DTrace Toolkit from base OLD_FILES+=usr/sbin/dtruss OLD_FILES+=usr/share/dtrace/toolkit/execsnoop OLD_FILES+=usr/share/dtrace/toolkit/hotkernel OLD_FILES+=usr/share/dtrace/toolkit/hotuser OLD_FILES+=usr/share/dtrace/toolkit/opensnoop OLD_FILES+=usr/share/dtrace/toolkit/procsystime OLD_DIRS+=usr/share/dtrace/toolkit OLD_FILES+=usr/share/man/man1/dtruss.1.gz # 20160519: stale MLINK removed OLD_FILES+=usr/share/man/man9/rman_await_resource.9.gz # 20160517: ReiserFS removed OLD_FILES+=usr/share/man/man5/reiserfs.5.gz # 20160504: tests rework OLD_FILES+=usr/tests/lib/libc/regex/data/README # 20160430: kvm_getfiles(3) removed from kvm(3)
OLD_LIBS+=lib/libkvm.so.6 OLD_LIBS+=usr/lib32/libkvm.so.6 OLD_FILES+=usr/share/man/man3/kvm_getfiles.3.gz # 20160423: remove mroute6d OLD_FILES+=etc/rc.d/mroute6d # 20160419: rename units.lib -> definitions.units OLD_FILES+=usr/share/misc/units.lib # 20160419: remove Big5HKSCS locales OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_COLLATE OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_CTYPE OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_MESSAGES OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_MONETARY OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_NUMERIC OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_TIME OLD_DIRS+=usr/share/locale/zh_HK.Big5HKSCS OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_COLLATE OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_CTYPE OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_MESSAGES OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_MONETARY OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_NUMERIC OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_TIME OLD_DIRS+=usr/share/locale/zh_Hant_HK.Big5HKSCS # 20160317: rman_res_t size bump to uintmax_t OLD_LIBS+=usr/lib/libdevinfo.so.5 OLD_LIBS+=usr/lib32/libdevinfo.so.5 # 20160305: new clang import which bumps version from 3.7.1 to 3.8.0. OLD_FILES+=usr/bin/macho-dump OLD_FILES+=usr/bin/tblgen OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/allocator_interface.h OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/asan_interface.h OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/common_interface_defs.h OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/coverage_interface.h OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/dfsan_interface.h OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/linux_syscall_hooks.h OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/lsan_interface.h OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/msan_interface.h OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/tsan_interface_atomic.h OLD_DIRS+=usr/lib/clang/3.7.1/include/sanitizer OLD_FILES+=usr/lib/clang/3.7.1/include/__stddef_max_align_t.h OLD_FILES+=usr/lib/clang/3.7.1/include/__wmmintrin_aes.h OLD_FILES+=usr/lib/clang/3.7.1/include/__wmmintrin_pclmul.h OLD_FILES+=usr/lib/clang/3.7.1/include/adxintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/altivec.h OLD_FILES+=usr/lib/clang/3.7.1/include/ammintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/arm_acle.h OLD_FILES+=usr/lib/clang/3.7.1/include/arm_neon.h OLD_FILES+=usr/lib/clang/3.7.1/include/avx2intrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/avx512bwintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/avx512cdintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/avx512dqintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/avx512erintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/avx512fintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/avx512vlbwintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/avx512vldqintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/avx512vlintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/avxintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/bmi2intrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/bmiintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/cpuid.h OLD_FILES+=usr/lib/clang/3.7.1/include/cuda_builtin_vars.h OLD_FILES+=usr/lib/clang/3.7.1/include/emmintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/f16cintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/fma4intrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/fmaintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/fxsrintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/htmintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/htmxlintrin.h 
OLD_FILES+=usr/lib/clang/3.7.1/include/ia32intrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/immintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/lzcntintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/mm3dnow.h OLD_FILES+=usr/lib/clang/3.7.1/include/mm_malloc.h OLD_FILES+=usr/lib/clang/3.7.1/include/mmintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/module.modulemap OLD_FILES+=usr/lib/clang/3.7.1/include/nmmintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/pmmintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/popcntintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/prfchwintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/rdseedintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/rtmintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/s390intrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/shaintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/smmintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/tbmintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/tmmintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/vadefs.h OLD_FILES+=usr/lib/clang/3.7.1/include/vecintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/wmmintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/x86intrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/xmmintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/xopintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/xtestintrin.h OLD_DIRS+=usr/lib/clang/3.7.1/include OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan-i386.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan-preinit-i386.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan-x86_64.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.profile-arm.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.profile-i386.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.profile-x86_64.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.safestack-i386.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.safestack-x86_64.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a OLD_DIRS+=usr/lib/clang/3.7.1/lib/freebsd OLD_DIRS+=usr/lib/clang/3.7.1/lib OLD_DIRS+=usr/lib/clang/3.7.1 # 20160301: Remove taskqueue_enqueue_fast OLD_FILES+=usr/share/man/man9/taskqueue_enqueue_fast.9.gz # 20160225: Remove casperd and libcapsicum. 
OLD_FILES+=sbin/casperd OLD_FILES+=etc/rc.d/casperd OLD_FILES+=usr/share/man/man8/casperd.8.gz OLD_FILES+=usr/include/libcapsicum.h OLD_FILES+=usr/include/libcapsicum_service.h OLD_FILES+=usr/share/man/man3/libcapsicum.3.gz OLD_FILES+=usr/include/libcapsicum_dns.h OLD_FILES+=usr/include/libcapsicum_grp.h OLD_FILES+=usr/include/libcapsicum_impl.h OLD_FILES+=usr/include/libcapsicum_pwd.h OLD_FILES+=usr/include/libcapsicum_random.h OLD_FILES+=usr/include/libcapsicum_sysctl.h OLD_FILES+=libexec/casper/dns OLD_FILES+=libexec/casper/grp OLD_FILES+=libexec/casper/pwd OLD_FILES+=libexec/casper/random OLD_FILES+=libexec/casper/sysctl OLD_FILES+=libexec/casper/.debug/random.debug OLD_FILES+=libexec/casper/.debug/dns.debug OLD_FILES+=libexec/casper/.debug/sysctl.debug OLD_FILES+=libexec/casper/.debug/pwd.debug OLD_FILES+=libexec/casper/.debug/grp.debug OLD_DIRS+=libexec/casper/.debug OLD_DIRS+=libexec/casper OLD_FILES+=usr/lib/libcapsicum.a OLD_FILES+=usr/lib/libcapsicum.so OLD_LIBS+=lib/libcapsicum.so.0 OLD_FILES+=usr/lib/libcapsicum_p.a OLD_FILES+=usr/lib32/libcapsicum.a OLD_FILES+=usr/lib32/libcapsicum.so OLD_LIBS+=usr/lib32/libcapsicum.so.0 OLD_FILES+=usr/lib32/libcapsicum_p.a # 20160223: functionality from mkulzma(1) merged into mkuzip(1) OLD_FILES+=usr/bin/mkulzma OLD_FILES+=usr/share/man/man4/geom_uncompress.4.gz OLD_FILES+=usr/share/man/man8/mkulzma.8.gz # 20160211: Remove obsolete unbound-control-setup OLD_FILES+=usr/sbin/unbound-control-setup # 20160121: cc.h moved OLD_FILES+=usr/include/netinet/cc.h # 20160116: Update mandoc to cvs snapshot 20160116 OLD_FILES+=usr/share/mdocml/example.style.css OLD_FILES+=usr/share/mdocml/style.css OLD_DIRS+=usr/share/mdocml # 20160114: SA-16:06.snmpd OLD_FILES+=usr/share/examples/etc/snmpd.config # 20160107: GNU ld installed as ld.bfd and linked as ld OLD_FILES+=usr/lib/debug/usr/bin/ld.debug # 20151225: new clang import which bumps version from 3.7.0 to 3.7.1.
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/allocator_interface.h OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/asan_interface.h OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/common_interface_defs.h OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/coverage_interface.h OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/dfsan_interface.h OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/linux_syscall_hooks.h OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/lsan_interface.h OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/msan_interface.h OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/tsan_interface_atomic.h OLD_DIRS+=usr/lib/clang/3.7.0/include/sanitizer OLD_FILES+=usr/lib/clang/3.7.0/include/__stddef_max_align_t.h OLD_FILES+=usr/lib/clang/3.7.0/include/__wmmintrin_aes.h OLD_FILES+=usr/lib/clang/3.7.0/include/__wmmintrin_pclmul.h OLD_FILES+=usr/lib/clang/3.7.0/include/adxintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/altivec.h OLD_FILES+=usr/lib/clang/3.7.0/include/ammintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/arm_acle.h OLD_FILES+=usr/lib/clang/3.7.0/include/arm_neon.h OLD_FILES+=usr/lib/clang/3.7.0/include/avx2intrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/avx512bwintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/avx512cdintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/avx512dqintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/avx512erintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/avx512fintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/avx512vlbwintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/avx512vldqintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/avx512vlintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/avxintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/bmi2intrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/bmiintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/cpuid.h OLD_FILES+=usr/lib/clang/3.7.0/include/cuda_builtin_vars.h OLD_FILES+=usr/lib/clang/3.7.0/include/emmintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/f16cintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/fma4intrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/fmaintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/fxsrintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/htmintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/htmxlintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/ia32intrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/immintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/lzcntintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/mm3dnow.h OLD_FILES+=usr/lib/clang/3.7.0/include/mm_malloc.h OLD_FILES+=usr/lib/clang/3.7.0/include/mmintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/module.modulemap OLD_FILES+=usr/lib/clang/3.7.0/include/nmmintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/pmmintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/popcntintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/prfchwintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/rdseedintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/rtmintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/s390intrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/shaintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/smmintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/tbmintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/tmmintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/vadefs.h OLD_FILES+=usr/lib/clang/3.7.0/include/vecintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/wmmintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/x86intrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/xmmintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/xopintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/xtestintrin.h OLD_DIRS+=usr/lib/clang/3.7.0/include 
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-i386.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-preinit-i386.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-x86_64.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.profile-arm.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.profile-i386.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.profile-x86_64.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.safestack-i386.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.safestack-x86_64.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a OLD_DIRS+=usr/lib/clang/3.7.0/lib/freebsd OLD_DIRS+=usr/lib/clang/3.7.0/lib OLD_DIRS+=usr/lib/clang/3.7.0 # 20151130: libelf moved from /usr/lib to /lib (libkvm dependency in r291406) OLD_LIBS+=usr/lib/libelf.so.2 # 20151115: Fix bad upgrade scheme OLD_FILES+=usr/share/locale/zh_CN.GB18030/zh_Hans_CN.GB18030 OLD_FILES+=usr/share/locale/zh_CN.GB2312/zh_Hans_CN.GB2312 OLD_FILES+=usr/share/locale/zh_CN.GBK/zh_Hans_CN.GBK OLD_FILES+=usr/share/locale/zh_CN.UTF-8/zh_Hans_CN.UTF-8 OLD_FILES+=usr/share/locale/zh_CN.eucCN/zh_Hans_CN.eucCN OLD_FILES+=usr/share/locale/zh_TW.Big5/zh_Hant_TW.Big5 OLD_FILES+=usr/share/locale/zh_TW.UTF-8/zh_Hant_TW.UTF-8 # 20151107: String collation improvements OLD_FILES+=usr/share/locale/UTF-8/LC_CTYPE OLD_DIRS+=usr/share/locale/UTF-8 OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_COLLATE OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_CTYPE OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_MESSAGES OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_MONETARY OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_NUMERIC OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_TIME OLD_DIRS+=usr/share/locale/kk_KZ.PT154 OLD_FILES+=usr/share/locale/la_LN.ISO8859-1/LC_COLLATE OLD_FILES+=usr/share/locale/la_LN.ISO8859-1/LC_CTYPE OLD_FILES+=usr/share/locale/la_LN.ISO8859-1/LC_TIME OLD_DIRS+=usr/share/locale/la_LN.ISO8859-1 OLD_FILES+=usr/share/locale/la_LN.ISO8859-13/LC_COLLATE OLD_FILES+=usr/share/locale/la_LN.ISO8859-13/LC_CTYPE OLD_DIRS+=usr/share/locale/la_LN.ISO8859-13 OLD_FILES+=usr/share/locale/la_LN.ISO8859-15/LC_COLLATE OLD_FILES+=usr/share/locale/la_LN.ISO8859-15/LC_CTYPE OLD_FILES+=usr/share/locale/la_LN.ISO8859-15/LC_TIME OLD_DIRS+=usr/share/locale/la_LN.ISO8859-15 OLD_FILES+=usr/share/locale/la_LN.ISO8859-2/LC_COLLATE OLD_FILES+=usr/share/locale/la_LN.ISO8859-2/LC_CTYPE OLD_FILES+=usr/share/locale/la_LN.ISO8859-2/LC_TIME OLD_DIRS+=usr/share/locale/la_LN.ISO8859-2 OLD_FILES+=usr/share/locale/la_LN.ISO8859-4/LC_COLLATE OLD_FILES+=usr/share/locale/la_LN.ISO8859-4/LC_CTYPE OLD_FILES+=usr/share/locale/la_LN.ISO8859-4/LC_TIME OLD_DIRS+=usr/share/locale/la_LN.ISO8859-4 OLD_FILES+=usr/share/locale/la_LN.US-ASCII/LC_COLLATE OLD_FILES+=usr/share/locale/la_LN.US-ASCII/LC_CTYPE OLD_FILES+=usr/share/locale/la_LN.US-ASCII/LC_TIME OLD_DIRS+=usr/share/locale/la_LN.US-ASCII OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_MESSAGES OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_TIME
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_COLLATE OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_MONETARY OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_CTYPE OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_NUMERIC OLD_DIRS+=usr/share/locale/lt_LT.ISO8859-4 OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_COLLATE OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_CTYPE OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_MESSAGES OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_MONETARY OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_NUMERIC OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_TIME OLD_DIRS+=usr/share/locale/no_NO.ISO8859-1 OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_COLLATE OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_CTYPE OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_MESSAGES OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_MONETARY OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_NUMERIC OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_TIME OLD_DIRS+=usr/share/locale/no_NO.ISO8859-15 OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_COLLATE OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_CTYPE OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_MESSAGES OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_MONETARY OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_NUMERIC OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_TIME OLD_DIRS+=usr/share/locale/no_NO.UTF-8 OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_COLLATE OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_TIME OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_CTYPE OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_MESSAGES OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_NUMERIC OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_MONETARY OLD_DIRS+=usr/share/locale/sr_YU.ISO8859-2 OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_COLLATE OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_MONETARY OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_NUMERIC OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_CTYPE OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_TIME OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_MESSAGES OLD_DIRS+=usr/share/locale/sr_YU.ISO8859-5 OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_COLLATE OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_MONETARY OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_CTYPE OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_TIME OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_NUMERIC OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_MESSAGES OLD_DIRS+=usr/share/locale/sr_YU.UTF-8 # 20151101: added missing _test suffix on multiple tests in lib/libc OLD_FILES+=usr/tests/lib/libc/c063/faccessat OLD_FILES+=usr/tests/lib/libc/c063/fchmodat OLD_FILES+=usr/tests/lib/libc/c063/fchownat OLD_FILES+=usr/tests/lib/libc/c063/fexecve OLD_FILES+=usr/tests/lib/libc/c063/fstatat OLD_FILES+=usr/tests/lib/libc/c063/linkat OLD_FILES+=usr/tests/lib/libc/c063/mkdirat OLD_FILES+=usr/tests/lib/libc/c063/mkfifoat OLD_FILES+=usr/tests/lib/libc/c063/mknodat OLD_FILES+=usr/tests/lib/libc/c063/openat OLD_FILES+=usr/tests/lib/libc/c063/readlinkat OLD_FILES+=usr/tests/lib/libc/c063/renameat OLD_FILES+=usr/tests/lib/libc/c063/symlinkat OLD_FILES+=usr/tests/lib/libc/c063/unlinkat OLD_FILES+=usr/tests/lib/libc/c063/utimensat OLD_FILES+=usr/tests/lib/libc/string/memchr OLD_FILES+=usr/tests/lib/libc/string/memcpy OLD_FILES+=usr/tests/lib/libc/string/memmem OLD_FILES+=usr/tests/lib/libc/string/memset OLD_FILES+=usr/tests/lib/libc/string/strcat OLD_FILES+=usr/tests/lib/libc/string/strchr OLD_FILES+=usr/tests/lib/libc/string/strcmp OLD_FILES+=usr/tests/lib/libc/string/strcpy 
OLD_FILES+=usr/tests/lib/libc/string/strcspn OLD_FILES+=usr/tests/lib/libc/string/strerror OLD_FILES+=usr/tests/lib/libc/string/strlen OLD_FILES+=usr/tests/lib/libc/string/strpbrk OLD_FILES+=usr/tests/lib/libc/string/strrchr OLD_FILES+=usr/tests/lib/libc/string/strspn OLD_FILES+=usr/tests/lib/libc/string/swab # 20151101: 430.status-rwho was renamed to 430.status-uptime OLD_FILES+=etc/periodic/daily/430.status-rwho # 20151030: OpenSSL 1.0.2d import OLD_FILES+=usr/share/openssl/man/man3/CMS_set1_signer_certs.3.gz OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_ctrl.3.gz OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_ctrl_str.3.gz OLD_FILES+=usr/share/openssl/man/man3/d2i_509_CRL_fp.3.gz OLD_LIBS+=lib/libcrypto.so.7 OLD_LIBS+=usr/lib/libssl.so.7 OLD_LIBS+=usr/lib32/libcrypto.so.7 OLD_LIBS+=usr/lib32/libssl.so.7 # 20151029: LinuxKPI moved to sys/compat/linuxkpi OLD_FILES+=usr/include/dev/usb/usb_compat_linux.h # 20151015: test symbols moved to /usr/lib/debug OLD_DIRS+=usr/tests/lib/atf/libatf-c++/.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/atf_c++_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/build_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/check_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/config_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/macros_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/tests_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/utils_test.debug OLD_DIRS+=usr/tests/lib/atf/libatf-c++/detail/.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/application_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/env_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/exceptions_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/fs_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/process_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/sanity_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/text_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/version_helper.debug OLD_DIRS+=usr/tests/lib/atf/libatf-c/.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/atf_c_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/build_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/check_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/config_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/error_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/macros_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/tc_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/tp_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/utils_test.debug OLD_DIRS+=usr/tests/lib/atf/libatf-c/detail/.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/dynstr_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/env_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/fs_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/list_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/map_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/process_helpers.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/process_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/sanity_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/text_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/user_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/version_helper.debug 
OLD_DIRS+=usr/tests/lib/atf/test-programs/.debug OLD_FILES+=usr/tests/lib/atf/test-programs/.debug/c_helpers.debug OLD_FILES+=usr/tests/lib/atf/test-programs/.debug/cpp_helpers.debug OLD_DIRS+=usr/tests/lib/libc/c063/.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/faccessat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/fchmodat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/fchownat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/fexecve.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/fstatat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/linkat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/mkdirat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/mkfifoat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/mknodat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/openat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/readlinkat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/renameat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/symlinkat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/unlinkat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/utimensat.debug OLD_DIRS+=usr/tests/lib/libc/db/.debug OLD_FILES+=usr/tests/lib/libc/db/.debug/h_db.debug OLD_DIRS+=usr/tests/lib/libc/gen/.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/alarm_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/arc4random_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/assert_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/basedirname_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/dir_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/floatunditf_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/fnmatch_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpclassify2_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpclassify_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpsetmask_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpsetround_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/ftok_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/getcwd_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/getgrent_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/glob_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/humanize_number_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/isnan_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/nice_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/pause_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/raise_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/realpath_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/setdomainname_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/sethostname_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/sleep_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/syslog_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/time_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/ttyname_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/vis_test.debug OLD_DIRS+=usr/tests/lib/libc/gen/execve/.debug OLD_FILES+=usr/tests/lib/libc/gen/execve/.debug/execve_test.debug OLD_DIRS+=usr/tests/lib/libc/gen/posix_spawn/.debug OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/fileactions_test.debug OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/h_fileactions.debug OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/h_spawn.debug OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/h_spawnattr.debug OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/spawn_test.debug OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/spawnattr_test.debug OLD_DIRS+=usr/tests/lib/libc/hash/.debug 
OLD_FILES+=usr/tests/lib/libc/hash/.debug/h_hash.debug OLD_FILES+=usr/tests/lib/libc/hash/.debug/sha2_test.debug OLD_DIRS+=usr/tests/lib/libc/inet/.debug OLD_FILES+=usr/tests/lib/libc/inet/.debug/inet_network_test.debug OLD_DIRS+=usr/tests/lib/libc/locale/.debug OLD_FILES+=usr/tests/lib/libc/locale/.debug/io_test.debug OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbrtowc_test.debug OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbsnrtowcs_test.debug OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbstowcs_test.debug OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbtowc_test.debug OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcscspn_test.debug OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcspbrk_test.debug OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcsspn_test.debug OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcstod_test.debug OLD_FILES+=usr/tests/lib/libc/locale/.debug/wctomb_test.debug OLD_DIRS+=usr/tests/lib/libc/net/.debug OLD_FILES+=usr/tests/lib/libc/net/.debug/ether_aton_test.debug OLD_FILES+=usr/tests/lib/libc/net/.debug/getprotoent_test.debug OLD_FILES+=usr/tests/lib/libc/net/.debug/h_dns_server.debug OLD_FILES+=usr/tests/lib/libc/net/.debug/h_nsd_recurse.debug OLD_FILES+=usr/tests/lib/libc/net/.debug/h_protoent.debug OLD_FILES+=usr/tests/lib/libc/net/.debug/h_servent.debug OLD_DIRS+=usr/tests/lib/libc/regex/.debug OLD_FILES+=usr/tests/lib/libc/regex/.debug/exhaust_test.debug OLD_FILES+=usr/tests/lib/libc/regex/.debug/h_regex.debug OLD_FILES+=usr/tests/lib/libc/regex/.debug/regex_att_test.debug OLD_DIRS+=usr/tests/lib/libc/ssp/.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_fgets.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_getcwd.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_gets.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_memcpy.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_memmove.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_memset.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_raw.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_read.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_readlink.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_snprintf.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_sprintf.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_stpcpy.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_stpncpy.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strcat.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strcpy.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strncat.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strncpy.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_vsnprintf.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_vsprintf.debug OLD_DIRS+=usr/tests/lib/libc/stdio/.debug OLD_FILES+=usr/tests/lib/libc/stdio/.debug/clearerr_test.debug OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fflush_test.debug OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fmemopen2_test.debug OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fmemopen_test.debug OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fopen_test.debug OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fputc_test.debug OLD_FILES+=usr/tests/lib/libc/stdio/.debug/mktemp_test.debug OLD_FILES+=usr/tests/lib/libc/stdio/.debug/popen_test.debug OLD_FILES+=usr/tests/lib/libc/stdio/.debug/printf_test.debug OLD_FILES+=usr/tests/lib/libc/stdio/.debug/scanf_test.debug OLD_DIRS+=usr/tests/lib/libc/stdlib/.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/abs_test.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/atoi_test.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/div_test.debug 
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/exit_test.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/getenv_test.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/h_getopt.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/h_getopt_long.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/hsearch_test.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/posix_memalign_test.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/random_test.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/strtod_test.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/strtol_test.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/system_test.debug OLD_DIRS+=usr/tests/lib/libc/string/.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/memchr.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/memcpy.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/memmem.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/memset.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/strcat.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/strchr.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/strcmp.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/strcpy.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/strcspn.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/strerror.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/strlen.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/strpbrk.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/strrchr.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/strspn.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/swab.debug OLD_DIRS+=usr/tests/lib/libc/sys/.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/access_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/chroot_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/clock_gettime_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/connect_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/dup_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/fsync_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/getcontext_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/getgroups_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/getitimer_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/getlogin_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/getpid_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/getrusage_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/getsid_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/gettimeofday_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/issetugid_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/kevent_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/kill_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/link_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/listen_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/mincore_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/mkdir_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/mkfifo_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/mknod_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/mlock_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/mmap_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/mprotect_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgctl_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgget_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgrcv_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgsnd_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/msync_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/nanosleep_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/pipe2_test.debug 
OLD_FILES+=usr/tests/lib/libc/sys/.debug/pipe_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/poll_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/revoke_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/select_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/setrlimit_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/setuid_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/sigaction_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/sigqueue_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/sigtimedwait_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/socketpair_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/stat_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/timer_create_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/truncate_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/ucontext_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/umask_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/unlink_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/write_test.debug OLD_DIRS+=usr/tests/lib/libc/termios/.debug OLD_FILES+=usr/tests/lib/libc/termios/.debug/tcsetpgrp_test.debug OLD_DIRS+=usr/tests/lib/libc/tls/.debug OLD_FILES+=usr/tests/lib/libc/tls/.debug/h_tls_dlopen.so.debug OLD_FILES+=usr/tests/lib/libc/tls/.debug/libh_tls_dynamic.so.1.debug OLD_FILES+=usr/tests/lib/libc/tls/.debug/tls_dlopen_test.debug OLD_FILES+=usr/tests/lib/libc/tls/.debug/tls_dynamic_test.debug OLD_DIRS+=usr/tests/lib/libc/ttyio/.debug OLD_FILES+=usr/tests/lib/libc/ttyio/.debug/ttyio_test.debug OLD_DIRS+=usr/tests/lib/libcrypt/.debug OLD_FILES+=usr/tests/lib/libcrypt/.debug/crypt_tests.debug OLD_DIRS+=usr/tests/lib/libmp/.debug OLD_FILES+=usr/tests/lib/libmp/.debug/legacy_test.debug OLD_DIRS+=usr/tests/lib/libnv/.debug OLD_FILES+=usr/tests/lib/libnv/.debug/dnv_tests.debug OLD_FILES+=usr/tests/lib/libnv/.debug/nv_array_tests.debug OLD_FILES+=usr/tests/lib/libnv/.debug/nv_tests.debug OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_add_test.debug OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_exists_test.debug OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_free_test.debug OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_get_test.debug OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_move_test.debug OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_send_recv_test.debug OLD_DIRS+=usr/tests/lib/libpam/.debug OLD_FILES+=usr/tests/lib/libpam/.debug/t_openpam_ctype.debug OLD_FILES+=usr/tests/lib/libpam/.debug/t_openpam_readlinev.debug OLD_FILES+=usr/tests/lib/libpam/.debug/t_openpam_readword.debug OLD_DIRS+=usr/tests/lib/libproc/.debug OLD_FILES+=usr/tests/lib/libproc/.debug/proc_test.debug OLD_FILES+=usr/tests/lib/libproc/.debug/target_prog.debug OLD_DIRS+=usr/tests/lib/librt/.debug OLD_FILES+=usr/tests/lib/librt/.debug/sched_test.debug OLD_FILES+=usr/tests/lib/librt/.debug/sem_test.debug OLD_DIRS+=usr/tests/lib/libthr/.debug OLD_FILES+=usr/tests/lib/libthr/.debug/barrier_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/cond_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/condwait_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/detach_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/equal_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/fork_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/fpu_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/h_atexit.debug OLD_FILES+=usr/tests/lib/libthr/.debug/h_cancel.debug OLD_FILES+=usr/tests/lib/libthr/.debug/h_exit.debug OLD_FILES+=usr/tests/lib/libthr/.debug/h_resolv.debug OLD_FILES+=usr/tests/lib/libthr/.debug/join_test.debug 
OLD_FILES+=usr/tests/lib/libthr/.debug/kill_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/mutex_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/once_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/preempt_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/rwlock_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/sem_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/siglongjmp_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/sigmask_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/sigsuspend_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/sleep_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/swapcontext_test.debug OLD_DIRS+=usr/tests/lib/libthr/dlopen/.debug OLD_FILES+=usr/tests/lib/libthr/dlopen/.debug/dlopen_test.debug OLD_FILES+=usr/tests/lib/libthr/dlopen/.debug/h_pthread_dlopen.so.1.debug OLD_FILES+=usr/tests/lib/libthr/dlopen/.debug/main_pthread_create_test.debug OLD_DIRS+=usr/tests/lib/libutil/.debug OLD_FILES+=usr/tests/lib/libutil/.debug/flopen_test.debug OLD_FILES+=usr/tests/lib/libutil/.debug/grp_test.debug OLD_FILES+=usr/tests/lib/libutil/.debug/humanize_number_test.debug OLD_FILES+=usr/tests/lib/libutil/.debug/pidfile_test.debug OLD_FILES+=usr/tests/lib/libutil/.debug/trimdomain-nodomain_test.debug OLD_FILES+=usr/tests/lib/libutil/.debug/trimdomain_test.debug OLD_DIRS+=usr/tests/lib/libxo/.debug OLD_FILES+=usr/tests/lib/libxo/.debug/libenc_test.so.debug OLD_FILES+=usr/tests/lib/libxo/.debug/test_01.debug OLD_FILES+=usr/tests/lib/libxo/.debug/test_02.debug OLD_FILES+=usr/tests/lib/libxo/.debug/test_03.debug OLD_FILES+=usr/tests/lib/libxo/.debug/test_04.debug OLD_FILES+=usr/tests/lib/libxo/.debug/test_05.debug OLD_FILES+=usr/tests/lib/libxo/.debug/test_06.debug OLD_FILES+=usr/tests/lib/libxo/.debug/test_07.debug OLD_FILES+=usr/tests/lib/libxo/.debug/test_08.debug OLD_FILES+=usr/tests/lib/libxo/.debug/test_09.debug OLD_FILES+=usr/tests/lib/libxo/.debug/test_10.debug OLD_FILES+=usr/tests/lib/libxo/.debug/test_11.debug OLD_DIRS+=usr/tests/lib/msun/.debug OLD_FILES+=usr/tests/lib/msun/.debug/acos_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/asin_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/atan_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/cbrt_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/ceil_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/cos_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/cosh_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/erf_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/exp_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/fmod_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/infinity_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/ldexp_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/log_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/pow_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/precision_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/round_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/scalbn_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/sin_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/sinh_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/sqrt_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/tan_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/tanh_test.debug OLD_DIRS+=usr/tests/libexec/rtld-elf/.debug OLD_FILES+=usr/tests/libexec/rtld-elf/.debug/ld_library_pathfds.debug OLD_FILES+=usr/tests/libexec/rtld-elf/.debug/libpythagoras.so.0.debug OLD_FILES+=usr/tests/libexec/rtld-elf/.debug/target.debug OLD_DIRS+=usr/tests/sbin/devd/.debug OLD_FILES+=usr/tests/sbin/devd/.debug/client_test.debug 
OLD_DIRS+=usr/tests/sbin/dhclient/.debug OLD_FILES+=usr/tests/sbin/dhclient/.debug/option-domain-search_test.debug OLD_DIRS+=usr/tests/share/examples/tests/atf/.debug OLD_FILES+=usr/tests/share/examples/tests/atf/.debug/printf_test.debug OLD_DIRS+=usr/tests/share/examples/tests/plain/.debug OLD_FILES+=usr/tests/share/examples/tests/plain/.debug/printf_test.debug OLD_DIRS+=usr/tests/sys/aio/.debug OLD_FILES+=usr/tests/sys/aio/.debug/aio_kqueue_test.debug OLD_FILES+=usr/tests/sys/aio/.debug/aio_test.debug OLD_FILES+=usr/tests/sys/aio/.debug/lio_kqueue_test.debug OLD_DIRS+=usr/tests/sys/fifo/.debug OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_create.debug OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_io.debug OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_misc.debug OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_open.debug OLD_DIRS+=usr/tests/sys/file/.debug OLD_FILES+=usr/tests/sys/file/.debug/closefrom_test.debug OLD_FILES+=usr/tests/sys/file/.debug/dup_test.debug OLD_FILES+=usr/tests/sys/file/.debug/fcntlflags_test.debug OLD_FILES+=usr/tests/sys/file/.debug/flock_helper.debug OLD_FILES+=usr/tests/sys/file/.debug/ftruncate_test.debug OLD_FILES+=usr/tests/sys/file/.debug/newfileops_on_fork_test.debug OLD_DIRS+=usr/tests/sys/kern/.debug OLD_FILES+=usr/tests/sys/kern/.debug/kern_descrip_test.debug OLD_FILES+=usr/tests/sys/kern/.debug/ptrace_test.debug OLD_FILES+=usr/tests/sys/kern/.debug/unix_seqpacket_test.debug OLD_DIRS+=usr/tests/sys/kern/execve/.debug OLD_FILES+=usr/tests/sys/kern/execve/.debug/execve_helper.debug OLD_FILES+=usr/tests/sys/kern/execve/.debug/good_aout.debug OLD_DIRS+=usr/tests/sys/kqueue/.debug OLD_FILES+=usr/tests/sys/kqueue/.debug/kqtest.debug OLD_DIRS+=usr/tests/sys/mqueue/.debug OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest1.debug OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest2.debug OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest3.debug OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest4.debug OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest5.debug OLD_DIRS+=usr/tests/sys/netinet/.debug OLD_FILES+=usr/tests/sys/netinet/.debug/udp_dontroute.debug OLD_DIRS+=usr/tests/sys/pjdfstest/.debug OLD_FILES+=usr/tests/sys/pjdfstest/.debug/pjdfstest.debug OLD_DIRS+=usr/tests/sys/vm/.debug OLD_FILES+=usr/tests/sys/vm/.debug/mmap_test.debug # 20151015: Rename files due to file-installed-as-dir bug OLD_FILES+=usr/share/doc/legal/realtek OLD_FILES+=usr/share/doc/legal/realtek/LICENSE OLD_DIRS+=usr/share/doc/legal/realtek OLD_FILES+=usr/share/doc/legal/intel_ipw OLD_FILES+=usr/share/doc/legal/intel_ipw/LICENSE OLD_DIRS+=usr/share/doc/legal/intel_ipw OLD_FILES+=usr/share/doc/legal/intel_iwn OLD_FILES+=usr/share/doc/legal/intel_iwn/LICENSE OLD_DIRS+=usr/share/doc/legal/intel_iwn OLD_FILES+=usr/share/doc/legal/intel_iwi OLD_FILES+=usr/share/doc/legal/intel_iwi/LICENSE OLD_DIRS+=usr/share/doc/legal/intel_iwi OLD_FILES+=usr/share/doc/legal/intel_wpi OLD_FILES+=usr/share/doc/legal/intel_wpi/LICENSE OLD_DIRS+=usr/share/doc/legal/intel_wpi # 20151006: new libc++ import OLD_FILES+=usr/include/c++/__tuple_03 OLD_FILES+=usr/include/c++/v1/__tuple_03 OLD_FILES+=usr/include/c++/v1/tr1/__tuple_03 # 20151006: new clang import which bumps version from 3.6.1 to 3.7.0.
OLD_FILES+=usr/lib/clang/3.6.1/include/__stddef_max_align_t.h OLD_FILES+=usr/lib/clang/3.6.1/include/__wmmintrin_aes.h OLD_FILES+=usr/lib/clang/3.6.1/include/__wmmintrin_pclmul.h OLD_FILES+=usr/lib/clang/3.6.1/include/adxintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/altivec.h OLD_FILES+=usr/lib/clang/3.6.1/include/ammintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/arm_acle.h OLD_FILES+=usr/lib/clang/3.6.1/include/arm_neon.h OLD_FILES+=usr/lib/clang/3.6.1/include/avx2intrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/avx512bwintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/avx512erintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/avx512fintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/avx512vlbwintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/avx512vlintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/avxintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/bmi2intrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/bmiintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/cpuid.h OLD_FILES+=usr/lib/clang/3.6.1/include/emmintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/f16cintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/fma4intrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/fmaintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/ia32intrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/immintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/lzcntintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/mm3dnow.h OLD_FILES+=usr/lib/clang/3.6.1/include/mm_malloc.h OLD_FILES+=usr/lib/clang/3.6.1/include/mmintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/module.modulemap OLD_FILES+=usr/lib/clang/3.6.1/include/nmmintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/pmmintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/popcntintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/prfchwintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/rdseedintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/rtmintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/shaintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/smmintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/tbmintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/tmmintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/wmmintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/x86intrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/xmmintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/xopintrin.h OLD_DIRS+=usr/lib/clang/3.6.1/include OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan-i386.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan-x86_64.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.profile-arm.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.profile-i386.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.profile-x86_64.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.san-i386.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.san-x86_64.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan-i386.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan-x86_64.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a OLD_DIRS+=usr/lib/clang/3.6.1/lib/freebsd OLD_DIRS+=usr/lib/clang/3.6.1/lib OLD_DIRS+=usr/lib/clang/3.6.1 # 20150928: unused sgsmsg utility is removed OLD_FILES+=usr/bin/sgsmsg # 20150926: remove links to removed/unimplemented mbuf(9) macros OLD_FILES+=usr/share/man/man9/MEXT_ADD_REF.9.gz OLD_FILES+=usr/share/man/man9/MEXTFREE.9.gz 
OLD_FILES+=usr/share/man/man9/MEXT_IS_REF.9.gz OLD_FILES+=usr/share/man/man9/MEXT_REM_REF.9.gz OLD_FILES+=usr/share/man/man9/MFREE.9.gz # 20150818: *allocm() are gone in jemalloc 4.0.0 OLD_FILES+=usr/share/man/man3/allocm.3.gz OLD_FILES+=usr/share/man/man3/dallocm.3.gz OLD_FILES+=usr/share/man/man3/nallocm.3.gz OLD_FILES+=usr/share/man/man3/rallocm.3.gz OLD_FILES+=usr/share/man/man3/sallocm.3.gz # 20150802: Remove netbsd's test on pw(8) OLD_FILES+=usr/tests/usr.sbin/pw/pw_test # 20150719: Remove libarchive.pc OLD_FILES+=usr/libdata/pkgconfig/libarchive.pc # 20150705: Rename DTrace provider man pages. OLD_FILES+=usr/share/man/man4/dtrace-io.4.gz OLD_FILES+=usr/share/man/man4/dtrace-ip.4.gz OLD_FILES+=usr/share/man/man4/dtrace-proc.4.gz OLD_FILES+=usr/share/man/man4/dtrace-sched.4.gz OLD_FILES+=usr/share/man/man4/dtrace-tcp.4.gz OLD_FILES+=usr/share/man/man4/dtrace-udp.4.gz # 20150704: nvlist private headers no longer installed OLD_FILES+=usr/include/sys/nv_impl.h OLD_FILES+=usr/include/sys/nvlist_impl.h OLD_FILES+=usr/include/sys/nvpair_impl.h # 20150624 OLD_LIBS+=usr/lib/libugidfw.so.4 OLD_LIBS+=usr/lib32/libugidfw.so.4 # 20150604: Move nvlist man pages to section 9. OLD_FILES+=usr/share/man/man3/libnv.3.gz OLD_FILES+=usr/share/man/man3/nv.3.gz OLD_FILES+=usr/share/man/man3/nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_add_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_add_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_add_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_add_null.3.gz OLD_FILES+=usr/share/man/man3/nvlist_add_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_add_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_add_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_add_stringf.3.gz OLD_FILES+=usr/share/man/man3/nvlist_add_stringv.3.gz OLD_FILES+=usr/share/man/man3/nvlist_clone.3.gz OLD_FILES+=usr/share/man/man3/nvlist_create.3.gz OLD_FILES+=usr/share/man/man3/nvlist_destroy.3.gz OLD_FILES+=usr/share/man/man3/nvlist_dump.3.gz OLD_FILES+=usr/share/man/man3/nvlist_empty.3.gz OLD_FILES+=usr/share/man/man3/nvlist_error.3.gz OLD_FILES+=usr/share/man/man3/nvlist_exists.3.gz OLD_FILES+=usr/share/man/man3/nvlist_exists_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_exists_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_exists_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_exists_null.3.gz OLD_FILES+=usr/share/man/man3/nvlist_exists_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_exists_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_exists_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_exists_type.3.gz OLD_FILES+=usr/share/man/man3/nvlist_fdump.3.gz OLD_FILES+=usr/share/man/man3/nvlist_flags.3.gz OLD_FILES+=usr/share/man/man3/nvlist_free.3.gz OLD_FILES+=usr/share/man/man3/nvlist_free_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_free_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_free_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_free_null.3.gz OLD_FILES+=usr/share/man/man3/nvlist_free_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_free_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_free_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_free_type.3.gz OLD_FILES+=usr/share/man/man3/nvlist_get_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_get_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_get_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_get_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_get_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_get_parent.3.gz OLD_FILES+=usr/share/man/man3/nvlist_get_string.3.gz 
OLD_FILES+=usr/share/man/man3/nvlist_move_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_move_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_move_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_move_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_next.3.gz OLD_FILES+=usr/share/man/man3/nvlist_pack.3.gz OLD_FILES+=usr/share/man/man3/nvlist_recv.3.gz OLD_FILES+=usr/share/man/man3/nvlist_send.3.gz OLD_FILES+=usr/share/man/man3/nvlist_set_error.3.gz OLD_FILES+=usr/share/man/man3/nvlist_size.3.gz OLD_FILES+=usr/share/man/man3/nvlist_take_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_take_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_take_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_take_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_take_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_take_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_unpack.3.gz OLD_FILES+=usr/share/man/man3/nvlist_xfer.3.gz # 20150702: Remove duplicated nvlist includes. OLD_FILES+=usr/include/dnv.h OLD_FILES+=usr/include/nv.h # 20150528: PCI IOV device driver methods moved to a separate kobj interface. OLD_FILES+=usr/share/man/man9/PCI_ADD_VF.9.gz OLD_FILES+=usr/share/man/man9/PCI_INIT_IOV.9.gz OLD_FILES+=usr/share/man/man9/PCI_UNINIT_IOV.9.gz # 20150525: new clang import which bumps version from 3.6.0 to 3.6.1. OLD_FILES+=usr/lib/clang/3.6.0/include/__stddef_max_align_t.h OLD_FILES+=usr/lib/clang/3.6.0/include/__wmmintrin_aes.h OLD_FILES+=usr/lib/clang/3.6.0/include/__wmmintrin_pclmul.h OLD_FILES+=usr/lib/clang/3.6.0/include/adxintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/altivec.h OLD_FILES+=usr/lib/clang/3.6.0/include/ammintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/arm_acle.h OLD_FILES+=usr/lib/clang/3.6.0/include/arm_neon.h OLD_FILES+=usr/lib/clang/3.6.0/include/avx2intrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/avx512bwintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/avx512erintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/avx512fintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/avx512vlbwintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/avx512vlintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/avxintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/bmi2intrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/bmiintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/cpuid.h OLD_FILES+=usr/lib/clang/3.6.0/include/emmintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/f16cintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/fma4intrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/fmaintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/ia32intrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/immintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/lzcntintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/mm3dnow.h OLD_FILES+=usr/lib/clang/3.6.0/include/mm_malloc.h OLD_FILES+=usr/lib/clang/3.6.0/include/mmintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/module.modulemap OLD_FILES+=usr/lib/clang/3.6.0/include/nmmintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/pmmintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/popcntintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/prfchwintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/rdseedintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/rtmintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/shaintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/smmintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/tbmintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/tmmintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/wmmintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/x86intrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/xmmintrin.h 
OLD_FILES+=usr/lib/clang/3.6.0/include/xopintrin.h OLD_DIRS+=usr/lib/clang/3.6.0/include OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan-i386.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan-x86_64.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.profile-arm.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.profile-i386.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.profile-x86_64.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.san-i386.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.san-x86_64.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan-i386.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan-x86_64.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a OLD_DIRS+=usr/lib/clang/3.6.0/lib/freebsd OLD_DIRS+=usr/lib/clang/3.6.0/lib OLD_DIRS+=usr/lib/clang/3.6.0 # 20150521 OLD_FILES+=usr/bin/demandoc OLD_FILES+=usr/share/man/man1/demandoc.1.gz OLD_FILES+=usr/share/man/man3/mandoc.3.gz OLD_FILES+=usr/share/man/man3/mandoc_headers.3.gz # 20150520 OLD_FILES+=usr/lib/libheimsqlite.a OLD_FILES+=usr/lib/libheimsqlite.so OLD_LIBS+=usr/lib/libheimsqlite.so.11 OLD_FILES+=usr/lib/libheimsqlite_p.a OLD_FILES+=usr/lib32/libheimsqlite.a OLD_FILES+=usr/lib32/libheimsqlite.so OLD_LIBS+=usr/lib32/libheimsqlite.so.11 OLD_FILES+=usr/lib32/libheimsqlite_p.a # 20150518: tzdata2015c update OLD_FILES+=usr/share/zoneinfo/America/Montreal # 20150506 OLD_FILES+=usr/share/man/man9/NDHASGIANT.9.gz # 20150504 OLD_FILES+=usr/share/examples/etc/libmap32.conf OLD_FILES+=usr/include/bsdstat.h OLD_LIBS+=usr/lib32/private/libatf-c++.so.2 OLD_LIBS+=usr/lib32/private/libbsdstat.so.1 OLD_LIBS+=usr/lib32/private/libheimipcs.so.11 OLD_LIBS+=usr/lib32/private/libsqlite3.so.0 OLD_LIBS+=usr/lib32/private/libunbound.so.5 OLD_LIBS+=usr/lib32/private/libatf-c.so.1 OLD_LIBS+=usr/lib32/private/libheimipcc.so.11 OLD_LIBS+=usr/lib32/private/libldns.so.5 OLD_LIBS+=usr/lib32/private/libssh.so.5 OLD_LIBS+=usr/lib32/private/libucl.so.1 OLD_DIRS+=usr/lib32/private OLD_LIBS+=usr/lib/private/libatf-c++.so.2 OLD_LIBS+=usr/lib/private/libbsdstat.so.1 OLD_LIBS+=usr/lib/private/libheimipcs.so.11 OLD_LIBS+=usr/lib/private/libsqlite3.so.0 OLD_LIBS+=usr/lib/private/libunbound.so.5 OLD_LIBS+=usr/lib/private/libatf-c.so.1 OLD_LIBS+=usr/lib/private/libheimipcc.so.11 OLD_LIBS+=usr/lib/private/libldns.so.5 OLD_LIBS+=usr/lib/private/libssh.so.5 OLD_LIBS+=usr/lib/private/libucl.so.1 OLD_DIRS+=usr/lib/private # 20150501 OLD_FILES+=usr/bin/soeliminate OLD_FILES+=usr/share/man/man1/soeliminate.1.gz # 20150501: Remove the nvlist_.*[vf] functions manpages. 
OLD_FILES+=usr/share/man/man3/nvlist_addf_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addf_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addf_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addf_null.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addf_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addf_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addf_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addv_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addv_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addv_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addv_null.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addv_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addv_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addv_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsf.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsf_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsf_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsf_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsf_null.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsf_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsf_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsf_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsf_type.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsv.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsv_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsv_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsv_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsv_null.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsv_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsv_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsv_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsv_type.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freef.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freef_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freef_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freef_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freef_null.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freef_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freef_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freef_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freef_type.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freev.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freev_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freev_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freev_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freev_null.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freev_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freev_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freev_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freev_type.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getf_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getf_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getf_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getf_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getf_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getf_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getv_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getv_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getv_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getv_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getv_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getv_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_movef_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_movef_descriptor.3.gz 
OLD_FILES+=usr/share/man/man3/nvlist_movef_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_movef_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_movev_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_movev_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_movev_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_movev_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takef_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takef_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takef_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takef_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takef_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takef_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takev_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takev_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takev_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takev_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takev_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takev_string.3.gz # 20150429: remove never written documentation OLD_FILES+=usr/share/doc/papers/hwpmc.ascii.gz # 20150427: test/sys/kern/mmap_test moved to test/sys/vm/mmap_test OLD_FILES+=usr/tests/sys/kern/mmap_test # 20150422: zlib.c moved from net to libkern OLD_FILES+=usr/include/net/zlib.h OLD_FILES+=usr/include/net/zutil.h # 20150418 OLD_FILES+=sbin/mount_oldnfs OLD_FILES+=usr/share/man/man8/mount_oldnfs.8.gz # 20150416: ALTQ moved to net/altq OLD_FILES+=usr/include/altq/altq_rmclass_debug.h OLD_FILES+=usr/include/altq/altq.h OLD_FILES+=usr/include/altq/altq_cdnr.h OLD_FILES+=usr/include/altq/altq_hfsc.h OLD_FILES+=usr/include/altq/altq_priq.h OLD_FILES+=usr/include/altq/altqconf.h OLD_FILES+=usr/include/altq/altq_classq.h OLD_FILES+=usr/include/altq/altq_red.h OLD_FILES+=usr/include/altq/if_altq.h OLD_FILES+=usr/include/altq/altq_var.h OLD_FILES+=usr/include/altq/altq_rmclass.h OLD_FILES+=usr/include/altq/altq_cbq.h OLD_FILES+=usr/include/altq/altq_rio.h OLD_DIRS+=usr/include/altq # 20150330: ntp 4.2.8p1 OLD_FILES+=usr/share/doc/ntp/driver1.html OLD_FILES+=usr/share/doc/ntp/driver10.html OLD_FILES+=usr/share/doc/ntp/driver11.html OLD_FILES+=usr/share/doc/ntp/driver12.html OLD_FILES+=usr/share/doc/ntp/driver16.html OLD_FILES+=usr/share/doc/ntp/driver18.html OLD_FILES+=usr/share/doc/ntp/driver19.html OLD_FILES+=usr/share/doc/ntp/driver2.html OLD_FILES+=usr/share/doc/ntp/driver20.html OLD_FILES+=usr/share/doc/ntp/driver22.html OLD_FILES+=usr/share/doc/ntp/driver26.html OLD_FILES+=usr/share/doc/ntp/driver27.html OLD_FILES+=usr/share/doc/ntp/driver28.html OLD_FILES+=usr/share/doc/ntp/driver29.html OLD_FILES+=usr/share/doc/ntp/driver3.html OLD_FILES+=usr/share/doc/ntp/driver30.html OLD_FILES+=usr/share/doc/ntp/driver32.html OLD_FILES+=usr/share/doc/ntp/driver33.html OLD_FILES+=usr/share/doc/ntp/driver34.html OLD_FILES+=usr/share/doc/ntp/driver35.html OLD_FILES+=usr/share/doc/ntp/driver36.html OLD_FILES+=usr/share/doc/ntp/driver37.html OLD_FILES+=usr/share/doc/ntp/driver4.html OLD_FILES+=usr/share/doc/ntp/driver5.html OLD_FILES+=usr/share/doc/ntp/driver6.html OLD_FILES+=usr/share/doc/ntp/driver7.html OLD_FILES+=usr/share/doc/ntp/driver8.html OLD_FILES+=usr/share/doc/ntp/driver9.html OLD_FILES+=usr/share/doc/ntp/ldisc.html OLD_FILES+=usr/share/doc/ntp/measure.html OLD_FILES+=usr/share/doc/ntp/mx4200data.html OLD_FILES+=usr/share/doc/ntp/notes.html OLD_FILES+=usr/share/doc/ntp/patches.html OLD_FILES+=usr/share/doc/ntp/porting.html OLD_FILES+=usr/share/man/man1/sntp.1.gz # 20150329 .if ${TARGET_ARCH} == "arm" 
OLD_FILES+=usr/include/bootconfig.h .endif # 20150326 OLD_FILES+=usr/share/man/man1/pmcstudy.1.gz # 20150315: new clang import which bumps version from 3.5.1 to 3.6.0. OLD_FILES+=usr/include/clang/3.5.1/__wmmintrin_aes.h OLD_FILES+=usr/include/clang/3.5.1/__wmmintrin_pclmul.h OLD_FILES+=usr/include/clang/3.5.1/altivec.h OLD_FILES+=usr/include/clang/3.5.1/ammintrin.h OLD_FILES+=usr/include/clang/3.5.1/arm_acle.h OLD_FILES+=usr/include/clang/3.5.1/arm_neon.h OLD_FILES+=usr/include/clang/3.5.1/avx2intrin.h OLD_FILES+=usr/include/clang/3.5.1/avxintrin.h OLD_FILES+=usr/include/clang/3.5.1/bmi2intrin.h OLD_FILES+=usr/include/clang/3.5.1/bmiintrin.h OLD_FILES+=usr/include/clang/3.5.1/cpuid.h OLD_FILES+=usr/include/clang/3.5.1/emmintrin.h OLD_FILES+=usr/include/clang/3.5.1/f16cintrin.h OLD_FILES+=usr/include/clang/3.5.1/fma4intrin.h OLD_FILES+=usr/include/clang/3.5.1/fmaintrin.h OLD_FILES+=usr/include/clang/3.5.1/ia32intrin.h OLD_FILES+=usr/include/clang/3.5.1/immintrin.h OLD_FILES+=usr/include/clang/3.5.1/lzcntintrin.h OLD_FILES+=usr/include/clang/3.5.1/mm3dnow.h OLD_FILES+=usr/include/clang/3.5.1/mm_malloc.h OLD_FILES+=usr/include/clang/3.5.1/mmintrin.h OLD_FILES+=usr/include/clang/3.5.1/module.modulemap OLD_FILES+=usr/include/clang/3.5.1/nmmintrin.h OLD_FILES+=usr/include/clang/3.5.1/pmmintrin.h OLD_FILES+=usr/include/clang/3.5.1/popcntintrin.h OLD_FILES+=usr/include/clang/3.5.1/prfchwintrin.h OLD_FILES+=usr/include/clang/3.5.1/rdseedintrin.h OLD_FILES+=usr/include/clang/3.5.1/rtmintrin.h OLD_FILES+=usr/include/clang/3.5.1/shaintrin.h OLD_FILES+=usr/include/clang/3.5.1/smmintrin.h OLD_FILES+=usr/include/clang/3.5.1/tbmintrin.h OLD_FILES+=usr/include/clang/3.5.1/tmmintrin.h OLD_FILES+=usr/include/clang/3.5.1/wmmintrin.h OLD_FILES+=usr/include/clang/3.5.1/x86intrin.h OLD_FILES+=usr/include/clang/3.5.1/xmmintrin.h OLD_FILES+=usr/include/clang/3.5.1/xopintrin.h OLD_DIRS+=usr/include/clang/3.5.1 OLD_DIRS+=usr/include/clang OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan-i386.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan-x86_64.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-arm.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-i386.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-x86_64.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.san-i386.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.san-x86_64.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan-i386.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan-x86_64.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a OLD_DIRS+=usr/lib/clang/3.5.1/lib/freebsd OLD_DIRS+=usr/lib/clang/3.5.1/lib OLD_DIRS+=usr/lib/clang/3.5.1 # 20150302: binutils documentation distributed as a manpage OLD_FILES+=usr/share/doc/binutils/as.txt OLD_FILES+=usr/share/doc/binutils/ld.txt OLD_DIRS+=usr/share/doc/binutils # 20150222: Removed bcd(6) and ppt(6) OLD_FILES+=usr/bin/bcd OLD_FILES+=usr/bin/ppt OLD_FILES+=usr/share/man/man6/bcd.6.gz OLD_FILES+=usr/share/man/man6/ppt.6.gz # 20150217: Removed remnants of ar(4) driver OLD_FILES+=usr/include/dev/ic/hd64570.h # 20150212: /usr/games moving into /usr/bin OLD_FILES+=usr/games/bcd OLD_FILES+=usr/games/caesar OLD_FILES+=usr/games/factor OLD_FILES+=usr/games/fortune 
OLD_FILES+=usr/games/grdc OLD_FILES+=usr/games/morse OLD_FILES+=usr/games/number OLD_FILES+=usr/games/pom OLD_FILES+=usr/games/ppt OLD_FILES+=usr/games/primes OLD_FILES+=usr/games/random OLD_FILES+=usr/games/rot13 OLD_FILES+=usr/games/strfile OLD_FILES+=usr/games/unstr OLD_DIRS+=usr/games # 20150209: liblzma header OLD_FILES+=usr/include/lzma/lzma.h # 20150124: spl.9 and friends OLD_FILES+=usr/share/man/man9/spl.9.gz OLD_FILES+=usr/share/man/man9/spl0.9.gz OLD_FILES+=usr/share/man/man9/splbio.9.gz OLD_FILES+=usr/share/man/man9/splclock.9.gz OLD_FILES+=usr/share/man/man9/splhigh.9.gz OLD_FILES+=usr/share/man/man9/splimp.9.gz OLD_FILES+=usr/share/man/man9/splnet.9.gz OLD_FILES+=usr/share/man/man9/splsoftclock.9.gz OLD_FILES+=usr/share/man/man9/splsofttty.9.gz OLD_FILES+=usr/share/man/man9/splstatclock.9.gz OLD_FILES+=usr/share/man/man9/spltty.9.gz OLD_FILES+=usr/share/man/man9/splvm.9.gz OLD_FILES+=usr/share/man/man9/splx.9.gz # 20150118: toeplitz.c moved from netinet to net OLD_FILES+=usr/include/netinet/toeplitz.h # 20150118: new clang import which bumps version from 3.5.0 to 3.5.1. OLD_FILES+=usr/include/clang/3.5.0/__wmmintrin_aes.h OLD_FILES+=usr/include/clang/3.5.0/__wmmintrin_pclmul.h OLD_FILES+=usr/include/clang/3.5.0/altivec.h OLD_FILES+=usr/include/clang/3.5.0/ammintrin.h OLD_FILES+=usr/include/clang/3.5.0/arm_acle.h OLD_FILES+=usr/include/clang/3.5.0/arm_neon.h OLD_FILES+=usr/include/clang/3.5.0/avx2intrin.h OLD_FILES+=usr/include/clang/3.5.0/avxintrin.h OLD_FILES+=usr/include/clang/3.5.0/bmi2intrin.h OLD_FILES+=usr/include/clang/3.5.0/bmiintrin.h OLD_FILES+=usr/include/clang/3.5.0/cpuid.h OLD_FILES+=usr/include/clang/3.5.0/emmintrin.h OLD_FILES+=usr/include/clang/3.5.0/f16cintrin.h OLD_FILES+=usr/include/clang/3.5.0/fma4intrin.h OLD_FILES+=usr/include/clang/3.5.0/fmaintrin.h OLD_FILES+=usr/include/clang/3.5.0/ia32intrin.h OLD_FILES+=usr/include/clang/3.5.0/immintrin.h OLD_FILES+=usr/include/clang/3.5.0/lzcntintrin.h OLD_FILES+=usr/include/clang/3.5.0/mm3dnow.h OLD_FILES+=usr/include/clang/3.5.0/mm_malloc.h OLD_FILES+=usr/include/clang/3.5.0/mmintrin.h OLD_FILES+=usr/include/clang/3.5.0/module.modulemap OLD_FILES+=usr/include/clang/3.5.0/nmmintrin.h OLD_FILES+=usr/include/clang/3.5.0/pmmintrin.h OLD_FILES+=usr/include/clang/3.5.0/popcntintrin.h OLD_FILES+=usr/include/clang/3.5.0/prfchwintrin.h OLD_FILES+=usr/include/clang/3.5.0/rdseedintrin.h OLD_FILES+=usr/include/clang/3.5.0/rtmintrin.h OLD_FILES+=usr/include/clang/3.5.0/shaintrin.h OLD_FILES+=usr/include/clang/3.5.0/smmintrin.h OLD_FILES+=usr/include/clang/3.5.0/tbmintrin.h OLD_FILES+=usr/include/clang/3.5.0/tmmintrin.h OLD_FILES+=usr/include/clang/3.5.0/wmmintrin.h OLD_FILES+=usr/include/clang/3.5.0/x86intrin.h OLD_FILES+=usr/include/clang/3.5.0/xmmintrin.h OLD_FILES+=usr/include/clang/3.5.0/xopintrin.h OLD_DIRS+=usr/include/clang/3.5.0 OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan-i386.a OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan-x86_64.a OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-arm.a OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-i386.a OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-x86_64.a OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.san-i386.a OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.san-x86_64.a OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan-i386.a 
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan-x86_64.a OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a OLD_DIRS+=usr/lib/clang/3.5.0/lib/freebsd OLD_DIRS+=usr/lib/clang/3.5.0/lib OLD_DIRS+=usr/lib/clang/3.5.0 # 20150102: removal of asr(4) OLD_FILES+=usr/share/man/man4/asr.4.gz # 20150102: removal of texinfo OLD_FILES+=usr/bin/info OLD_FILES+=usr/bin/infokey OLD_FILES+=usr/bin/install-info OLD_FILES+=usr/bin/makeinfo OLD_FILES+=usr/bin/texindex OLD_FILES+=usr/share/info/am-utils.info.gz OLD_FILES+=usr/share/info/as.info.gz OLD_FILES+=usr/share/info/binutils.info.gz OLD_FILES+=usr/share/info/com_err.info.gz OLD_FILES+=usr/share/info/cpp.info.gz OLD_FILES+=usr/share/info/cppinternals.info.gz OLD_FILES+=usr/share/info/diff.info.gz OLD_FILES+=usr/share/info/dir OLD_FILES+=usr/share/info/gcc.info.gz OLD_FILES+=usr/share/info/gccint.info.gz OLD_FILES+=usr/share/info/gdb.info.gz OLD_FILES+=usr/share/info/gdbint.info.gz OLD_FILES+=usr/share/info/gperf.info.gz OLD_FILES+=usr/share/info/grep.info.gz OLD_FILES+=usr/share/info/groff.info.gz OLD_FILES+=usr/share/info/heimdal.info.gz OLD_FILES+=usr/share/info/history.info.gz OLD_FILES+=usr/share/info/info-stnd.info.gz OLD_FILES+=usr/share/info/info.info.gz OLD_FILES+=usr/share/info/ld.info.gz OLD_FILES+=usr/share/info/regex.info.gz OLD_FILES+=usr/share/info/rluserman.info.gz OLD_FILES+=usr/share/info/stabs.info.gz OLD_FILES+=usr/share/info/texinfo.info.gz OLD_FILES+=usr/share/man/man1/info.1.gz OLD_FILES+=usr/share/man/man1/infokey.1.gz OLD_FILES+=usr/share/man/man1/install-info.1.gz OLD_FILES+=usr/share/man/man1/makeinfo.1.gz OLD_FILES+=usr/share/man/man1/texindex.1.gz OLD_FILES+=usr/share/man/man5/info.5.gz OLD_FILES+=usr/share/man/man5/texinfo.5.gz OLD_DIRS+=usr/share/info # 20141231: new clang import which bumps version from 3.4.1 to 3.5.0. 
OLD_FILES+=usr/include/clang/3.4.1/__wmmintrin_aes.h OLD_FILES+=usr/include/clang/3.4.1/__wmmintrin_pclmul.h OLD_FILES+=usr/include/clang/3.4.1/altivec.h OLD_FILES+=usr/include/clang/3.4.1/ammintrin.h OLD_FILES+=usr/include/clang/3.4.1/arm_neon.h OLD_FILES+=usr/include/clang/3.4.1/avx2intrin.h OLD_FILES+=usr/include/clang/3.4.1/avxintrin.h OLD_FILES+=usr/include/clang/3.4.1/bmi2intrin.h OLD_FILES+=usr/include/clang/3.4.1/bmiintrin.h OLD_FILES+=usr/include/clang/3.4.1/cpuid.h OLD_FILES+=usr/include/clang/3.4.1/emmintrin.h OLD_FILES+=usr/include/clang/3.4.1/f16cintrin.h OLD_FILES+=usr/include/clang/3.4.1/fma4intrin.h OLD_FILES+=usr/include/clang/3.4.1/fmaintrin.h OLD_FILES+=usr/include/clang/3.4.1/immintrin.h OLD_FILES+=usr/include/clang/3.4.1/lzcntintrin.h OLD_FILES+=usr/include/clang/3.4.1/mm3dnow.h OLD_FILES+=usr/include/clang/3.4.1/mm_malloc.h OLD_FILES+=usr/include/clang/3.4.1/mmintrin.h OLD_FILES+=usr/include/clang/3.4.1/module.map OLD_FILES+=usr/include/clang/3.4.1/nmmintrin.h OLD_FILES+=usr/include/clang/3.4.1/pmmintrin.h OLD_FILES+=usr/include/clang/3.4.1/popcntintrin.h OLD_FILES+=usr/include/clang/3.4.1/prfchwintrin.h OLD_FILES+=usr/include/clang/3.4.1/rdseedintrin.h OLD_FILES+=usr/include/clang/3.4.1/rtmintrin.h OLD_FILES+=usr/include/clang/3.4.1/shaintrin.h OLD_FILES+=usr/include/clang/3.4.1/smmintrin.h OLD_FILES+=usr/include/clang/3.4.1/tbmintrin.h OLD_FILES+=usr/include/clang/3.4.1/tmmintrin.h OLD_FILES+=usr/include/clang/3.4.1/wmmintrin.h OLD_FILES+=usr/include/clang/3.4.1/x86intrin.h OLD_FILES+=usr/include/clang/3.4.1/xmmintrin.h OLD_FILES+=usr/include/clang/3.4.1/xopintrin.h OLD_DIRS+=usr/include/clang/3.4.1 # 20141225: Remove gpib/ieee488 OLD_FILES+=usr/include/dev/ieee488/ibfoo_int.h OLD_FILES+=usr/include/dev/ieee488/tnt4882.h OLD_FILES+=usr/include/dev/ieee488/ugpib.h OLD_FILES+=usr/include/dev/ieee488/upd7210.h OLD_DIRS+=usr/include/dev/ieee488 OLD_FILES+=usr/include/gpib/gpib.h OLD_DIRS+=usr/include/gpib OLD_FILES+=usr/lib/libgpib.a OLD_FILES+=usr/lib/libgpib_p.a OLD_FILES+=usr/lib/libgpib.so OLD_LIBS+=usr/lib/libgpib.so.3 OLD_FILES+=usr/lib32/libgpib.a OLD_FILES+=usr/lib32/libgpib_p.a OLD_FILES+=usr/lib32/libgpib.so OLD_LIBS+=usr/lib32/libgpib.so.3 OLD_FILES+=usr/share/man/man3/gpib.3.gz OLD_FILES+=usr/share/man/man3/ibclr.3.gz OLD_FILES+=usr/share/man/man3/ibdev.3.gz OLD_FILES+=usr/share/man/man3/ibdma.3.gz OLD_FILES+=usr/share/man/man3/ibeos.3.gz OLD_FILES+=usr/share/man/man3/ibeot.3.gz OLD_FILES+=usr/share/man/man3/ibloc.3.gz OLD_FILES+=usr/share/man/man3/ibonl.3.gz OLD_FILES+=usr/share/man/man3/ibpad.3.gz OLD_FILES+=usr/share/man/man3/ibrd.3.gz OLD_FILES+=usr/share/man/man3/ibsad.3.gz OLD_FILES+=usr/share/man/man3/ibsic.3.gz OLD_FILES+=usr/share/man/man3/ibtmo.3.gz OLD_FILES+=usr/share/man/man3/ibtrg.3.gz OLD_FILES+=usr/share/man/man3/ibwrt.3.gz OLD_FILES+=usr/share/man/man4/gpib.4.gz OLD_FILES+=usr/share/man/man4/pcii.4.gz OLD_FILES+=usr/share/man/man4/tnt4882.4.gz # 20141224: libxo moved to /lib OLD_LIBS+=usr/lib/libxo.so.0 # 20141223: remove in6_gif.h, in_gif.h and if_stf.h OLD_FILES+=usr/include/net/if_stf.h OLD_FILES+=usr/include/netinet/in_gif.h OLD_FILES+=usr/include/netinet6/in6_gif.h # 20141209: pw tests broken into a file per command OLD_FILES+=usr/tests/usr.sbin/pw/pw_delete OLD_FILES+=usr/tests/usr.sbin/pw/pw_modify # 20141202: update to mandoc CVS 20141201 OLD_FILES+=usr/bin/preconv OLD_FILES+=usr/share/man/man1/preconv.1.gz # 20141129: mrouted rc.d scripts removed from base OLD_FILES+=etc/rc.d/mrouted # 20141126: convert sbin/mdconfig/tests to ATF format tests
OLD_FILES+=usr/tests/sbin/mdconfig/legacy_test OLD_FILES+=usr/tests/sbin/mdconfig/mdconfig.test OLD_FILES+=usr/tests/sbin/mdconfig/run.pl # 20141126: remove xform_ipip decapsulation fallback OLD_FILES+=usr/include/netipsec/ipip_var.h # 20141122: mandoc updated to 1.13.1 OLD_FILES+=usr/share/mdocml/external.png # 20141111: SF_KQUEUE code removed OLD_FILES+=usr/include/sys/sf_base.h OLD_FILES+=usr/include/sys/sf_sync.h # 20141109: faith/faithd removal OLD_FILES+=etc/rc.d/faith OLD_FILES+=usr/share/man/man4/faith.4.gz OLD_FILES+=usr/share/man/man4/if_faith.4.gz OLD_FILES+=usr/sbin/faithd OLD_FILES+=usr/share/man/man8/faithd.8.gz # 20141107: overhaul if_gre(4) OLD_FILES+=usr/include/netinet/ip_gre.h # 20141102: postrandom obsoleted by new /dev/random code OLD_FILES+=etc/rc.d/postrandom # 20141031: initrandom obsoleted by new /dev/random code OLD_FILES+=etc/rc.d/initrandom # 20141030: atf 0.21 import OLD_FILES+=usr/share/man/man3/atf-c++-api.3.gz # 20141028: debug files accidentally installed as directory name OLD_FILES+=usr/lib/debug/usr/lib/i18n OLD_FILES+=usr/lib/debug/usr/lib/private OLD_FILES+=usr/lib/debug/usr/lib32/i18n OLD_FILES+=usr/lib/debug/usr/lib32/private # 20141015: OpenSSL 1.0.1j import OLD_FILES+=usr/share/openssl/man/man3/CMS_sign_add1_signer.3.gz # 20141003: libproc version bump OLD_LIBS+=usr/lib/libproc.so.2 OLD_LIBS+=usr/lib32/libproc.so.2 # 20140922: sleepq_calc_signal_retval.9 and sleepq_catch_signals.9 removed OLD_FILES+=usr/share/man/man9/sleepq_calc_signal_retval.9.gz OLD_FILES+=usr/share/man/man9/sleepq_catch_signals.9.gz # 20140917: hv_kvpd rc.d script removed in favor of devd configuration OLD_FILES+=etc/rc.d/hv_kvpd # 20140917: libnv was accidentally being installed to /usr/lib instead of /lib OLD_LIBS+=usr/lib/libnv.so.0 # 20140829: rc.d/kerberos removed OLD_FILES+=etc/rc.d/kerberos # 20140827: tzdata2014f import OLD_FILES+=usr/share/zoneinfo/Asia/Chongqing OLD_FILES+=usr/share/zoneinfo/Asia/Harbin OLD_FILES+=usr/share/zoneinfo/Asia/Kashgar # 20140814: libopie version bump OLD_LIBS+=usr/lib/libopie.so.7 OLD_LIBS+=usr/lib32/libopie.so.7 # 20140811: otp-sha renamed to otp-sha1 OLD_FILES+=usr/bin/otp-sha OLD_FILES+=usr/share/man/man1/otp-sha.1.gz # 20140807: Remove private lib files that should not be installed.
OLD_FILES+=usr/lib32/private/libatf-c.a OLD_FILES+=usr/lib32/private/libatf-c.so OLD_FILES+=usr/lib32/private/libatf-c_p.a OLD_FILES+=usr/lib32/private/libatf-c++.a OLD_FILES+=usr/lib32/private/libatf-c++.so OLD_FILES+=usr/lib32/private/libatf-c++_p.a OLD_FILES+=usr/lib32/private/libbsdstat.a OLD_FILES+=usr/lib32/private/libbsdstat.so OLD_FILES+=usr/lib32/private/libbsdstat_p.a OLD_FILES+=usr/lib32/private/libheimipcc.a OLD_FILES+=usr/lib32/private/libheimipcc.so OLD_FILES+=usr/lib32/private/libheimipcc_p.a OLD_FILES+=usr/lib32/private/libheimipcs.a OLD_FILES+=usr/lib32/private/libheimipcs.so OLD_FILES+=usr/lib32/private/libheimipcs_p.a OLD_FILES+=usr/lib32/private/libldns.a OLD_FILES+=usr/lib32/private/libldns.so OLD_FILES+=usr/lib32/private/libldns_p.a OLD_FILES+=usr/lib32/private/libssh.a OLD_FILES+=usr/lib32/private/libssh.so OLD_FILES+=usr/lib32/private/libssh_p.a OLD_FILES+=usr/lib32/private/libunbound.a OLD_FILES+=usr/lib32/private/libunbound.so OLD_FILES+=usr/lib32/private/libunbound_p.a OLD_FILES+=usr/lib32/private/libucl.a OLD_FILES+=usr/lib32/private/libucl.so OLD_FILES+=usr/lib32/private/libucl_p.a OLD_FILES+=usr/lib/private/libatf-c.a OLD_FILES+=usr/lib/private/libatf-c.so OLD_FILES+=usr/lib/private/libatf-c_p.a OLD_FILES+=usr/lib/private/libatf-c++.a OLD_FILES+=usr/lib/private/libatf-c++.so OLD_FILES+=usr/lib/private/libatf-c++_p.a OLD_FILES+=usr/lib/private/libbsdstat.a OLD_FILES+=usr/lib/private/libbsdstat.so OLD_FILES+=usr/lib/private/libbsdstat_p.a OLD_FILES+=usr/lib/private/libheimipcc.a OLD_FILES+=usr/lib/private/libheimipcc.so OLD_FILES+=usr/lib/private/libheimipcc_p.a OLD_FILES+=usr/lib/private/libheimipcs.a OLD_FILES+=usr/lib/private/libheimipcs.so OLD_FILES+=usr/lib/private/libheimipcs_p.a OLD_FILES+=usr/lib/private/libldns.a OLD_FILES+=usr/lib/private/libldns.so OLD_FILES+=usr/lib/private/libldns_p.a OLD_FILES+=usr/lib/private/libssh.a OLD_FILES+=usr/lib/private/libssh.so OLD_FILES+=usr/lib/private/libssh_p.a OLD_FILES+=usr/lib/private/libunbound.a OLD_FILES+=usr/lib/private/libunbound.so OLD_FILES+=usr/lib/private/libunbound_p.a OLD_FILES+=usr/lib/private/libucl.a OLD_FILES+=usr/lib/private/libucl.so OLD_FILES+=usr/lib/private/libucl_p.a # 20140803: Remove an obsolete man page OLD_FILES+=usr/share/man/man9/pmap_change_wiring.9.gz # 20140731 OLD_FILES+=usr/share/man/man9/SYSCTL_ADD_OID.9.gz # 20140728: libsbuf restored to old version. 
OLD_LIBS+=lib/libsbuf.so.7 OLD_LIBS+=usr/lib32/libsbuf.so.7 # 20140728: Remove obsolete man pages OLD_FILES+=usr/share/man/man9/VOP_GETVOBJECT.9.gz OLD_FILES+=usr/share/man/man9/VOP_CREATEVOBJECT.9.gz OLD_FILES+=usr/share/man/man9/VOP_DESTROYVOBJECT.9.gz # 20140723: renamed to PCBGROUP.9 OLD_FILES+=usr/share/man/man9/PCBGROUPS.9.gz # 20140722: browse_packages_ftp.sh removed OLD_FILES+=usr/share/examples/bsdconfig/browse_packages_ftp.sh # 20140718: Remove obsolete man pages OLD_FILES+=usr/share/man/man9/zero_copy.9.gz OLD_FILES+=usr/share/man/man9/zero_copy_sockets.9.gz # 20140718: Remove an obsolete man page OLD_FILES+=usr/share/man/man9/pmap_page_protect.9.gz # 20140717: Remove an obsolete man page OLD_FILES+=usr/share/man/man9/pmap_clear_reference.9.gz # 20140716: Remove an incorrectly named man page OLD_FILES+=usr/share/man/man9/pmap_ts_modified.9.gz # 20140712: Removal of bsd.dtrace.mk OLD_FILES+=usr/share/mk/bsd.dtrace.mk # 20140705: turn libreadline into an internal lib OLD_LIBS+=lib/libreadline.so.8 OLD_FILES+=usr/lib/libreadline.a OLD_FILES+=usr/lib/libreadline_p.a OLD_FILES+=usr/lib/libreadline.so OLD_FILES+=usr/lib/libhistory.a OLD_FILES+=usr/lib/libhistory_p.a OLD_FILES+=usr/lib/libhistory.so OLD_LIBS+=usr/lib/libhistory.so.8 OLD_FILES+=usr/lib32/libhistory.a OLD_FILES+=usr/lib32/libhistory.so OLD_LIBS+=usr/lib32/libhistory.so.8 OLD_FILES+=usr/lib32/libhistory_p.a OLD_FILES+=usr/lib32/libreadline.a OLD_FILES+=usr/lib32/libreadline.so OLD_LIBS+=usr/lib32/libreadline.so.8 OLD_FILES+=usr/lib32/libreadline_p.a OLD_FILES+=usr/include/readline/chardefs.h OLD_FILES+=usr/include/readline/history.h OLD_FILES+=usr/include/readline/keymaps.h OLD_FILES+=usr/include/readline/readline.h OLD_FILES+=usr/include/readline/tilde.h OLD_FILES+=usr/include/readline/rlconf.h OLD_FILES+=usr/include/readline/rlstdc.h OLD_FILES+=usr/include/readline/rltypedefs.h OLD_DIRS+=usr/include/readline OLD_FILES+=usr/share/info/readline.info.gz OLD_FILES+=usr/share/man/man3/readline.3.gz OLD_FILES+=usr/share/man/man3/rlhistory.3.gz # 20140625: csup removal OLD_FILES+=usr/bin/csup OLD_FILES+=usr/bin/cpasswd OLD_FILES+=usr/share/man/man1/csup.1.gz OLD_FILES+=usr/share/man/man1/cpasswd.1.gz OLD_FILES+=usr/share/examples/cvsup/README OLD_FILES+=usr/share/examples/cvsup/cvs-supfile OLD_FILES+=usr/share/examples/cvsup/stable-supfile OLD_FILES+=usr/share/examples/cvsup/standard-supfile OLD_DIRS+=usr/share/examples/cvsup # 20140614: send-pr removal OLD_FILES+=usr/bin/sendbug OLD_FILES+=usr/share/info/send-pr.info.gz OLD_FILES+=usr/share/man/man1/send-pr.1.gz OLD_FILES+=usr/share/man/man1/sendbug.1.gz OLD_FILES+=etc/gnats/freefall OLD_DIRS+=etc/gnats # 20140512: new clang import which bumps version from 3.4 to 3.4.1.
OLD_FILES+=usr/include/clang/3.4/__wmmintrin_aes.h OLD_FILES+=usr/include/clang/3.4/__wmmintrin_pclmul.h OLD_FILES+=usr/include/clang/3.4/altivec.h OLD_FILES+=usr/include/clang/3.4/ammintrin.h OLD_FILES+=usr/include/clang/3.4/avx2intrin.h OLD_FILES+=usr/include/clang/3.4/avxintrin.h OLD_FILES+=usr/include/clang/3.4/bmi2intrin.h OLD_FILES+=usr/include/clang/3.4/bmiintrin.h OLD_FILES+=usr/include/clang/3.4/cpuid.h OLD_FILES+=usr/include/clang/3.4/emmintrin.h OLD_FILES+=usr/include/clang/3.4/f16cintrin.h OLD_FILES+=usr/include/clang/3.4/fma4intrin.h OLD_FILES+=usr/include/clang/3.4/fmaintrin.h OLD_FILES+=usr/include/clang/3.4/immintrin.h OLD_FILES+=usr/include/clang/3.4/lzcntintrin.h OLD_FILES+=usr/include/clang/3.4/mm3dnow.h OLD_FILES+=usr/include/clang/3.4/mm_malloc.h OLD_FILES+=usr/include/clang/3.4/mmintrin.h OLD_FILES+=usr/include/clang/3.4/module.map OLD_FILES+=usr/include/clang/3.4/nmmintrin.h OLD_FILES+=usr/include/clang/3.4/pmmintrin.h OLD_FILES+=usr/include/clang/3.4/popcntintrin.h OLD_FILES+=usr/include/clang/3.4/prfchwintrin.h OLD_FILES+=usr/include/clang/3.4/rdseedintrin.h OLD_FILES+=usr/include/clang/3.4/rtmintrin.h OLD_FILES+=usr/include/clang/3.4/shaintrin.h OLD_FILES+=usr/include/clang/3.4/smmintrin.h OLD_FILES+=usr/include/clang/3.4/tbmintrin.h OLD_FILES+=usr/include/clang/3.4/tmmintrin.h OLD_FILES+=usr/include/clang/3.4/wmmintrin.h OLD_FILES+=usr/include/clang/3.4/x86intrin.h OLD_FILES+=usr/include/clang/3.4/xmmintrin.h OLD_FILES+=usr/include/clang/3.4/xopintrin.h OLD_FILES+=usr/include/clang/3.4/arm_neon.h OLD_DIRS+=usr/include/clang/3.4 # 20140505: Bogusly installing src.opts.mk OLD_FILES+=usr/share/mk/src.opts.mk # 20140505: Reject PR kern/187551 OLD_FILES+=usr/tests/sbin/ifconfig/fibs_test # 20140502: Removal of lindev(4) OLD_FILES+=usr/share/man/man4/lindev.4.gz # 20140425 OLD_FILES+=usr/lib/libssp_p.a OLD_FILES+=usr/lib/libstand_p.a OLD_FILES+=usr/lib32/libssp_p.a OLD_FILES+=usr/lib32/libstand_p.a # 20140314: AppleTalk OLD_DIRS+=usr/include/netatalk OLD_FILES+=usr/include/netatalk/aarp.h OLD_FILES+=usr/include/netatalk/at.h OLD_FILES+=usr/include/netatalk/at_extern.h OLD_FILES+=usr/include/netatalk/at_var.h OLD_FILES+=usr/include/netatalk/ddp.h OLD_FILES+=usr/include/netatalk/ddp_pcb.h OLD_FILES+=usr/include/netatalk/ddp_var.h OLD_FILES+=usr/include/netatalk/endian.h OLD_FILES+=usr/include/netatalk/phase2.h # 20140314: Remove IPX/SPX OLD_LIBS+=lib/libipx.so.5 OLD_FILES+=usr/include/netipx/ipx.h OLD_FILES+=usr/include/netipx/ipx_if.h OLD_FILES+=usr/include/netipx/ipx_pcb.h OLD_FILES+=usr/include/netipx/ipx_var.h OLD_FILES+=usr/include/netipx/spx.h OLD_FILES+=usr/include/netipx/spx_debug.h OLD_FILES+=usr/include/netipx/spx_timer.h OLD_FILES+=usr/include/netipx/spx_var.h OLD_DIRS+=usr/include/netipx OLD_FILES+=usr/lib/libipx.a OLD_FILES+=usr/lib/libipx.so OLD_FILES+=usr/lib/libipx_p.a OLD_FILES+=usr/lib32/libipx.a OLD_FILES+=usr/lib32/libipx.so OLD_LIBS+=usr/lib32/libipx.so.5 OLD_FILES+=usr/lib32/libipx_p.a OLD_FILES+=usr/sbin/IPXrouted OLD_FILES+=usr/share/man/man3/ipx.3.gz OLD_FILES+=usr/share/man/man3/ipx_addr.3.gz OLD_FILES+=usr/share/man/man3/ipx_ntoa.3.gz OLD_FILES+=usr/share/man/man4/ef.4.gz OLD_FILES+=usr/share/man/man4/if_ef.4.gz OLD_FILES+=usr/share/man/man8/IPXrouted.8.gz # 20140314: bsdconfig usermgmt rewrite OLD_FILES+=usr/libexec/bsdconfig/070.usermgmt/userinput # 20140307: bsdconfig groupmgmt rewrite OLD_FILES+=usr/libexec/bsdconfig/070.usermgmt/groupinput # 20140223: Remove libyaml
OLD_FILES+=usr/lib/private/libyaml.a OLD_FILES+=usr/lib/private/libyaml.so OLD_LIBS+=usr/lib/private/libyaml.so.1 OLD_FILES+=usr/lib/private/libyaml_p.a OLD_FILES+=usr/lib32/private/libyaml.a OLD_FILES+=usr/lib32/private/libyaml.so OLD_LIBS+=usr/lib32/private/libyaml.so.1 OLD_FILES+=usr/lib32/private/libyaml_p.a # 20140216: new clang import which bumps version from 3.3 to 3.4. OLD_FILES+=usr/bin/llvm-prof OLD_FILES+=usr/include/clang/3.3/__wmmintrin_aes.h OLD_FILES+=usr/include/clang/3.3/__wmmintrin_pclmul.h OLD_FILES+=usr/include/clang/3.3/altivec.h OLD_FILES+=usr/include/clang/3.3/ammintrin.h OLD_FILES+=usr/include/clang/3.3/avx2intrin.h OLD_FILES+=usr/include/clang/3.3/avxintrin.h OLD_FILES+=usr/include/clang/3.3/bmi2intrin.h OLD_FILES+=usr/include/clang/3.3/bmiintrin.h OLD_FILES+=usr/include/clang/3.3/cpuid.h OLD_FILES+=usr/include/clang/3.3/emmintrin.h OLD_FILES+=usr/include/clang/3.3/f16cintrin.h OLD_FILES+=usr/include/clang/3.3/fma4intrin.h OLD_FILES+=usr/include/clang/3.3/fmaintrin.h OLD_FILES+=usr/include/clang/3.3/immintrin.h OLD_FILES+=usr/include/clang/3.3/lzcntintrin.h OLD_FILES+=usr/include/clang/3.3/mm3dnow.h OLD_FILES+=usr/include/clang/3.3/mm_malloc.h OLD_FILES+=usr/include/clang/3.3/mmintrin.h OLD_FILES+=usr/include/clang/3.3/module.map OLD_FILES+=usr/include/clang/3.3/nmmintrin.h OLD_FILES+=usr/include/clang/3.3/pmmintrin.h OLD_FILES+=usr/include/clang/3.3/popcntintrin.h OLD_FILES+=usr/include/clang/3.3/prfchwintrin.h OLD_FILES+=usr/include/clang/3.3/rdseedintrin.h OLD_FILES+=usr/include/clang/3.3/rtmintrin.h OLD_FILES+=usr/include/clang/3.3/smmintrin.h OLD_FILES+=usr/include/clang/3.3/tmmintrin.h OLD_FILES+=usr/include/clang/3.3/wmmintrin.h OLD_FILES+=usr/include/clang/3.3/x86intrin.h OLD_FILES+=usr/include/clang/3.3/xmmintrin.h OLD_FILES+=usr/include/clang/3.3/xopintrin.h OLD_FILES+=usr/share/man/man1/llvm-prof.1.gz OLD_FILES+=usr/share/man/man1/llvm-ranlib.1.gz OLD_DIRS+=usr/include/clang/3.3 # 20140216: nve(4) removed OLD_FILES+=usr/share/man/man4/if_nve.4.gz OLD_FILES+=usr/share/man/man4/nve.4.gz # 20140205: Open Firmware device moved OLD_FILES+=usr/include/dev/ofw/ofw_nexus.h # 20140128: libelf and libdwarf import OLD_LIBS+=usr/lib/libelf.so.1 OLD_LIBS+=usr/lib32/libelf.so.1 OLD_LIBS+=usr/lib/libdwarf.so.3 OLD_LIBS+=usr/lib32/libdwarf.so.3 # 20140123: apicvar header moved to x86 OLD_FILES+=usr/include/machine/apicvar.h # 20131215: libcam version bumped OLD_LIBS+=lib/libcam.so.6 usr/lib32/libcam.so.6 # 20131202: libcapsicum and libcasper moved to /lib/ OLD_LIBS+=usr/lib/libcapsicum.so.0 OLD_LIBS+=usr/lib/libcasper.so.0 # 20131109: extattr(2) mlinks fixed OLD_FILES+=usr/share/man/man2/extattr_delete_list.2.gz OLD_FILES+=usr/share/man/man2/extattr_get_list.2.gz # 20131107: example files removed OLD_FILES+=usr/share/examples/libusb20/aux.c OLD_FILES+=usr/share/examples/libusb20/aux.h # 20131105: tzdata 2013h import OLD_FILES+=usr/share/zoneinfo/America/Shiprock OLD_FILES+=usr/share/zoneinfo/Antarctica/South_Pole # 20131103: WITH_LIBICONV_COMPAT removal OLD_FILES+=usr/include/_libiconv_compat.h OLD_FILES+=usr/lib/libiconv.a OLD_FILES+=usr/lib/libiconv.so OLD_FILES+=usr/lib/libiconv.so.3 OLD_FILES+=usr/lib/libiconv_p.a OLD_FILES+=usr/lib32/libiconv.a OLD_FILES+=usr/lib32/libiconv.so OLD_FILES+=usr/lib32/libiconv.so.3 OLD_FILES+=usr/lib32/libiconv_p.a # 20131103: removal of utxrm(8), use 'utx rm' instead. 
OLD_FILES+=usr/sbin/utxrm OLD_FILES+=usr/share/man/man8/utxrm.8.gz # 20131031: pkg_install has been removed OLD_FILES+=etc/periodic/daily/220.backup-pkgdb OLD_FILES+=etc/periodic/daily/490.status-pkg-changes OLD_FILES+=etc/periodic/security/460.chkportsum OLD_FILES+=etc/periodic/weekly/400.status-pkg OLD_FILES+=usr/sbin/pkg_add OLD_FILES+=usr/sbin/pkg_create OLD_FILES+=usr/sbin/pkg_delete OLD_FILES+=usr/sbin/pkg_info OLD_FILES+=usr/sbin/pkg_updating OLD_FILES+=usr/sbin/pkg_version OLD_FILES+=usr/share/man/man1/pkg_add.1.gz OLD_FILES+=usr/share/man/man1/pkg_create.1.gz OLD_FILES+=usr/share/man/man1/pkg_delete.1.gz OLD_FILES+=usr/share/man/man1/pkg_info.1.gz OLD_FILES+=usr/share/man/man1/pkg_updating.1.gz OLD_FILES+=usr/share/man/man1/pkg_version.1.gz # 20131030: /etc/keys moved to /usr/share/keys OLD_DIRS+=etc/keys OLD_DIRS+=etc/keys/pkg OLD_DIRS+=etc/keys/pkg/revoked OLD_DIRS+=etc/keys/pkg/trusted OLD_FILES+=etc/keys/pkg/trusted/pkg.freebsd.org.2013102301 # 20131028: ng_fec(4) removed OLD_FILES+=usr/include/netgraph/ng_fec.h OLD_FILES+=usr/share/man/man4/ng_fec.4.gz # 20131027: header moved OLD_FILES+=usr/include/net/pf_mtag.h # 20131023: remove never used iscsi directory OLD_DIRS+=usr/share/examples/iscsi # 20131021: isf(4) removed OLD_FILES+=usr/sbin/isfctl OLD_FILES+=usr/share/man/man4/isf.4.gz OLD_FILES+=usr/share/man/man8/isfctl.8.gz # 20131014: libbsdyml becomes private OLD_FILES+=usr/lib/libbsdyml.a OLD_FILES+=usr/lib/libbsdyml.so OLD_LIBS+=usr/lib/libbsdyml.so.0 OLD_FILES+=usr/lib/libbsdyml_p.a OLD_FILES+=usr/lib32/libbsdyml.a OLD_FILES+=usr/lib32/libbsdyml.so OLD_LIBS+=usr/lib32/libbsdyml.so.0 OLD_FILES+=usr/lib32/libbsdyml_p.a OLD_FILES+=usr/share/man/man3/libbsdyml.3.gz OLD_FILES+=usr/include/bsdyml.h # 20131013: Removal of the ATF tools OLD_FILES+=etc/atf/FreeBSD.conf OLD_FILES+=etc/atf/atf-run.hooks OLD_FILES+=etc/atf/common.conf OLD_FILES+=usr/bin/atf-config OLD_FILES+=usr/bin/atf-report OLD_FILES+=usr/bin/atf-run OLD_FILES+=usr/bin/atf-version OLD_FILES+=usr/share/atf/atf-run.hooks OLD_FILES+=usr/share/examples/atf/atf-run.hooks OLD_FILES+=usr/share/examples/atf/tests-results.css OLD_FILES+=usr/share/man/man1/atf-config.1.gz OLD_FILES+=usr/share/man/man1/atf-report.1.gz OLD_FILES+=usr/share/man/man1/atf-run.1.gz OLD_FILES+=usr/share/man/man1/atf-version.1.gz OLD_FILES+=usr/share/man/man5/atf-formats.5.gz OLD_FILES+=usr/share/xml/atf/tests-results.dtd OLD_FILES+=usr/share/xsl/atf/tests-results.xsl # 20131009: freebsd-version moved from /libexec to /bin OLD_FILES+=libexec/freebsd-version # 20131001: ar and ranlib from binutils not used OLD_FILES+=usr/bin/gnu-ar OLD_FILES+=usr/bin/gnu-ranlib OLD_FILES+=usr/share/man/man1/gnu-ar.1.gz OLD_FILES+=usr/share/man/man1/gnu-ranlib.1.gz # 20130930: BIND removed from base OLD_FILES+=etc/mtree/BIND.chroot.dist OLD_FILES+=etc/namedb OLD_FILES+=etc/periodic/daily/470.status-named OLD_FILES+=usr/bin/dig OLD_FILES+=usr/bin/nslookup OLD_FILES+=usr/bin/nsupdate OLD_DIRS+=usr/include/lwres OLD_FILES+=usr/include/lwres/context.h OLD_FILES+=usr/include/lwres/int.h OLD_FILES+=usr/include/lwres/ipv6.h OLD_FILES+=usr/include/lwres/lang.h OLD_FILES+=usr/include/lwres/list.h OLD_FILES+=usr/include/lwres/lwbuffer.h OLD_FILES+=usr/include/lwres/lwpacket.h OLD_FILES+=usr/include/lwres/lwres.h OLD_FILES+=usr/include/lwres/net.h OLD_FILES+=usr/include/lwres/netdb.h OLD_FILES+=usr/include/lwres/platform.h OLD_FILES+=usr/include/lwres/result.h OLD_FILES+=usr/include/lwres/string.h OLD_FILES+=usr/include/lwres/version.h OLD_FILES+=usr/lib/liblwres.a 
OLD_FILES+=usr/lib/liblwres.so OLD_LIBS+=usr/lib/liblwres.so.90 OLD_FILES+=usr/lib/liblwres_p.a OLD_FILES+=usr/sbin/arpaname OLD_FILES+=usr/sbin/ddns-confgen OLD_FILES+=usr/sbin/dnssec-dsfromkey OLD_FILES+=usr/sbin/dnssec-keyfromlabel OLD_FILES+=usr/sbin/dnssec-keygen OLD_FILES+=usr/sbin/dnssec-revoke OLD_FILES+=usr/sbin/dnssec-settime OLD_FILES+=usr/sbin/dnssec-signzone OLD_FILES+=usr/sbin/dnssec-verify OLD_FILES+=usr/sbin/genrandom OLD_FILES+=usr/sbin/isc-hmac-fixup OLD_FILES+=usr/sbin/lwresd OLD_FILES+=usr/sbin/named OLD_FILES+=usr/sbin/named-checkconf OLD_FILES+=usr/sbin/named-checkzone OLD_FILES+=usr/sbin/named-compilezone OLD_FILES+=usr/sbin/named-journalprint OLD_FILES+=usr/sbin/named.reconfig OLD_FILES+=usr/sbin/named.reload OLD_FILES+=usr/sbin/nsec3hash OLD_FILES+=usr/sbin/rndc OLD_FILES+=usr/sbin/rndc-confgen OLD_DIRS+=usr/share/doc/bind9 OLD_FILES+=usr/share/doc/bind9/CHANGES OLD_FILES+=usr/share/doc/bind9/COPYRIGHT OLD_FILES+=usr/share/doc/bind9/FAQ OLD_FILES+=usr/share/doc/bind9/HISTORY OLD_FILES+=usr/share/doc/bind9/README OLD_DIRS+=usr/share/doc/bind9/arm OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch01.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch02.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch03.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch04.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch05.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch06.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch07.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch08.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch09.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch10.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.pdf OLD_FILES+=usr/share/doc/bind9/arm/man.arpaname.html OLD_FILES+=usr/share/doc/bind9/arm/man.ddns-confgen.html OLD_FILES+=usr/share/doc/bind9/arm/man.dig.html OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-dsfromkey.html OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-keyfromlabel.html OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-keygen.html OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-revoke.html OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-settime.html OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-signzone.html OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-verify.html OLD_FILES+=usr/share/doc/bind9/arm/man.genrandom.html OLD_FILES+=usr/share/doc/bind9/arm/man.host.html OLD_FILES+=usr/share/doc/bind9/arm/man.isc-hmac-fixup.html OLD_FILES+=usr/share/doc/bind9/arm/man.named-checkconf.html OLD_FILES+=usr/share/doc/bind9/arm/man.named-checkzone.html OLD_FILES+=usr/share/doc/bind9/arm/man.named-journalprint.html OLD_FILES+=usr/share/doc/bind9/arm/man.named.html OLD_FILES+=usr/share/doc/bind9/arm/man.nsec3hash.html OLD_FILES+=usr/share/doc/bind9/arm/man.nsupdate.html OLD_FILES+=usr/share/doc/bind9/arm/man.rndc-confgen.html OLD_FILES+=usr/share/doc/bind9/arm/man.rndc.conf.html OLD_FILES+=usr/share/doc/bind9/arm/man.rndc.html OLD_DIRS+=usr/share/doc/bind9/misc OLD_FILES+=usr/share/doc/bind9/misc/dnssec OLD_FILES+=usr/share/doc/bind9/misc/format-options.pl OLD_FILES+=usr/share/doc/bind9/misc/ipv6 OLD_FILES+=usr/share/doc/bind9/misc/migration OLD_FILES+=usr/share/doc/bind9/misc/migration-4to9 OLD_FILES+=usr/share/doc/bind9/misc/options OLD_FILES+=usr/share/doc/bind9/misc/rfc-compliance OLD_FILES+=usr/share/doc/bind9/misc/roadmap OLD_FILES+=usr/share/doc/bind9/misc/sdb OLD_FILES+=usr/share/doc/bind9/misc/sort-options.pl OLD_FILES+=usr/share/man/man1/arpaname.1.gz OLD_FILES+=usr/share/man/man1/dig.1.gz 
OLD_FILES+=usr/share/man/man1/nslookup.1.gz OLD_FILES+=usr/share/man/man1/nsupdate.1.gz OLD_FILES+=usr/share/man/man3/lwres.3.gz OLD_FILES+=usr/share/man/man3/lwres_addr_parse.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_add.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_back.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_clear.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_first.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_forward.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_getmem.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_getuint16.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_getuint32.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_getuint8.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_init.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_invalidate.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_putmem.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_putuint16.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_putuint32.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_putuint8.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_subtract.3.gz OLD_FILES+=usr/share/man/man3/lwres_conf_clear.3.gz OLD_FILES+=usr/share/man/man3/lwres_conf_get.3.gz OLD_FILES+=usr/share/man/man3/lwres_conf_init.3.gz OLD_FILES+=usr/share/man/man3/lwres_conf_parse.3.gz OLD_FILES+=usr/share/man/man3/lwres_conf_print.3.gz OLD_FILES+=usr/share/man/man3/lwres_config.3.gz OLD_FILES+=usr/share/man/man3/lwres_context.3.gz OLD_FILES+=usr/share/man/man3/lwres_context_allocmem.3.gz OLD_FILES+=usr/share/man/man3/lwres_context_create.3.gz OLD_FILES+=usr/share/man/man3/lwres_context_destroy.3.gz OLD_FILES+=usr/share/man/man3/lwres_context_freemem.3.gz OLD_FILES+=usr/share/man/man3/lwres_context_initserial.3.gz OLD_FILES+=usr/share/man/man3/lwres_context_nextserial.3.gz OLD_FILES+=usr/share/man/man3/lwres_context_sendrecv.3.gz OLD_FILES+=usr/share/man/man3/lwres_endhostent.3.gz OLD_FILES+=usr/share/man/man3/lwres_endhostent_r.3.gz OLD_FILES+=usr/share/man/man3/lwres_freeaddrinfo.3.gz OLD_FILES+=usr/share/man/man3/lwres_freehostent.3.gz OLD_FILES+=usr/share/man/man3/lwres_gabn.3.gz OLD_FILES+=usr/share/man/man3/lwres_gabnrequest_free.3.gz OLD_FILES+=usr/share/man/man3/lwres_gabnrequest_parse.3.gz OLD_FILES+=usr/share/man/man3/lwres_gabnrequest_render.3.gz OLD_FILES+=usr/share/man/man3/lwres_gabnresponse_free.3.gz OLD_FILES+=usr/share/man/man3/lwres_gabnresponse_parse.3.gz OLD_FILES+=usr/share/man/man3/lwres_gabnresponse_render.3.gz OLD_FILES+=usr/share/man/man3/lwres_gai_strerror.3.gz OLD_FILES+=usr/share/man/man3/lwres_getaddrinfo.3.gz OLD_FILES+=usr/share/man/man3/lwres_getaddrsbyname.3.gz OLD_FILES+=usr/share/man/man3/lwres_gethostbyaddr.3.gz OLD_FILES+=usr/share/man/man3/lwres_gethostbyaddr_r.3.gz OLD_FILES+=usr/share/man/man3/lwres_gethostbyname.3.gz OLD_FILES+=usr/share/man/man3/lwres_gethostbyname2.3.gz OLD_FILES+=usr/share/man/man3/lwres_gethostbyname_r.3.gz OLD_FILES+=usr/share/man/man3/lwres_gethostent.3.gz OLD_FILES+=usr/share/man/man3/lwres_gethostent_r.3.gz OLD_FILES+=usr/share/man/man3/lwres_getipnode.3.gz OLD_FILES+=usr/share/man/man3/lwres_getipnodebyaddr.3.gz OLD_FILES+=usr/share/man/man3/lwres_getipnodebyname.3.gz OLD_FILES+=usr/share/man/man3/lwres_getnamebyaddr.3.gz OLD_FILES+=usr/share/man/man3/lwres_getnameinfo.3.gz OLD_FILES+=usr/share/man/man3/lwres_getrrsetbyname.3.gz OLD_FILES+=usr/share/man/man3/lwres_gnba.3.gz OLD_FILES+=usr/share/man/man3/lwres_gnbarequest_free.3.gz OLD_FILES+=usr/share/man/man3/lwres_gnbarequest_parse.3.gz 
OLD_FILES+=usr/share/man/man3/lwres_gnbarequest_render.3.gz OLD_FILES+=usr/share/man/man3/lwres_gnbaresponse_free.3.gz OLD_FILES+=usr/share/man/man3/lwres_gnbaresponse_parse.3.gz OLD_FILES+=usr/share/man/man3/lwres_gnbaresponse_render.3.gz OLD_FILES+=usr/share/man/man3/lwres_herror.3.gz OLD_FILES+=usr/share/man/man3/lwres_hstrerror.3.gz OLD_FILES+=usr/share/man/man3/lwres_inetntop.3.gz OLD_FILES+=usr/share/man/man3/lwres_lwpacket_parseheader.3.gz OLD_FILES+=usr/share/man/man3/lwres_lwpacket_renderheader.3.gz OLD_FILES+=usr/share/man/man3/lwres_net_ntop.3.gz OLD_FILES+=usr/share/man/man3/lwres_noop.3.gz OLD_FILES+=usr/share/man/man3/lwres_nooprequest_free.3.gz OLD_FILES+=usr/share/man/man3/lwres_nooprequest_parse.3.gz OLD_FILES+=usr/share/man/man3/lwres_nooprequest_render.3.gz OLD_FILES+=usr/share/man/man3/lwres_noopresponse_free.3.gz OLD_FILES+=usr/share/man/man3/lwres_noopresponse_parse.3.gz OLD_FILES+=usr/share/man/man3/lwres_noopresponse_render.3.gz OLD_FILES+=usr/share/man/man3/lwres_packet.3.gz OLD_FILES+=usr/share/man/man3/lwres_resutil.3.gz OLD_FILES+=usr/share/man/man3/lwres_sethostent.3.gz OLD_FILES+=usr/share/man/man3/lwres_sethostent_r.3.gz OLD_FILES+=usr/share/man/man3/lwres_string_parse.3.gz OLD_FILES+=usr/share/man/man5/named.conf.5.gz OLD_FILES+=usr/share/man/man5/rndc.conf.5.gz OLD_FILES+=usr/share/man/man8/ddns-confgen.8.gz OLD_FILES+=usr/share/man/man8/dnssec-dsfromkey.8.gz OLD_FILES+=usr/share/man/man8/dnssec-keyfromlabel.8.gz OLD_FILES+=usr/share/man/man8/dnssec-keygen.8.gz OLD_FILES+=usr/share/man/man8/dnssec-revoke.8.gz OLD_FILES+=usr/share/man/man8/dnssec-settime.8.gz OLD_FILES+=usr/share/man/man8/dnssec-signzone.8.gz OLD_FILES+=usr/share/man/man8/dnssec-verify.8.gz OLD_FILES+=usr/share/man/man8/genrandom.8.gz OLD_FILES+=usr/share/man/man8/isc-hmac-fixup.8.gz OLD_FILES+=usr/share/man/man8/lwresd.8.gz OLD_FILES+=usr/share/man/man8/named-checkconf.8.gz OLD_FILES+=usr/share/man/man8/named-checkzone.8.gz OLD_FILES+=usr/share/man/man8/named-compilezone.8.gz OLD_FILES+=usr/share/man/man8/named-journalprint.8.gz OLD_FILES+=usr/share/man/man8/named.8.gz OLD_FILES+=usr/share/man/man8/named.reconfig.8.gz OLD_FILES+=usr/share/man/man8/named.reload.8.gz OLD_FILES+=usr/share/man/man8/nsec3hash.8.gz OLD_FILES+=usr/share/man/man8/rndc-confgen.8.gz OLD_FILES+=usr/share/man/man8/rndc.8.gz OLD_DIRS+=var/named/dev OLD_DIRS+=var/named/etc OLD_DIRS+=var/named/etc/namedb OLD_FILES+=var/named/etc/namedb/PROTO.localhost-v6.rev OLD_FILES+=var/named/etc/namedb/PROTO.localhost.rev OLD_DIRS+=var/named/etc/namedb/dynamic OLD_FILES+=var/named/etc/namedb/make-localhost OLD_DIRS+=var/named/etc/namedb/master OLD_FILES+=var/named/etc/namedb/master/empty.db OLD_FILES+=var/named/etc/namedb/master/localhost-forward.db OLD_FILES+=var/named/etc/namedb/master/localhost-reverse.db #OLD_FILES+=var/named/etc/namedb/named.conf # intentionally left out OLD_FILES+=var/named/etc/namedb/named.root OLD_DIRS+=var/named/etc/namedb/working OLD_DIRS+=var/named/etc/namedb/slave OLD_DIRS+=var/named/var OLD_DIRS+=var/named/var/dump OLD_DIRS+=var/named/var/log OLD_DIRS+=var/named/var/run OLD_DIRS+=var/named/var/run/named OLD_DIRS+=var/named/var/stats OLD_DIRS+=var/run/named # 20130923: example moved OLD_FILES+=usr/share/examples/bsdconfig/browse_packages.sh # 20130908: libssh becomes private OLD_FILES+=usr/lib/libssh.a OLD_FILES+=usr/lib/libssh.so OLD_LIBS+=usr/lib/libssh.so.5 OLD_FILES+=usr/lib/libssh_p.a OLD_FILES+=usr/lib32/libssh.a OLD_FILES+=usr/lib32/libssh.so OLD_LIBS+=usr/lib32/libssh.so.5 
OLD_FILES+=usr/lib32/libssh_p.a # 20130903: gnupatch is no more OLD_FILES+=usr/bin/gnupatch OLD_FILES+=usr/share/man/man1/gnupatch.1.gz # 20130829: bsdpatch is patch unconditionally OLD_FILES+=usr/bin/bsdpatch OLD_FILES+=usr/share/man/man1/bsdpatch.1.gz # 20130822: bind 9.9.3-P2 import OLD_LIBS+=usr/lib/liblwres.so.80 # 20130814: vm_page_busy(9) OLD_FILES+=usr/share/man/man9/vm_page_flash.9.gz OLD_FILES+=usr/share/man/man9/vm_page_io.9.gz OLD_FILES+=usr/share/man/man9/vm_page_io_finish.9.gz OLD_FILES+=usr/share/man/man9/vm_page_io_start.9.gz OLD_FILES+=usr/share/man/man9/vm_page_wakeup.9.gz # 20130710: libkvm version bump OLD_LIBS+=lib/libkvm.so.5 OLD_LIBS+=usr/lib32/libkvm.so.5 # 20130623: dialog update from 1.1 to 1.2 OLD_LIBS+=usr/lib/libdialog.so.7 OLD_LIBS+=usr/lib32/libdialog.so.7 # 20130616: vfs_mount.9 removed OLD_FILES+=usr/share/man/man9/vfs_mount.9.gz # 20130614: remove CVS from base OLD_FILES+=usr/bin/cvs OLD_FILES+=usr/bin/cvsbug OLD_FILES+=usr/share/doc/psd/28.cvs/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/28.cvs/paper.ps.gz OLD_DIRS+=usr/share/doc/psd/28.cvs OLD_FILES+=usr/share/examples/cvs/contrib/README OLD_FILES+=usr/share/examples/cvs/contrib/clmerge OLD_FILES+=usr/share/examples/cvs/contrib/cln_hist OLD_FILES+=usr/share/examples/cvs/contrib/commit_prep OLD_FILES+=usr/share/examples/cvs/contrib/cvs2vendor OLD_FILES+=usr/share/examples/cvs/contrib/cvs_acls OLD_FILES+=usr/share/examples/cvs/contrib/cvscheck OLD_FILES+=usr/share/examples/cvs/contrib/cvscheck.man OLD_FILES+=usr/share/examples/cvs/contrib/cvshelp.man OLD_FILES+=usr/share/examples/cvs/contrib/descend.man OLD_FILES+=usr/share/examples/cvs/contrib/easy-import OLD_FILES+=usr/share/examples/cvs/contrib/intro.doc OLD_FILES+=usr/share/examples/cvs/contrib/log OLD_FILES+=usr/share/examples/cvs/contrib/log_accum OLD_FILES+=usr/share/examples/cvs/contrib/mfpipe OLD_FILES+=usr/share/examples/cvs/contrib/rcs-to-cvs OLD_FILES+=usr/share/examples/cvs/contrib/rcs2log OLD_FILES+=usr/share/examples/cvs/contrib/rcslock OLD_FILES+=usr/share/examples/cvs/contrib/sccs2rcs OLD_DIRS+=usr/share/examples/cvs/contrib OLD_DIRS+=usr/share/examples/cvs OLD_FILES+=usr/share/info/cvs.info.gz OLD_FILES+=usr/share/info/cvsclient.info.gz OLD_FILES+=usr/share/man/man1/cvs.1.gz OLD_FILES+=usr/share/man/man5/cvs.5.gz OLD_FILES+=usr/share/man/man8/cvsbug.8.gz # 20130607: WITH_DEBUG_FILES added OLD_FILES+=lib/libufs.so.6.symbols OLD_FILES+=usr/lib32/libufs.so.6.symbols # 20130417: nfs fha moved from nfsserver to nfs OLD_FILES+=usr/include/nfsserver/nfs_fha.h # 20130411: new clang import which bumps version from 3.2 to 3.3. 
OLD_FILES+=usr/include/clang/3.2/__wmmintrin_aes.h OLD_FILES+=usr/include/clang/3.2/__wmmintrin_pclmul.h OLD_FILES+=usr/include/clang/3.2/altivec.h OLD_FILES+=usr/include/clang/3.2/ammintrin.h OLD_FILES+=usr/include/clang/3.2/avx2intrin.h OLD_FILES+=usr/include/clang/3.2/avxintrin.h OLD_FILES+=usr/include/clang/3.2/bmi2intrin.h OLD_FILES+=usr/include/clang/3.2/bmiintrin.h OLD_FILES+=usr/include/clang/3.2/cpuid.h OLD_FILES+=usr/include/clang/3.2/emmintrin.h OLD_FILES+=usr/include/clang/3.2/f16cintrin.h OLD_FILES+=usr/include/clang/3.2/fma4intrin.h OLD_FILES+=usr/include/clang/3.2/fmaintrin.h OLD_FILES+=usr/include/clang/3.2/immintrin.h OLD_FILES+=usr/include/clang/3.2/lzcntintrin.h OLD_FILES+=usr/include/clang/3.2/mm3dnow.h OLD_FILES+=usr/include/clang/3.2/mm_malloc.h OLD_FILES+=usr/include/clang/3.2/mmintrin.h OLD_FILES+=usr/include/clang/3.2/module.map OLD_FILES+=usr/include/clang/3.2/nmmintrin.h OLD_FILES+=usr/include/clang/3.2/pmmintrin.h OLD_FILES+=usr/include/clang/3.2/popcntintrin.h OLD_FILES+=usr/include/clang/3.2/rtmintrin.h OLD_FILES+=usr/include/clang/3.2/smmintrin.h OLD_FILES+=usr/include/clang/3.2/tmmintrin.h OLD_FILES+=usr/include/clang/3.2/wmmintrin.h OLD_FILES+=usr/include/clang/3.2/x86intrin.h OLD_FILES+=usr/include/clang/3.2/xmmintrin.h OLD_FILES+=usr/include/clang/3.2/xopintrin.h OLD_DIRS+=usr/include/clang/3.2 # 20130404: legacy ATA stack removed OLD_FILES+=etc/periodic/daily/405.status-ata-raid OLD_FILES+=rescue/atacontrol OLD_FILES+=sbin/atacontrol OLD_FILES+=usr/share/man/man8/atacontrol.8.gz OLD_FILES+=usr/share/man/man4/atapicam.4.gz OLD_FILES+=usr/share/man/man4/ataraid.4.gz OLD_FILES+=usr/sbin/burncd OLD_FILES+=usr/share/man/man8/burncd.8.gz # 20130316: vinum.4 removed OLD_FILES+=usr/share/man/man4/vinum.4.gz # 20130312: fortunes-o removed OLD_FILES+=usr/share/games/fortune/fortunes-o OLD_FILES+=usr/share/games/fortune/fortunes-o.dat # 20130311: Ports are no longer available via cvsup OLD_FILES+=usr/share/examples/cvsup/ports-supfile OLD_FILES+=usr/share/examples/cvsup/refuse OLD_FILES+=usr/share/examples/cvsup/refuse.README # 20130309: NWFS and NCP support removed OLD_FILES+=usr/bin/ncplist OLD_FILES+=usr/bin/ncplogin OLD_FILES+=usr/bin/ncplogout OLD_FILES+=usr/include/fs/nwfs/nwfs.h OLD_FILES+=usr/include/fs/nwfs/nwfs_mount.h OLD_FILES+=usr/include/fs/nwfs/nwfs_node.h OLD_FILES+=usr/include/fs/nwfs/nwfs_subr.h OLD_DIRS+=usr/include/fs/nwfs OLD_FILES+=usr/include/netncp/ncp.h OLD_FILES+=usr/include/netncp/ncp_cfg.h OLD_FILES+=usr/include/netncp/ncp_conn.h OLD_FILES+=usr/include/netncp/ncp_file.h OLD_FILES+=usr/include/netncp/ncp_lib.h OLD_FILES+=usr/include/netncp/ncp_ncp.h OLD_FILES+=usr/include/netncp/ncp_nls.h OLD_FILES+=usr/include/netncp/ncp_rcfile.h OLD_FILES+=usr/include/netncp/ncp_rq.h OLD_FILES+=usr/include/netncp/ncp_sock.h OLD_FILES+=usr/include/netncp/ncp_subr.h OLD_FILES+=usr/include/netncp/ncp_user.h OLD_FILES+=usr/include/netncp/ncpio.h OLD_FILES+=usr/include/netncp/nwerror.h OLD_DIRS+=usr/include/netncp OLD_FILES+=usr/lib/libncp.a OLD_FILES+=usr/lib/libncp.so OLD_LIBS+=usr/lib/libncp.so.4 OLD_FILES+=usr/lib/libncp_p.a OLD_FILES+=usr/lib32/libncp.a OLD_FILES+=usr/lib32/libncp.so OLD_LIBS+=usr/lib32/libncp.so.4 OLD_FILES+=usr/lib32/libncp_p.a OLD_FILES+=usr/sbin/mount_nwfs OLD_FILES+=usr/share/examples/nwclient/dot.nwfsrc OLD_FILES+=usr/share/examples/nwclient/nwfs.sh.sample OLD_DIRS+=usr/share/examples/nwclient OLD_FILES+=usr/share/man/man1/ncplist.1.gz OLD_FILES+=usr/share/man/man1/ncplogin.1.gz OLD_FILES+=usr/share/man/man1/ncplogout.1.gz
OLD_FILES+=usr/share/man/man8/mount_nwfs.8.gz # 20130302: NTFS support removed OLD_FILES+=rescue/mount_ntfs OLD_FILES+=sbin/mount_ntfs OLD_FILES+=usr/include/fs/ntfs/ntfs.h OLD_FILES+=usr/include/fs/ntfs/ntfs_compr.h OLD_FILES+=usr/include/fs/ntfs/ntfs_ihash.h OLD_FILES+=usr/include/fs/ntfs/ntfs_inode.h OLD_FILES+=usr/include/fs/ntfs/ntfs_subr.h OLD_FILES+=usr/include/fs/ntfs/ntfs_vfsops.h OLD_FILES+=usr/include/fs/ntfs/ntfsmount.h OLD_DIRS+=usr/include/fs/ntfs OLD_FILES+=usr/share/man/man8/mount_ntfs.8.gz # 20130302: PORTALFS support removed OLD_FILES+=usr/include/fs/portalfs/portal.h OLD_DIRS+=usr/include/fs/portalfs OLD_FILES+=usr/sbin/mount_portalfs OLD_FILES+=usr/share/examples/portal/README OLD_FILES+=usr/share/examples/portal/portal.conf OLD_DIRS+=usr/share/examples/portal OLD_FILES+=usr/share/man/man8/mount_portalfs.8.gz # 20130302: CODAFS support removed OLD_FILES+=usr/share/man/man4/coda.4.gz # 20130302: XFS support removed OLD_FILES+=usr/share/man/man5/xfs.5.gz # 20130302: Capsicum overhaul OLD_FILES+=usr/share/man/man2/cap_getrights.2.gz OLD_FILES+=usr/share/man/man2/cap_new.2.gz # 20130213: OpenSSL 1.0.1e import OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_verifyrecover.3.gz OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_verifyrecover_init.3.gz # 20130116: removed long-unused directories for .1aout section manpages OLD_FILES+=usr/share/man/en.ISO8859-1/man1aout OLD_FILES+=usr/share/man/en.UTF-8/man1aout OLD_DIRS+=usr/share/man/man1aout OLD_DIRS+=usr/share/man/cat1aout OLD_DIRS+=usr/share/man/en.ISO8859-1/cat1aout OLD_DIRS+=usr/share/man/en.UTF-8/cat1aout # 20130110: bsd.compat.mk removed OLD_FILES+=usr/share/mk/bsd.compat.mk # 20130103: gnats-supfile removed OLD_FILES+=usr/share/examples/cvsup/gnats-supfile # 20121230: libdisk removed OLD_FILES+=usr/share/man/man3/libdisk.3.gz usr/include/libdisk.h OLD_FILES+=usr/lib/libdisk.a usr/lib32/libdisk.a # 20121230: remove wrongly created directories for auditdistd OLD_DIRS+=var/dist OLD_DIRS+=var/remote # 20121114: zpool-features manual page moved from section 5 to 7 OLD_FILES+=usr/share/man/man5/zpool-features.5.gz # 20121022: remove harp, hfa and idt man pages OLD_FILES+=usr/share/man/man4/harp.4.gz OLD_FILES+=usr/share/man/man4/hfa.4.gz OLD_FILES+=usr/share/man/man4/idt.4.gz OLD_FILES+=usr/share/man/man4/if_idt.4.gz # 20121022: VFS_LOCK_GIANT elimination OLD_FILES+=usr/share/man/man9/VFS_LOCK_GIANT.9.gz OLD_FILES+=usr/share/man/man9/VFS_UNLOCK_GIANT.9.gz # 20121004: remove incomplete unwind.h OLD_FILES+=usr/include/clang/3.2/unwind.h # 20120910: NetBSD compat shims removed OLD_FILES+=usr/include/cam/scsi/scsi_low_pisa.h OLD_FILES+=usr/include/sys/device_port.h # 20120909: doc and www supfiles removed OLD_FILES+=usr/share/examples/cvsup/doc-supfile OLD_FILES+=usr/share/examples/cvsup/www-supfile # 20120908: pf cleanup OLD_FILES+=usr/include/net/if_pflow.h # 20120816: new clang import which bumps version from 3.1 to 3.2 OLD_FILES+=usr/bin/llvm-ld OLD_FILES+=usr/bin/llvm-stub OLD_FILES+=usr/include/clang/3.1/altivec.h OLD_FILES+=usr/include/clang/3.1/avx2intrin.h OLD_FILES+=usr/include/clang/3.1/avxintrin.h OLD_FILES+=usr/include/clang/3.1/bmi2intrin.h OLD_FILES+=usr/include/clang/3.1/bmiintrin.h OLD_FILES+=usr/include/clang/3.1/cpuid.h OLD_FILES+=usr/include/clang/3.1/emmintrin.h OLD_FILES+=usr/include/clang/3.1/fma4intrin.h OLD_FILES+=usr/include/clang/3.1/immintrin.h OLD_FILES+=usr/include/clang/3.1/lzcntintrin.h OLD_FILES+=usr/include/clang/3.1/mm3dnow.h OLD_FILES+=usr/include/clang/3.1/mm_malloc.h
OLD_FILES+=usr/include/clang/3.1/mmintrin.h OLD_FILES+=usr/include/clang/3.1/module.map OLD_FILES+=usr/include/clang/3.1/nmmintrin.h OLD_FILES+=usr/include/clang/3.1/pmmintrin.h OLD_FILES+=usr/include/clang/3.1/popcntintrin.h OLD_FILES+=usr/include/clang/3.1/smmintrin.h OLD_FILES+=usr/include/clang/3.1/tmmintrin.h OLD_FILES+=usr/include/clang/3.1/unwind.h OLD_FILES+=usr/include/clang/3.1/wmmintrin.h OLD_FILES+=usr/include/clang/3.1/x86intrin.h OLD_FILES+=usr/include/clang/3.1/xmmintrin.h OLD_DIRS+=usr/include/clang/3.1 OLD_FILES+=usr/share/man/man1/llvm-ld.1.gz # 20120712: OpenSSL 1.0.1c import OLD_LIBS+=lib/libcrypto.so.6 OLD_LIBS+=usr/lib/libssl.so.6 OLD_LIBS+=usr/lib32/libcrypto.so.6 OLD_LIBS+=usr/lib32/libssl.so.6 OLD_FILES+=usr/include/openssl/aes_locl.h OLD_FILES+=usr/include/openssl/bio_lcl.h OLD_FILES+=usr/include/openssl/e_os.h OLD_FILES+=usr/include/openssl/fips.h OLD_FILES+=usr/include/openssl/fips_rand.h OLD_FILES+=usr/include/openssl/md2.h OLD_FILES+=usr/include/openssl/pq_compat.h OLD_FILES+=usr/include/openssl/store.h OLD_FILES+=usr/include/openssl/tmdiff.h OLD_FILES+=usr/include/openssl/ui_locl.h OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_set_id_callback.3.gz # 20120621: remove old vnconfig(8) man page OLD_FILES+=usr/share/man/man8/vnconfig.8.gz # 20120619: TOE support updated OLD_FILES+=usr/include/netinet/toedev.h # 20120613: auth.conf removed OLD_FILES+=etc/auth.conf OLD_FILES+=usr/share/examples/etc/auth.conf OLD_FILES+=usr/share/man/man3/auth.3.gz OLD_FILES+=usr/share/man/man3/auth_getval.3.gz OLD_FILES+=usr/share/man/man5/auth.conf.5.gz # 20120530: the kde pam.d file now lives in ports OLD_FILES+=etc/pam.d/kde # 20120521: byacc import OLD_FILES+=usr/bin/yyfix OLD_FILES+=usr/share/man/man1/yyfix.1.gz # 20120505: new clang import installed a redundant internal header OLD_FILES+=usr/include/clang/3.1/stdalign.h # 20120428: MD2 removed from libmd OLD_LIBS+=lib/libmd.so.5 OLD_FILES+=usr/include/md2.h OLD_LIBS+=usr/lib32/libmd.so.5 OLD_FILES+=usr/share/man/man3/MD2Data.3.gz OLD_FILES+=usr/share/man/man3/MD2End.3.gz OLD_FILES+=usr/share/man/man3/MD2File.3.gz OLD_FILES+=usr/share/man/man3/MD2FileChunk.3.gz OLD_FILES+=usr/share/man/man3/MD2Final.3.gz OLD_FILES+=usr/share/man/man3/MD2Init.3.gz OLD_FILES+=usr/share/man/man3/MD2Update.3.gz OLD_FILES+=usr/share/man/man3/md2.3.gz # 20120425: libusb version bump (r234684) OLD_LIBS+=usr/lib/libusb.so.2 OLD_LIBS+=usr/lib32/libusb.so.2 OLD_FILES+=usr/share/man/man3/libsub_get_active_config_descriptor.3.gz # 20120415: new clang import which bumps version from 3.0 to 3.1 OLD_FILES+=usr/include/clang/3.0/altivec.h OLD_FILES+=usr/include/clang/3.0/avxintrin.h OLD_FILES+=usr/include/clang/3.0/emmintrin.h OLD_FILES+=usr/include/clang/3.0/immintrin.h OLD_FILES+=usr/include/clang/3.0/mm3dnow.h OLD_FILES+=usr/include/clang/3.0/mm_malloc.h OLD_FILES+=usr/include/clang/3.0/mmintrin.h OLD_FILES+=usr/include/clang/3.0/nmmintrin.h OLD_FILES+=usr/include/clang/3.0/pmmintrin.h OLD_FILES+=usr/include/clang/3.0/smmintrin.h OLD_FILES+=usr/include/clang/3.0/tmmintrin.h OLD_FILES+=usr/include/clang/3.0/wmmintrin.h OLD_FILES+=usr/include/clang/3.0/x86intrin.h OLD_FILES+=usr/include/clang/3.0/xmmintrin.h OLD_DIRS+=usr/include/clang/3.0 # 20120412: BIND 9.8.1 release notes removed OLD_FILES+=usr/share/doc/bind9/RELEASE-NOTES-BIND-9.8.1.pdf OLD_FILES+=usr/share/doc/bind9/RELEASE-NOTES-BIND-9.8.1.txt OLD_FILES+=usr/share/doc/bind9/RELEASE-NOTES-BIND-9.8.1.html OLD_FILES+=usr/share/doc/bind9/release-notes.css # 20120330: legacy(4) moved to x86
OLD_FILES+=usr/include/machine/legacyvar.h # 20120324: MPI headers updated OLD_FILES+=usr/include/dev/mpt/mpilib/mpi_inb.h # 20120322: hwpmc_mips24k.h removed OLD_FILES+=usr/include/dev/hwpmc/hwpmc_mips24k.h # 20120322: Update heimdal to 1.5.1. OLD_FILES+=usr/include/krb5-v4compat.h \ usr/include/krb_err.h \ usr/include/hdb-private.h \ usr/share/man/man3/krb5_addresses.3.gz \ usr/share/man/man3/krb5_cc_cursor.3.gz \ usr/share/man/man3/krb5_cc_ops.3.gz \ usr/share/man/man3/krb5_config.3.gz \ usr/share/man/man3/krb5_config_get_int_default.3.gz \ usr/share/man/man3/krb5_context.3.gz \ usr/share/man/man3/krb5_data.3.gz \ usr/share/man/man3/krb5_err.3.gz \ usr/share/man/man3/krb5_errx.3.gz \ usr/share/man/man3/krb5_keyblock.3.gz \ usr/share/man/man3/krb5_keytab_entry.3.gz \ usr/share/man/man3/krb5_kt_cursor.3.gz \ usr/share/man/man3/krb5_kt_ops.3.gz \ usr/share/man/man3/krb5_set_warn_dest.3.gz \ usr/share/man/man3/krb5_verr.3.gz \ usr/share/man/man3/krb5_verrx.3.gz \ usr/share/man/man3/krb5_vwarnx.3.gz \ usr/share/man/man3/krb5_warn.3.gz \ usr/share/man/man3/krb5_warnx.3.gz OLD_LIBS+=usr/lib/libasn1.so.10 \ usr/lib/libhdb.so.10 \ usr/lib/libheimntlm.so.10 \ usr/lib/libhx509.so.10 \ usr/lib/libkadm5clnt.so.10 \ usr/lib/libkadm5srv.so.10 \ usr/lib/libkafs5.so.10 \ usr/lib/libkrb5.so.10 \ usr/lib/libroken.so.10 \ usr/lib32/libasn1.so.10 \ usr/lib32/libhdb.so.10 \ usr/lib32/libheimntlm.so.10 \ usr/lib32/libhx509.so.10 \ usr/lib32/libkadm5clnt.so.10 \ usr/lib32/libkadm5srv.so.10 \ usr/lib32/libkafs5.so.10 \ usr/lib32/libkrb5.so.10 \ usr/lib32/libroken.so.10 # 20120309: Remove fifofs header files. OLD_FILES+=usr/include/fs/fifofs/fifo.h OLD_DIRS+=usr/include/fs/fifofs # 20120304: xlocale cleanup OLD_FILES+=usr/include/_xlocale_ctype.h # 20120225: libarchive 3.0.3 OLD_FILES+=usr/share/man/man3/archive_read_data_into_buffer.3.gz \ usr/share/man/man3/archive_read_support_compression_all.3.gz \ usr/share/man/man3/archive_read_support_compression_bzip2.3.gz \ usr/share/man/man3/archive_read_support_compression_compress.3.gz \ usr/share/man/man3/archive_read_support_compression_gzip.3.gz \ usr/share/man/man3/archive_read_support_compression_lzma.3.gz \ usr/share/man/man3/archive_read_support_compression_none.3.gz \ usr/share/man/man3/archive_read_support_compression_program.3.gz \ usr/share/man/man3/archive_read_support_compression_program_signature.3.gz \ usr/share/man/man3/archive_read_support_compression_xz.3.gz \ usr/share/man/man3/archive_write_set_callbacks.3.gz \ usr/share/man/man3/archive_write_set_compression_bzip2.3.gz \ usr/share/man/man3/archive_write_set_compression_compress.3.gz \ usr/share/man/man3/archive_write_set_compression_gzip.3.gz \ usr/share/man/man3/archive_write_set_compression_none.3.gz \ usr/share/man/man3/archive_write_set_compression_program.3.gz OLD_LIBS+=usr/lib/libarchive.so.5 OLD_LIBS+=usr/lib32/libarchive.so.5 # 20120113: removal of wtmpcvt(1) OLD_FILES+=usr/bin/wtmpcvt OLD_FILES+=usr/share/man/man1/wtmpcvt.1.gz # 20111214: eventtimers(7) moved to eventtimers(4) OLD_FILES+=usr/share/man/man7/eventtimers.7.gz # 20111125: amd(4) removed OLD_FILES+=usr/share/man/man4/amd.4.gz # 20111125: libodialog removed OLD_FILES+=usr/lib/libodialog.a OLD_FILES+=usr/lib/libodialog.so OLD_LIBS+=usr/lib/libodialog.so.7 OLD_FILES+=usr/lib/libodialog_p.a OLD_FILES+=usr/lib32/libodialog.a OLD_FILES+=usr/lib32/libodialog.so OLD_LIBS+=usr/lib32/libodialog.so.7 OLD_FILES+=usr/lib32/libodialog_p.a # 20110930: sysinstall removed OLD_FILES+=usr/sbin/sysinstall 
OLD_FILES+=usr/share/man/man8/sysinstall.8.gz OLD_FILES+=usr/lib/libftpio.a OLD_FILES+=usr/lib/libftpio.so OLD_LIBS+=usr/lib/libftpio.so.8 OLD_FILES+=usr/lib/libftpio_p.a OLD_FILES+=usr/lib32/libftpio.a OLD_FILES+=usr/lib32/libftpio.so OLD_LIBS+=usr/lib32/libftpio.so.8 OLD_FILES+=usr/lib32/libftpio_p.a OLD_FILES+=usr/include/ftpio.h OLD_FILES+=usr/share/man/man3/ftpio.3.gz # 20110915: rename congestion control manpages OLD_FILES+=usr/share/man/man9/cc.9.gz # 20110831: atomic page flags operations OLD_FILES+=usr/share/man/man9/vm_page_flag.9.gz OLD_FILES+=usr/share/man/man9/vm_page_flag_clear.9.gz OLD_FILES+=usr/share/man/man9/vm_page_flag_set.9.gz # 20110828: library version bump for 9.0 OLD_LIBS+=lib/libcam.so.5 OLD_LIBS+=lib/libpcap.so.7 OLD_LIBS+=lib/libufs.so.5 OLD_LIBS+=usr/lib/libbsnmp.so.5 OLD_LIBS+=usr/lib/libdwarf.so.2 OLD_LIBS+=usr/lib/libopie.so.6 OLD_LIBS+=usr/lib/librtld_db.so.1 OLD_LIBS+=usr/lib/libtacplus.so.4 OLD_LIBS+=usr/lib32/libcam.so.5 OLD_LIBS+=usr/lib32/libpcap.so.7 OLD_LIBS+=usr/lib32/libufs.so.5 OLD_LIBS+=usr/lib32/libbsnmp.so.5 OLD_LIBS+=usr/lib32/libdwarf.so.2 OLD_LIBS+=usr/lib32/libopie.so.6 OLD_LIBS+=usr/lib32/librtld_db.so.1 OLD_LIBS+=usr/lib32/libtacplus.so.4 # 20110817: no more acd.4, ad.4, afd.4 and ast.4 OLD_FILES+=usr/share/man/man4/acd.4.gz OLD_FILES+=usr/share/man/man4/ad.4.gz OLD_FILES+=usr/share/man/man4/afd.4.gz OLD_FILES+=usr/share/man/man4/ast.4.gz # 20110718: no longer useful in the age of rc.d OLD_FILES+=usr/sbin/named.reconfig OLD_FILES+=usr/sbin/named.reload OLD_FILES+=usr/share/man/man8/named.reconfig.8.gz OLD_FILES+=usr/share/man/man8/named.reload.8.gz # 20110716: bind 9.8.0 import OLD_LIBS+=usr/lib/liblwres.so.50 OLD_FILES+=usr/share/doc/bind9/KNOWN-DEFECTS OLD_FILES+=usr/share/doc/bind9/NSEC3-NOTES OLD_FILES+=usr/share/doc/bind9/README.idnkit OLD_FILES+=usr/share/doc/bind9/README.pkcs11 # 20110709: vm_map_clean.9 -> vm_map_sync.9 OLD_FILES+=usr/share/man/man9/vm_map_clean.9.gz # 20110709: Catch up with removal of these functions. 
OLD_FILES+=usr/share/man/man9/vm_page_copy.9.gz OLD_FILES+=usr/share/man/man9/vm_page_protect.9.gz OLD_FILES+=usr/share/man/man9/vm_page_zero_fill.9.gz # 20110707: script no longer needed by /etc/rc.d/nfsd OLD_FILES+=etc/rc.d/nfsserver # 20110705: files moved so both NFS clients can share them OLD_FILES+=usr/include/nfsclient/krpc.h OLD_FILES+=usr/include/nfsclient/nfsdiskless.h # 20110705: the default NFS client switched to the new implementation OLD_FILES+=sbin/mount_newnfs OLD_FILES+=usr/share/man/man8/mount_newnfs.8.gz OLD_FILES+=usr/include/nfsclient/nfs_kdtrace.h # 20110628: calendar.msk removed OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.msk # 20110517: libpkg removed OLD_FILES+=usr/include/pkg.h OLD_FILES+=usr/lib/libpkg.a OLD_FILES+=usr/lib/libpkg.so OLD_LIBS+=usr/lib/libpkg.so.0 OLD_FILES+=usr/lib/libpkg_p.a OLD_FILES+=usr/lib32/libpkg.a OLD_FILES+=usr/lib32/libpkg.so OLD_LIBS+=usr/lib32/libpkg.so.0 OLD_FILES+=usr/lib32/libpkg_p.a # 20110517: libsbuf version bump OLD_LIBS+=lib/libsbuf.so.5 OLD_LIBS+=usr/lib32/libsbuf.so.5 # 20110502: new clang import which bumps version from 2.9 to 3.0 OLD_FILES+=usr/include/clang/2.9/emmintrin.h OLD_FILES+=usr/include/clang/2.9/mm_malloc.h OLD_FILES+=usr/include/clang/2.9/mmintrin.h OLD_FILES+=usr/include/clang/2.9/pmmintrin.h OLD_FILES+=usr/include/clang/2.9/tmmintrin.h OLD_FILES+=usr/include/clang/2.9/xmmintrin.h OLD_DIRS+=usr/include/clang/2.9 # 20110417: removal of Objective-C support OLD_FILES+=usr/include/objc/encoding.h OLD_FILES+=usr/include/objc/hash.h OLD_FILES+=usr/include/objc/NXConstStr.h OLD_FILES+=usr/include/objc/objc-api.h OLD_FILES+=usr/include/objc/objc-decls.h OLD_FILES+=usr/include/objc/objc-list.h OLD_FILES+=usr/include/objc/objc.h OLD_FILES+=usr/include/objc/Object.h OLD_FILES+=usr/include/objc/Protocol.h OLD_FILES+=usr/include/objc/runtime.h OLD_FILES+=usr/include/objc/sarray.h OLD_FILES+=usr/include/objc/thr.h OLD_FILES+=usr/include/objc/typedstream.h OLD_FILES+=usr/lib/libobjc.a OLD_FILES+=usr/lib/libobjc.so OLD_FILES+=usr/lib/libobjc_p.a OLD_FILES+=usr/libexec/cc1obj OLD_LIBS+=usr/lib/libobjc.so.4 OLD_DIRS+=usr/include/objc OLD_FILES+=usr/lib32/libobjc.a OLD_FILES+=usr/lib32/libobjc.so OLD_FILES+=usr/lib32/libobjc_p.a OLD_LIBS+=usr/lib32/libobjc.so.4 # 20110331: firmware.img created at build time OLD_FILES+=usr/share/examples/kld/firmware/fwimage/firmware.img # 20110224: sticky.8 -> sticky.7 OLD_FILES+=usr/share/man/man8/sticky.8.gz # 20110220: new clang import which bumps version from 2.8 to 2.9 OLD_FILES+=usr/include/clang/2.8/emmintrin.h OLD_FILES+=usr/include/clang/2.8/mm_malloc.h OLD_FILES+=usr/include/clang/2.8/mmintrin.h OLD_FILES+=usr/include/clang/2.8/pmmintrin.h OLD_FILES+=usr/include/clang/2.8/tmmintrin.h OLD_FILES+=usr/include/clang/2.8/xmmintrin.h OLD_DIRS+=usr/include/clang/2.8 # 20110119: netinet/sctp_cc_functions.h removed OLD_FILES+=usr/include/netinet/sctp_cc_functions.h # 20110119: Remove SYSCTL_*X* sysctl additions. OLD_FILES+=usr/share/man/man9/SYSCTL_XINT.9.gz \ usr/share/man/man9/SYSCTL_XLONG.9.gz # 20110112: Update dialog to new version, rename old libdialog to libodialog, # removing associated man pages and header files.
OLD_FILES+=usr/share/man/man3/draw_shadow.3.gz \ usr/share/man/man3/draw_box.3.gz usr/share/man/man3/line_edit.3.gz \ usr/share/man/man3/strheight.3.gz usr/share/man/man3/strwidth.3.gz \ usr/share/man/man3/dialog_create_rc.3.gz \ usr/share/man/man3/dialog_yesno.3.gz usr/share/man/man3/dialog_noyes.3.gz \ usr/share/man/man3/dialog_prgbox.3.gz \ usr/share/man/man3/dialog_textbox.3.gz usr/share/man/man3/dialog_menu.3.gz \ usr/share/man/man3/dialog_checklist.3.gz \ usr/share/man/man3/dialog_radiolist.3.gz \ usr/share/man/man3/dialog_inputbox.3.gz \ usr/share/man/man3/dialog_clear_norefresh.3.gz \ usr/share/man/man3/dialog_clear.3.gz usr/share/man/man3/dialog_update.3.gz \ usr/share/man/man3/dialog_fselect.3.gz \ usr/share/man/man3/dialog_notify.3.gz \ usr/share/man/man3/dialog_mesgbox.3.gz \ usr/share/man/man3/dialog_gauge.3.gz usr/share/man/man3/init_dialog.3.gz \ usr/share/man/man3/end_dialog.3.gz usr/share/man/man3/use_helpfile.3.gz \ usr/share/man/man3/use_helpline.3.gz usr/share/man/man3/get_helpline.3.gz \ usr/share/man/man3/restore_helpline.3.gz \ usr/share/man/man3/dialog_msgbox.3.gz \ usr/share/man/man3/dialog_ftree.3.gz usr/share/man/man3/dialog_tree.3.gz \ usr/share/examples/dialog/README usr/share/examples/dialog/checklist \ usr/share/examples/dialog/ftreebox usr/share/examples/dialog/infobox \ usr/share/examples/dialog/inputbox usr/share/examples/dialog/menubox \ usr/share/examples/dialog/msgbox usr/share/examples/dialog/prgbox \ usr/share/examples/dialog/radiolist usr/share/examples/dialog/textbox \ usr/share/examples/dialog/treebox usr/share/examples/dialog/yesno \ usr/share/examples/libdialog/Makefile usr/share/examples/libdialog/check1.c\ usr/share/examples/libdialog/check2.c usr/share/examples/libdialog/check3.c\ usr/share/examples/libdialog/dselect.c \ usr/share/examples/libdialog/fselect.c \ usr/share/examples/libdialog/ftree1.c \ usr/share/examples/libdialog/ftree1.test \ usr/share/examples/libdialog/ftree2.c \ usr/share/examples/libdialog/ftree2.test \ usr/share/examples/libdialog/gauge.c usr/share/examples/libdialog/input1.c \ usr/share/examples/libdialog/input2.c usr/share/examples/libdialog/menu1.c \ usr/share/examples/libdialog/menu2.c usr/share/examples/libdialog/menu3.c \ usr/share/examples/libdialog/msg.c usr/share/examples/libdialog/prgbox.c \ usr/share/examples/libdialog/radio1.c usr/share/examples/libdialog/radio2.c\ usr/share/examples/libdialog/radio3.c usr/share/examples/libdialog/text.c \ usr/share/examples/libdialog/tree.c usr/share/examples/libdialog/yesno.c OLD_DIRS+=usr/share/examples/libdialog usr/share/examples/dialog # 20101114: Remove long-obsolete MAKEDEV.8 OLD_FILES+=usr/share/man/man8/MAKEDEV.8.gz # 20101112: vgonel(9) moved to the private API a while ago OLD_FILES+=usr/share/man/man9/vgonel.9.gz # 20101112: removed gasp.info OLD_FILES+=usr/share/info/gasp.info.gz # 20101109: machine/mutex.h removed OLD_FILES+=usr/include/machine/mutex.h # 20101109: headers moved from machine/ to x86/ .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386" OLD_FILES+=usr/include/machine/mptable.h .endif # 20101101: headers moved from machine/ to x86/ .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386" OLD_FILES+=usr/include/machine/apicreg.h OLD_FILES+=usr/include/machine/mca.h .endif # 20101020: catch up with vm_page_sleep_if_busy rename OLD_FILES+=usr/share/man/man9/vm_page_sleep_busy.9.gz # 20101018: taskqueue(9) updates OLD_FILES+=usr/share/man/man9/taskqueue_find.9.gz # 20101011: removed subblock.h from liblzma OLD_FILES+=usr/include/lzma/subblock.h # 20101002: removed manpath.config
OLD_FILES+=etc/manpath.config OLD_FILES+=usr/share/examples/etc/manpath.config # 20100910: renamed sbuf_overflowed to sbuf_error OLD_FILES+=usr/share/man/man9/sbuf_overflowed.9.gz # 20100815: retired last traces of chooseproc(9) OLD_FILES+=usr/share/man/man9/chooseproc.9.gz # 20100806: removal of unused libcompat routines OLD_FILES+=usr/share/man/man3/ascftime.3.gz OLD_FILES+=usr/share/man/man3/cfree.3.gz OLD_FILES+=usr/share/man/man3/cftime.3.gz OLD_FILES+=usr/share/man/man3/getpw.3.gz # 20100801: tzdata2010k import OLD_FILES+=usr/share/zoneinfo/Pacific/Ponape OLD_FILES+=usr/share/zoneinfo/Pacific/Truk # 20100725: acpi_aiboost(4) removal. OLD_FILES+=usr/share/man/man4/acpi_aiboost.4.gz # 20100724: nfsclient/nfs_lock.h moved to nfs/nfs_lock.h OLD_FILES+=usr/include/nfsclient/nfs_lock.h # 20100720: new clang import which bumps version from 2.0 to 2.8 OLD_FILES+=usr/include/clang/2.0/emmintrin.h OLD_FILES+=usr/include/clang/2.0/mm_malloc.h OLD_FILES+=usr/include/clang/2.0/mmintrin.h OLD_FILES+=usr/include/clang/2.0/pmmintrin.h OLD_FILES+=usr/include/clang/2.0/tmmintrin.h OLD_FILES+=usr/include/clang/2.0/xmmintrin.h OLD_DIRS+=usr/include/clang/2.0 # 20100706: removed pc-sysinstall's detect-vmware.sh OLD_FILES+=usr/share/pc-sysinstall/backend-query/detect-vmware.sh # 20100701: [powerpc] machine/intr.h removed .if ${TARGET_ARCH} == "powerpc" OLD_FILES+=usr/include/machine/intr.h .endif # 20100514: library version bump for versioned symbols for liblzma OLD_LIBS+=usr/lib/liblzma.so.0 OLD_LIBS+=usr/lib32/liblzma.so.0 # 20100511: move GCC-specific headers to /usr/include/gcc .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386" OLD_FILES+=usr/include/emmintrin.h OLD_FILES+=usr/include/mm_malloc.h OLD_FILES+=usr/include/pmmintrin.h OLD_FILES+=usr/include/xmmintrin.h .endif .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386" || ${TARGET_ARCH} == "arm" OLD_FILES+=usr/include/mmintrin.h .endif .if ${TARGET_ARCH} == "powerpc" OLD_FILES+=usr/include/altivec.h OLD_FILES+=usr/include/ppc-asm.h OLD_FILES+=usr/include/spe.h .endif # 20100416: [mips] machine/psl.h removed .if ${TARGET_ARCH} == "mips" OLD_FILES+=usr/include/machine/psl.h .endif # 20100415: [mips] removed unused headers .if ${TARGET_ARCH} == "mips" OLD_FILES+=usr/include/machine/archtype.h OLD_FILES+=usr/include/machine/segments.h OLD_FILES+=usr/include/machine/rm7000.h OLD_FILES+=usr/include/machine/defs.h OLD_FILES+=usr/include/machine/queue.h .endif # 20100326: gcpio removal OLD_FILES+=usr/bin/gcpio OLD_FILES+=usr/share/info/cpio.info.gz OLD_FILES+=usr/share/man/man1/gcpio.1.gz # 20100322: libz update OLD_LIBS+=lib/libz.so.5 OLD_LIBS+=usr/lib32/libz.so.5 # 20100314: removal of regexp.h OLD_FILES+=usr/include/regexp.h OLD_FILES+=usr/share/man/man3/regexp.3.gz OLD_FILES+=usr/share/man/man3/regsub.3.gz # 20100303: actual removal of utmp.h OLD_FILES+=usr/include/utmp.h # 20100208: man pages moved .if ${TARGET_ARCH} == "i386" OLD_FILES+=usr/share/man/man4/i386/alpm.4.gz OLD_FILES+=usr/share/man/man4/i386/amdpm.4.gz OLD_FILES+=usr/share/man/man4/i386/mcd.4.gz OLD_FILES+=usr/share/man/man4/i386/padlock.4.gz OLD_FILES+=usr/share/man/man4/i386/pcf.4.gz OLD_FILES+=usr/share/man/man4/i386/scd.4.gz OLD_FILES+=usr/share/man/man4/i386/viapm.4.gz .endif # 20100122: move BSDL bc/dc USD documents to /usr/share/doc/usd OLD_FILES+=usr/share/doc/papers/bc.ascii.gz OLD_FILES+=usr/share/doc/papers/dc.ascii.gz # 20100120: replacing GNU bc/dc with BSDL versions OLD_FILES+=usr/share/examples/bc/ckbook.b OLD_FILES+=usr/share/examples/bc/pi.b
OLD_FILES+=usr/share/examples/bc/primes.b OLD_FILES+=usr/share/examples/bc/twins.b OLD_FILES+=usr/share/info/dc.info.gz OLD_DIRS+=usr/share/examples/bc # 20100114: removal of ttyslot(3) OLD_FILES+=usr/share/man/man3/ttyslot.3.gz # 20100113: remove utmp.h, replace it with utmpx.h OLD_FILES+=usr/share/man/man3/login.3.gz OLD_FILES+=usr/share/man/man3/logout.3.gz OLD_FILES+=usr/share/man/man3/logwtmp.3.gz OLD_FILES+=usr/share/man/man3/ulog_endutxent.3.gz OLD_FILES+=usr/share/man/man3/ulog_getutxent.3.gz OLD_FILES+=usr/share/man/man3/ulog_getutxline.3.gz OLD_FILES+=usr/share/man/man3/ulog_getutxuser.3.gz OLD_FILES+=usr/share/man/man3/ulog_pututxline.3.gz OLD_FILES+=usr/share/man/man3/ulog_setutxent.3.gz OLD_FILES+=usr/share/man/man3/ulog_setutxfile.3.gz OLD_FILES+=usr/share/man/man5/lastlog.5.gz OLD_FILES+=usr/share/man/man5/utmp.5.gz OLD_FILES+=usr/share/man/man5/wtmp.5.gz OLD_LIBS+=lib/libutil.so.8 OLD_LIBS+=usr/lib32/libutil.so.8 # 20100105: new userland semaphore implementation OLD_FILES+=usr/include/sys/semaphore.h # 20100103: ntptrace(8) removed OLD_FILES+=usr/sbin/ntptrace OLD_FILES+=usr/share/man/man8/ntptrace.8.gz # 20091229: remove no-longer-relevant examples OLD_FILES+=usr/share/examples/pppd/auth-down.sample OLD_FILES+=usr/share/examples/pppd/auth-up.sample OLD_FILES+=usr/share/examples/pppd/chap-secrets.sample OLD_FILES+=usr/share/examples/pppd/chat.sh.sample OLD_FILES+=usr/share/examples/pppd/ip-down.sample OLD_FILES+=usr/share/examples/pppd/ip-up.sample OLD_FILES+=usr/share/examples/pppd/options.sample OLD_FILES+=usr/share/examples/pppd/pap-secrets.sample OLD_FILES+=usr/share/examples/pppd/ppp.deny.sample OLD_FILES+=usr/share/examples/pppd/ppp.shells.sample OLD_DIRS+=usr/share/examples/pppd OLD_FILES+=usr/share/examples/slattach/unit-command.sh OLD_DIRS+=usr/share/examples/slattach OLD_FILES+=usr/share/examples/sliplogin/slip.hosts OLD_FILES+=usr/share/examples/sliplogin/slip.login OLD_FILES+=usr/share/examples/sliplogin/slip.logout OLD_FILES+=usr/share/examples/sliplogin/slip.slparms OLD_DIRS+=usr/share/examples/sliplogin OLD_FILES+=usr/share/examples/startslip/sldown.sh OLD_FILES+=usr/share/examples/startslip/slip.sh OLD_FILES+=usr/share/examples/startslip/slup.sh OLD_DIRS+=usr/share/examples/startslip # 20091202: unify rc.firewall and rc.firewall6.
OLD_FILES+=etc/rc.d/ip6fw OLD_FILES+=etc/rc.firewall6 OLD_FILES+=usr/share/examples/etc/rc.firewall6 # 20091117: removal of rc.early(8) link OLD_FILES+=usr/share/man/man8/rc.early.8.gz # 20091117: usr/share/zoneinfo/GMT link removed OLD_FILES+=usr/share/zoneinfo/GMT # 20091027: pselect.3 implemented as syscall OLD_FILES+=usr/share/man/man3/pselect.3.gz # 20091005: fusword.9 and susword.9 removed OLD_FILES+=usr/share/man/man9/fusword.9.gz OLD_FILES+=usr/share/man/man9/susword.9.gz # 20090909: vesa and dpms promoted to be i386/amd64 common OLD_FILES+=usr/include/machine/pc/vesa.h OLD_FILES+=usr/share/man/man4/i386/dpms.4.gz # 20090904: remove lukemftpd OLD_FILES+=usr/libexec/lukemftpd OLD_FILES+=usr/share/man/man5/ftpd.conf.5.gz OLD_FILES+=usr/share/man/man5/ftpusers.5.gz OLD_FILES+=usr/share/man/man8/lukemftpd.8.gz # 20090902: BSD.{x11,x11-4}.dist are dead and BSD.local.dist lives in ports/ OLD_FILES+=etc/mtree/BSD.local.dist OLD_FILES+=etc/mtree/BSD.x11.dist OLD_FILES+=etc/mtree/BSD.x11-4.dist # 20090812: net80211 documentation overhaul OLD_FILES+=usr/share/man/man9/ieee80211_add_rates.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_add_xrates.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_alloc_node.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_attach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_begin_scan.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_cfgget.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_cfgset.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_chan2ieee.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_chan2mode.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_create_ibss.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_crypto_attach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_crypto_detach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_decap.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_dump_pkt.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_dup_bss.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_encap.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_end_scan.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_find_node.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_fix_rate.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_free_allnodes.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_ieee2mhz.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_ioctl.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_lookup_node.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_media2rate.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_media_change.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_media_init.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_media_status.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_mhz2ieee.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_next_scan.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_node_attach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_node_detach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_node_lateattach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_print_essid.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_proto_attach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_proto_detach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_rate2media.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_recv_mgmt.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_send_mgmt.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_setmode.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_timeout_nodes.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_watchdog.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_wep_crypt.9.gz # 20090801: vimage.h removed in favour of vnet.h OLD_FILES+=usr/include/sys/vimage.h # 20101208: libbsnmp was moved to usr/lib OLD_LIBS+=lib/libbsnmp.so.5 # 20090719: 
library version bump for 8.0 OLD_LIBS+=lib/libalias.so.6 OLD_LIBS+=lib/libavl.so.1 OLD_LIBS+=lib/libbegemot.so.3 OLD_LIBS+=lib/libbsdxml.so.3 OLD_LIBS+=lib/libbsnmp.so.4 OLD_LIBS+=lib/libcam.so.4 OLD_LIBS+=lib/libcrypt.so.4 OLD_LIBS+=lib/libcrypto.so.5 OLD_LIBS+=lib/libctf.so.1 OLD_LIBS+=lib/libdevstat.so.6 OLD_LIBS+=lib/libdtrace.so.1 OLD_LIBS+=lib/libedit.so.6 OLD_LIBS+=lib/libgeom.so.4 OLD_LIBS+=lib/libipsec.so.3 OLD_LIBS+=lib/libipx.so.4 OLD_LIBS+=lib/libkiconv.so.3 OLD_LIBS+=lib/libkvm.so.4 OLD_LIBS+=lib/libmd.so.4 OLD_LIBS+=lib/libncurses.so.7 OLD_LIBS+=lib/libncursesw.so.7 OLD_LIBS+=lib/libnvpair.so.1 OLD_LIBS+=lib/libpcap.so.6 OLD_LIBS+=lib/libreadline.so.7 OLD_LIBS+=lib/libsbuf.so.4 OLD_LIBS+=lib/libufs.so.4 OLD_LIBS+=lib/libumem.so.1 OLD_LIBS+=lib/libutil.so.7 OLD_LIBS+=lib/libuutil.so.1 OLD_LIBS+=lib/libz.so.4 OLD_LIBS+=lib/libzfs.so.1 OLD_LIBS+=lib/libzpool.so.1 OLD_LIBS+=usr/lib/libarchive.so.4 OLD_LIBS+=usr/lib/libauditd.so.4 OLD_LIBS+=usr/lib/libbluetooth.so.3 OLD_LIBS+=usr/lib/libbsm.so.2 OLD_LIBS+=usr/lib/libbz2.so.3 OLD_LIBS+=usr/lib/libcalendar.so.4 OLD_LIBS+=usr/lib/libcom_err.so.4 OLD_LIBS+=usr/lib/libdevinfo.so.4 OLD_LIBS+=usr/lib/libdialog.so.6 OLD_LIBS+=usr/lib/libdwarf.so.1 OLD_LIBS+=usr/lib/libfetch.so.5 OLD_LIBS+=usr/lib/libform.so.4 OLD_LIBS+=usr/lib/libformw.so.4 OLD_LIBS+=usr/lib/libftpio.so.7 OLD_LIBS+=usr/lib/libgnuregex.so.4 OLD_LIBS+=usr/lib/libgpib.so.2 OLD_LIBS+=usr/lib/libhistory.so.7 OLD_LIBS+=usr/lib/libmagic.so.3 OLD_LIBS+=usr/lib/libmemstat.so.2 OLD_LIBS+=usr/lib/libmenu.so.4 OLD_LIBS+=usr/lib/libmenuw.so.4 OLD_LIBS+=usr/lib/libmilter.so.4 OLD_LIBS+=usr/lib/libncp.so.3 OLD_LIBS+=usr/lib/libnetgraph.so.3 OLD_LIBS+=usr/lib/libngatm.so.3 OLD_LIBS+=usr/lib/libobjc.so.3 OLD_LIBS+=usr/lib/libopie.so.5 OLD_LIBS+=usr/lib/libpam.so.4 OLD_LIBS+=usr/lib/libpanel.so.4 OLD_LIBS+=usr/lib/libpanelw.so.4 OLD_LIBS+=usr/lib/libpmc.so.4 OLD_LIBS+=usr/lib/libproc.so.1 OLD_LIBS+=usr/lib/libradius.so.3 OLD_LIBS+=usr/lib/librpcsvc.so.4 OLD_LIBS+=usr/lib/libsdp.so.3 OLD_LIBS+=usr/lib/libsmb.so.3 OLD_LIBS+=usr/lib/libssh.so.4 OLD_LIBS+=usr/lib/libssl.so.5 OLD_LIBS+=usr/lib/libtacplus.so.3 OLD_LIBS+=usr/lib/libugidfw.so.3 OLD_LIBS+=usr/lib/libusb.so.1 OLD_LIBS+=usr/lib/libusbhid.so.3 OLD_LIBS+=usr/lib/libvgl.so.5 OLD_LIBS+=usr/lib/libwrap.so.5 OLD_LIBS+=usr/lib/libypclnt.so.3 OLD_LIBS+=usr/lib/pam_chroot.so.4 OLD_LIBS+=usr/lib/pam_deny.so.4 OLD_LIBS+=usr/lib/pam_echo.so.4 OLD_LIBS+=usr/lib/pam_exec.so.4 OLD_LIBS+=usr/lib/pam_ftpusers.so.4 OLD_LIBS+=usr/lib/pam_group.so.4 OLD_LIBS+=usr/lib/pam_guest.so.4 OLD_LIBS+=usr/lib/pam_krb5.so.4 OLD_LIBS+=usr/lib/pam_ksu.so.4 OLD_LIBS+=usr/lib/pam_lastlog.so.4 OLD_LIBS+=usr/lib/pam_login_access.so.4 OLD_LIBS+=usr/lib/pam_nologin.so.4 OLD_LIBS+=usr/lib/pam_opie.so.4 OLD_LIBS+=usr/lib/pam_opieaccess.so.4 OLD_LIBS+=usr/lib/pam_passwdqc.so.4 OLD_LIBS+=usr/lib/pam_permit.so.4 OLD_LIBS+=usr/lib/pam_radius.so.4 OLD_LIBS+=usr/lib/pam_rhosts.so.4 OLD_LIBS+=usr/lib/pam_rootok.so.4 OLD_LIBS+=usr/lib/pam_securetty.so.4 OLD_LIBS+=usr/lib/pam_self.so.4 OLD_LIBS+=usr/lib/pam_ssh.so.4 OLD_LIBS+=usr/lib/pam_tacplus.so.4 OLD_LIBS+=usr/lib/pam_unix.so.4 OLD_LIBS+=usr/lib/snmp_atm.so.5 OLD_LIBS+=usr/lib/snmp_bridge.so.5 OLD_LIBS+=usr/lib/snmp_hostres.so.5 OLD_LIBS+=usr/lib/snmp_mibII.so.5 OLD_LIBS+=usr/lib/snmp_netgraph.so.5 OLD_LIBS+=usr/lib/snmp_pf.so.5 OLD_LIBS+=usr/lib32/libalias.so.6 OLD_LIBS+=usr/lib32/libarchive.so.4 OLD_LIBS+=usr/lib32/libauditd.so.4 OLD_LIBS+=usr/lib32/libavl.so.1 OLD_LIBS+=usr/lib32/libbegemot.so.3 
OLD_LIBS+=usr/lib32/libbluetooth.so.3 OLD_LIBS+=usr/lib32/libbsdxml.so.3 OLD_LIBS+=usr/lib32/libbsm.so.2 OLD_LIBS+=usr/lib32/libbsnmp.so.4 OLD_LIBS+=usr/lib32/libbz2.so.3 OLD_LIBS+=usr/lib32/libcalendar.so.4 OLD_LIBS+=usr/lib32/libcam.so.4 OLD_LIBS+=usr/lib32/libcom_err.so.4 OLD_LIBS+=usr/lib32/libcrypt.so.4 OLD_LIBS+=usr/lib32/libcrypto.so.5 OLD_LIBS+=usr/lib32/libctf.so.1 OLD_LIBS+=usr/lib32/libdevinfo.so.4 OLD_LIBS+=usr/lib32/libdevstat.so.6 OLD_LIBS+=usr/lib32/libdialog.so.6 OLD_LIBS+=usr/lib32/libdtrace.so.1 OLD_LIBS+=usr/lib32/libdwarf.so.1 OLD_LIBS+=usr/lib32/libedit.so.6 OLD_LIBS+=usr/lib32/libfetch.so.5 OLD_LIBS+=usr/lib32/libform.so.4 OLD_LIBS+=usr/lib32/libformw.so.4 OLD_LIBS+=usr/lib32/libftpio.so.7 OLD_LIBS+=usr/lib32/libgeom.so.4 OLD_LIBS+=usr/lib32/libgnuregex.so.4 OLD_LIBS+=usr/lib32/libgpib.so.2 OLD_LIBS+=usr/lib32/libhistory.so.7 OLD_LIBS+=usr/lib32/libipsec.so.3 OLD_LIBS+=usr/lib32/libipx.so.4 OLD_LIBS+=usr/lib32/libkiconv.so.3 OLD_LIBS+=usr/lib32/libkvm.so.4 OLD_LIBS+=usr/lib32/libmagic.so.3 OLD_LIBS+=usr/lib32/libmd.so.4 OLD_LIBS+=usr/lib32/libmemstat.so.2 OLD_LIBS+=usr/lib32/libmenu.so.4 OLD_LIBS+=usr/lib32/libmenuw.so.4 OLD_LIBS+=usr/lib32/libmilter.so.4 OLD_LIBS+=usr/lib32/libncp.so.3 OLD_LIBS+=usr/lib32/libncurses.so.7 OLD_LIBS+=usr/lib32/libncursesw.so.7 OLD_LIBS+=usr/lib32/libnetgraph.so.3 OLD_LIBS+=usr/lib32/libngatm.so.3 OLD_LIBS+=usr/lib32/libnvpair.so.1 OLD_LIBS+=usr/lib32/libobjc.so.3 OLD_LIBS+=usr/lib32/libopie.so.5 OLD_LIBS+=usr/lib32/libpam.so.4 OLD_LIBS+=usr/lib32/libpanel.so.4 OLD_LIBS+=usr/lib32/libpanelw.so.4 OLD_LIBS+=usr/lib32/libpcap.so.6 OLD_LIBS+=usr/lib32/libpmc.so.4 OLD_LIBS+=usr/lib32/libproc.so.1 OLD_LIBS+=usr/lib32/libradius.so.3 OLD_LIBS+=usr/lib32/libreadline.so.7 OLD_LIBS+=usr/lib32/librpcsvc.so.4 OLD_LIBS+=usr/lib32/libsbuf.so.4 OLD_LIBS+=usr/lib32/libsdp.so.3 OLD_LIBS+=usr/lib32/libsmb.so.3 OLD_LIBS+=usr/lib32/libssh.so.4 OLD_LIBS+=usr/lib32/libssl.so.5 OLD_LIBS+=usr/lib32/libtacplus.so.3 OLD_LIBS+=usr/lib32/libufs.so.4 OLD_LIBS+=usr/lib32/libugidfw.so.3 OLD_LIBS+=usr/lib32/libumem.so.1 OLD_LIBS+=usr/lib32/libusb.so.1 OLD_LIBS+=usr/lib32/libusbhid.so.3 OLD_LIBS+=usr/lib32/libutil.so.7 OLD_LIBS+=usr/lib32/libuutil.so.1 OLD_LIBS+=usr/lib32/libvgl.so.5 OLD_LIBS+=usr/lib32/libwrap.so.5 OLD_LIBS+=usr/lib32/libypclnt.so.3 OLD_LIBS+=usr/lib32/libz.so.4 OLD_LIBS+=usr/lib32/libzfs.so.1 OLD_LIBS+=usr/lib32/libzpool.so.1 OLD_LIBS+=usr/lib32/pam_chroot.so.4 OLD_LIBS+=usr/lib32/pam_deny.so.4 OLD_LIBS+=usr/lib32/pam_echo.so.4 OLD_LIBS+=usr/lib32/pam_exec.so.4 OLD_LIBS+=usr/lib32/pam_ftpusers.so.4 OLD_LIBS+=usr/lib32/pam_group.so.4 OLD_LIBS+=usr/lib32/pam_guest.so.4 OLD_LIBS+=usr/lib32/pam_krb5.so.4 OLD_LIBS+=usr/lib32/pam_ksu.so.4 OLD_LIBS+=usr/lib32/pam_lastlog.so.4 OLD_LIBS+=usr/lib32/pam_login_access.so.4 OLD_LIBS+=usr/lib32/pam_nologin.so.4 OLD_LIBS+=usr/lib32/pam_opie.so.4 OLD_LIBS+=usr/lib32/pam_opieaccess.so.4 OLD_LIBS+=usr/lib32/pam_passwdqc.so.4 OLD_LIBS+=usr/lib32/pam_permit.so.4 OLD_LIBS+=usr/lib32/pam_radius.so.4 OLD_LIBS+=usr/lib32/pam_rhosts.so.4 OLD_LIBS+=usr/lib32/pam_rootok.so.4 OLD_LIBS+=usr/lib32/pam_securetty.so.4 OLD_LIBS+=usr/lib32/pam_self.so.4 OLD_LIBS+=usr/lib32/pam_ssh.so.4 OLD_LIBS+=usr/lib32/pam_tacplus.so.4 OLD_LIBS+=usr/lib32/pam_unix.so.4 # 20090718: the gdm pam.d file is no longer required. OLD_FILES+=etc/pam.d/gdm # 20090714: net_add_domain(9) renamed to domain_add(9) OLD_FILES+=usr/share/man/man9/net_add_domain.9.gz # 20090713: vimage container structs removed. 
OLD_FILES+=usr/include/netinet/vinet.h OLD_FILES+=usr/include/netinet6/vinet6.h OLD_FILES+=usr/include/netipsec/vipsec.h # 20090712: ieee80211.4 -> net80211.4 OLD_FILES+=usr/share/man/man4/ieee80211.4.gz # 20090711: typo fixed, kproc_resume,.9 -> kproc_resume.9 OLD_FILES+=usr/share/man/man9/kproc_resume,.9.gz # 20090709: msgctl.3 msgget.3 msgrcv.3 msgsnd.3 manual pages moved OLD_FILES+=usr/share/man/man3/msgctl.3.gz OLD_FILES+=usr/share/man/man3/msgget.3.gz OLD_FILES+=usr/share/man/man3/msgrcv.3.gz OLD_FILES+=usr/share/man/man3/msgsnd.3.gz # 20090630: old kernel RPC implementation removal OLD_FILES+=usr/include/nfs/rpcv2.h # 20090624: update usbdi(9) OLD_FILES+=usr/share/man/man9/usbd_abort_default_pipe.9.gz OLD_FILES+=usr/share/man/man9/usbd_abort_pipe.9.gz OLD_FILES+=usr/share/man/man9/usbd_alloc_buffer.9.gz OLD_FILES+=usr/share/man/man9/usbd_alloc_xfer.9.gz OLD_FILES+=usr/share/man/man9/usbd_clear_endpoint_stall.9.gz OLD_FILES+=usr/share/man/man9/usbd_clear_endpoint_stall_async.9.gz OLD_FILES+=usr/share/man/man9/usbd_clear_endpoint_toggle.9.gz OLD_FILES+=usr/share/man/man9/usbd_close_pipe.9.gz OLD_FILES+=usr/share/man/man9/usbd_device2interface_handle.9.gz OLD_FILES+=usr/share/man/man9/usbd_do_request_async.9.gz OLD_FILES+=usr/share/man/man9/usbd_do_request_flags_pipe.9.gz OLD_FILES+=usr/share/man/man9/usbd_endpoint_count.9.gz OLD_FILES+=usr/share/man/man9/usbd_find_edesc.9.gz OLD_FILES+=usr/share/man/man9/usbd_find_idesc.9.gz OLD_FILES+=usr/share/man/man9/usbd_free_buffer.9.gz OLD_FILES+=usr/share/man/man9/usbd_free_xfer.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_buffer.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_config.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_config_desc.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_config_desc_full.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_config_descriptor.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_device_descriptor.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_endpoint_descriptor.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_interface_altindex.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_interface_descriptor.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_no_alts.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_quirks.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_speed.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_string.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_string_desc.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_xfer_status.9.gz OLD_FILES+=usr/share/man/man9/usbd_interface2device_handle.9.gz OLD_FILES+=usr/share/man/man9/usbd_interface2endpoint_descriptor.9.gz OLD_FILES+=usr/share/man/man9/usbd_interface_count.9.gz OLD_FILES+=usr/share/man/man9/usbd_open_pipe.9.gz OLD_FILES+=usr/share/man/man9/usbd_open_pipe_intr.9.gz OLD_FILES+=usr/share/man/man9/usbd_pipe2device_handle.9.gz OLD_FILES+=usr/share/man/man9/usbd_set_config_index.9.gz OLD_FILES+=usr/share/man/man9/usbd_set_config_no.9.gz OLD_FILES+=usr/share/man/man9/usbd_set_interface.9.gz OLD_FILES+=usr/share/man/man9/usbd_setup_default_xfer.9.gz OLD_FILES+=usr/share/man/man9/usbd_setup_isoc_xfer.9.gz OLD_FILES+=usr/share/man/man9/usbd_setup_xfer.9.gz OLD_FILES+=usr/share/man/man9/usbd_sync_transfer.9.gz OLD_FILES+=usr/share/man/man9/usbd_transfer.9.gz OLD_FILES+=usr/share/man/man9/usb_find_desc.9.gz # 20090623: number of headers needed for a usb driver reduced OLD_FILES+=usr/include/dev/usb/usb_defs.h OLD_FILES+=usr/include/dev/usb/usb_error.h OLD_FILES+=usr/include/dev/usb/usb_handle_request.h OLD_FILES+=usr/include/dev/usb/usb_hid.h OLD_FILES+=usr/include/dev/usb/usb_lookup.h 
OLD_FILES+=usr/include/dev/usb/usb_mfunc.h OLD_FILES+=usr/include/dev/usb/usb_parse.h OLD_FILES+=usr/include/dev/usb/usb_revision.h # 20090609: devclass_add_driver is no longer public OLD_FILES+=usr/share/man/man9/devclass_add_driver.9.gz OLD_FILES+=usr/share/man/man9/devclass_delete_driver.9.gz OLD_FILES+=usr/share/man/man9/devclass_find_driver.9.gz # 20090605: removal of clists OLD_FILES+=usr/include/sys/clist.h # 20090602: removal of window(1) OLD_FILES+=usr/bin/window OLD_FILES+=usr/share/man/man1/window.1.gz # 20090531: bind 9.6.1rc1 import OLD_LIBS+=usr/lib/liblwres.so.30 # 20090530: removal of early.sh OLD_FILES+=etc/rc.d/early.sh # 20090527: renaming of S{LIST,TAILQ}_REMOVE_NEXT() to _REMOVE_AFTER() OLD_FILES+=usr/share/man/man3/SLIST_REMOVE_NEXT.3.gz OLD_FILES+=usr/share/man/man3/STAILQ_REMOVE_NEXT.3.gz # 20090527: removal of legacy USB stack OLD_FILES+=usr/include/legacy/dev/usb/dsbr100io.h OLD_FILES+=usr/include/legacy/dev/usb/ehcireg.h OLD_FILES+=usr/include/legacy/dev/usb/ehcivar.h OLD_FILES+=usr/include/legacy/dev/usb/hid.h OLD_FILES+=usr/include/legacy/dev/usb/if_urtwreg.h OLD_FILES+=usr/include/legacy/dev/usb/if_urtwvar.h OLD_FILES+=usr/include/legacy/dev/usb/ohcireg.h OLD_FILES+=usr/include/legacy/dev/usb/ohcivar.h OLD_FILES+=usr/include/legacy/dev/usb/rio500_usb.h OLD_FILES+=usr/include/legacy/dev/usb/rt2573_ucode.h OLD_FILES+=usr/include/legacy/dev/usb/sl811hsreg.h OLD_FILES+=usr/include/legacy/dev/usb/sl811hsvar.h OLD_FILES+=usr/include/legacy/dev/usb/ubser.h OLD_FILES+=usr/include/legacy/dev/usb/ucomvar.h OLD_FILES+=usr/include/legacy/dev/usb/udbp.h OLD_FILES+=usr/include/legacy/dev/usb/uftdireg.h OLD_FILES+=usr/include/legacy/dev/usb/ugraphire_rdesc.h OLD_FILES+=usr/include/legacy/dev/usb/uhcireg.h OLD_FILES+=usr/include/legacy/dev/usb/uhcivar.h OLD_FILES+=usr/include/legacy/dev/usb/usb.h OLD_FILES+=usr/include/legacy/dev/usb/usb_mem.h OLD_FILES+=usr/include/legacy/dev/usb/usb_port.h OLD_FILES+=usr/include/legacy/dev/usb/usb_quirks.h OLD_FILES+=usr/include/legacy/dev/usb/usbcdc.h OLD_FILES+=usr/include/legacy/dev/usb/usbdi.h OLD_FILES+=usr/include/legacy/dev/usb/usbdi_util.h OLD_FILES+=usr/include/legacy/dev/usb/usbdivar.h OLD_FILES+=usr/include/legacy/dev/usb/usbhid.h OLD_FILES+=usr/include/legacy/dev/usb/uxb360gp_rdesc.h OLD_DIRS+=usr/include/legacy/dev/usb OLD_DIRS+=usr/include/legacy/dev OLD_DIRS+=usr/include/legacy # 20090526: removal of makekey(8) OLD_FILES+=usr/libexec/makekey OLD_FILES+=usr/share/man/man8/makekey.8.gz # 20090522: removal of University of Michigan NFSv4 client OLD_FILES+=etc/rc.d/idmapd OLD_FILES+=sbin/idmapd OLD_FILES+=sbin/mount_nfs4 OLD_FILES+=usr/share/man/man8/idmapd.8.gz OLD_FILES+=usr/share/man/man8/mount_nfs4.8.gz # 20090513: removal of legacy versions of USB network interface drivers OLD_FILES+=usr/include/legacy/dev/usb/if_upgtvar.h OLD_FILES+=usr/include/legacy/dev/usb/usb_ethersubr.h # 20090417: removal of legacy versions of USB network interface drivers OLD_FILES+=usr/include/legacy/dev/usb/if_auereg.h OLD_FILES+=usr/include/legacy/dev/usb/if_axereg.h OLD_FILES+=usr/include/legacy/dev/usb/if_cdcereg.h OLD_FILES+=usr/include/legacy/dev/usb/if_cuereg.h OLD_FILES+=usr/include/legacy/dev/usb/if_kuereg.h OLD_FILES+=usr/include/legacy/dev/usb/if_ruereg.h OLD_FILES+=usr/include/legacy/dev/usb/if_rumreg.h OLD_FILES+=usr/include/legacy/dev/usb/if_rumvar.h OLD_FILES+=usr/include/legacy/dev/usb/if_udavreg.h OLD_FILES+=usr/include/legacy/dev/usb/if_uralreg.h OLD_FILES+=usr/include/legacy/dev/usb/if_uralvar.h 
OLD_FILES+=usr/include/legacy/dev/usb/if_zydfw.h OLD_FILES+=usr/include/legacy/dev/usb/if_zydreg.h OLD_FILES+=usr/include/legacy/dev/usb/kue_fw.h # 20090416: removal of ar(4), ray(4), sr(4), raycontrol(8) OLD_FILES+=usr/sbin/raycontrol OLD_FILES+=usr/share/man/man4/i386/ar.4.gz OLD_FILES+=usr/share/man/man4/i386/ray.4.gz OLD_FILES+=usr/share/man/man4/i386/sr.4.gz OLD_FILES+=usr/share/man/man8/raycontrol.8.gz # 20090410: VOP_LEASE.9 removed OLD_FILES+=usr/share/man/man9/VOP_LEASE.9.gz # 20090406: usb_sw_transfer.h removed OLD_FILES+=usr/include/dev/usb/usb_sw_transfer.h # 20090405: removal of if_ppp(4) and if_sl(4) OLD_FILES+=sbin/slattach rescue/slattach OLD_FILES+=sbin/startslip rescue/startslip OLD_FILES+=usr/include/net/if_ppp.h OLD_FILES+=usr/include/net/if_pppvar.h OLD_FILES+=usr/include/net/if_slvar.h OLD_FILES+=usr/include/net/ppp_comp.h OLD_FILES+=usr/include/net/slip.h OLD_FILES+=usr/sbin/sliplogin OLD_FILES+=usr/sbin/slstat OLD_FILES+=usr/sbin/pppd OLD_FILES+=usr/sbin/pppstats OLD_FILES+=usr/share/man/man1/startslip.1.gz OLD_FILES+=usr/share/man/man4/if_ppp.4.gz OLD_FILES+=usr/share/man/man4/if_sl.4.gz OLD_FILES+=usr/share/man/man4/ppp.4.gz OLD_FILES+=usr/share/man/man4/sl.4.gz OLD_FILES+=usr/share/man/man8/pppd.8.gz OLD_FILES+=usr/share/man/man8/pppstats.8.gz OLD_FILES+=usr/share/man/man8/slattach.8.gz OLD_FILES+=usr/share/man/man8/slip.8.gz OLD_FILES+=usr/share/man/man8/sliplogin.8.gz OLD_FILES+=usr/share/man/man8/slstat.8.gz # 20090321: libpcap upgraded to 1.0.0 OLD_LIBS+=lib/libpcap.so.5 OLD_LIBS+=usr/lib32/libpcap.so.5 # 20090319: uscanner(4) has been removed OLD_FILES+=usr/share/man/man4/uscanner.4.gz # 20090313: k8temp(4) renamed to amdtemp(4) OLD_FILES+=usr/share/man/man4/k8temp.4.gz # 20090308: libusb.so.1 renamed OLD_LIBS+=usr/lib/libusb20.so.1 OLD_FILES+=usr/lib/libusb20.a OLD_FILES+=usr/lib/libusb20.so OLD_FILES+=usr/lib/libusb20_p.a OLD_FILES+=usr/include/libusb20_compat01.h OLD_FILES+=usr/include/libusb20_compat10.h OLD_LIBS+=usr/lib32/libusb20.so.1 OLD_FILES+=usr/lib32/libusb20.a OLD_FILES+=usr/lib32/libusb20.so OLD_FILES+=usr/lib32/libusb20_p.a # 20090226: libmp(3) functions renamed OLD_LIBS+=usr/lib/libmp.so.6 OLD_LIBS+=usr/lib32/libmp.so.6 # 20090223: changeover of USB stacks OLD_FILES+=usr/include/dev/usb2/include/ufm2_ioctl.h OLD_FILES+=usr/include/dev/usb2/include/urio2_ioctl.h OLD_FILES+=usr/include/dev/usb2/include/usb2_cdc.h OLD_FILES+=usr/include/dev/usb2/include/usb2_defs.h OLD_FILES+=usr/include/dev/usb2/include/usb2_devid.h OLD_FILES+=usr/include/dev/usb2/include/usb2_devtable.h OLD_FILES+=usr/include/dev/usb2/include/usb2_endian.h OLD_FILES+=usr/include/dev/usb2/include/usb2_error.h OLD_FILES+=usr/include/dev/usb2/include/usb2_hid.h OLD_FILES+=usr/include/dev/usb2/include/usb2_ioctl.h OLD_FILES+=usr/include/dev/usb2/include/usb2_mfunc.h OLD_FILES+=usr/include/dev/usb2/include/usb2_revision.h OLD_FILES+=usr/include/dev/usb2/include/usb2_standard.h OLD_DIRS+=usr/include/dev/usb2/include OLD_DIRS+=usr/include/dev/usb2 OLD_FILES+=usr/include/dev/usb/dsbr100io.h OLD_FILES+=usr/include/dev/usb/ehcireg.h OLD_FILES+=usr/include/dev/usb/ehcivar.h OLD_FILES+=usr/include/dev/usb/hid.h OLD_FILES+=usr/include/dev/usb/if_auereg.h OLD_FILES+=usr/include/dev/usb/if_axereg.h OLD_FILES+=usr/include/dev/usb/if_cdcereg.h OLD_FILES+=usr/include/dev/usb/if_cuereg.h OLD_FILES+=usr/include/dev/usb/if_kuereg.h OLD_FILES+=usr/include/dev/usb/if_ruereg.h OLD_FILES+=usr/include/dev/usb/if_rumreg.h OLD_FILES+=usr/include/dev/usb/if_rumvar.h 
OLD_FILES+=usr/include/dev/usb/if_udavreg.h OLD_FILES+=usr/include/dev/usb/if_upgtvar.h OLD_FILES+=usr/include/dev/usb/if_uralreg.h OLD_FILES+=usr/include/dev/usb/if_uralvar.h OLD_FILES+=usr/include/dev/usb/if_urtwreg.h OLD_FILES+=usr/include/dev/usb/if_urtwvar.h OLD_FILES+=usr/include/dev/usb/if_zydfw.h OLD_FILES+=usr/include/dev/usb/if_zydreg.h OLD_FILES+=usr/include/dev/usb/kue_fw.h OLD_FILES+=usr/include/dev/usb/ohcireg.h OLD_FILES+=usr/include/dev/usb/ohcivar.h OLD_FILES+=usr/include/dev/usb/rio500_usb.h OLD_FILES+=usr/include/dev/usb/rt2573_ucode.h OLD_FILES+=usr/include/dev/usb/sl811hsreg.h OLD_FILES+=usr/include/dev/usb/sl811hsvar.h OLD_FILES+=usr/include/dev/usb/ubser.h OLD_FILES+=usr/include/dev/usb/ucomvar.h OLD_FILES+=usr/include/dev/usb/udbp.h OLD_FILES+=usr/include/dev/usb/uftdireg.h OLD_FILES+=usr/include/dev/usb/ugraphire_rdesc.h OLD_FILES+=usr/include/dev/usb/uhcireg.h OLD_FILES+=usr/include/dev/usb/uhcivar.h OLD_FILES+=usr/include/dev/usb/usb_ethersubr.h OLD_FILES+=usr/include/dev/usb/usb_mem.h OLD_FILES+=usr/include/dev/usb/usb_port.h OLD_FILES+=usr/include/dev/usb/usb_quirks.h OLD_FILES+=usr/include/dev/usb/usbcdc.h OLD_FILES+=usr/include/dev/usb/usbdivar.h OLD_FILES+=usr/include/dev/usb/uxb360gp_rdesc.h OLD_FILES+=usr/sbin/usbdevs OLD_FILES+=usr/share/man/man8/usbdevs.8.gz # 20090203: removal of pccard header files OLD_FILES+=usr/include/pccard/cardinfo.h OLD_FILES+=usr/include/pccard/cis.h OLD_DIRS+=usr/include/pccard # 20090203: adding_user.8 moved to adding_user.7 OLD_FILES+=usr/share/man/man8/adding_user.8.gz # 20090122: tzdata2009a import OLD_FILES+=usr/share/zoneinfo/Asia/Katmandu # 20090102: file 4.26 import OLD_FILES+=usr/share/misc/magic.mime OLD_FILES+=usr/share/misc/magic.mime.mgc # 20081223: bind 9.4.3 import, nsupdate.8 moved to nsupdate.1 OLD_FILES+=usr/share/man/man8/nsupdate.8.gz # 20081223: ipprotosw.h removed OLD_FILES+=usr/include/netinet/ipprotosw.h # 20081123: vfs_mountedon.9 removed OLD_FILES+=usr/share/man/man9/vfs_mountedon.9.gz # 20081023: FREE.9 and MALLOC.9 removed OLD_FILES+=usr/share/man/man9/FREE.9.gz OLD_FILES+=usr/share/man/man9/MALLOC.9.gz # 20080928: removal of inaccurate device_ids(9) manual page OLD_FILES+=usr/share/man/man9/device_ids.9.gz OLD_FILES+=usr/share/man/man9/major.9.gz OLD_FILES+=usr/share/man/man9/minor.9.gz OLD_FILES+=usr/share/man/man9/umajor.9.gz OLD_FILES+=usr/share/man/man9/uminor.9.gz # 20080917: removal of manpage for axed kernel primitive suser(9) OLD_FILES+=usr/share/man/man9/suser.9.gz OLD_FILES+=usr/share/man/man9/suser_cred.9.gz # 20080913: pax removed from rescue OLD_FILES+=rescue/pax # 20080823: removal of pt_chown, no longer needed to implement grantpt(3) OLD_FILES+=usr/libexec/pt_chown # 20080822: ntp 4.2.4p5 import OLD_FILES+=usr/share/doc/ntp/driver23.html OLD_FILES+=usr/share/doc/ntp/driver24.html # 20080821: several man pages moved from man4.i386 to man4 .if ${TARGET_ARCH} == "i386" OLD_FILES+=usr/share/man/man4/i386/acpi_aiboost.4.gz OLD_FILES+=usr/share/man/man4/i386/acpi_asus.4.gz OLD_FILES+=usr/share/man/man4/i386/acpi_fujitsu.4.gz OLD_FILES+=usr/share/man/man4/i386/acpi_ibm.4.gz OLD_FILES+=usr/share/man/man4/i386/acpi_panasonic.4.gz OLD_FILES+=usr/share/man/man4/i386/acpi_sony.4.gz OLD_FILES+=usr/share/man/man4/i386/acpi_toshiba.4.gz OLD_FILES+=usr/share/man/man4/i386/ichwd.4.gz OLD_FILES+=usr/share/man/man4/i386/if_ndis.4.gz OLD_FILES+=usr/share/man/man4/i386/io.4.gz OLD_FILES+=usr/share/man/man4/i386/linux.4.gz OLD_FILES+=usr/share/man/man4/i386/ndis.4.gz .endif # 20080820: MPSAFE TTY layer integrated
OLD_FILES+=usr/include/sys/linedisc.h OLD_FILES+=usr/share/man/man3/posix_openpt.3.gz # 20080725: sgtty.h removed OLD_FILES+=usr/include/sgtty.h # 20080706: bsdlabel(8) removed on powerpc .if ${TARGET_ARCH} == "powerpc" OLD_FILES+=sbin/bsdlabel OLD_FILES+=usr/share/man/man8/bsdlabel.8.gz .endif # 20080704: sbsh(4) removed OLD_FILES+=usr/share/man/man4/if_sbsh.4.gz OLD_FILES+=usr/share/man/man4/sbsh.4.gz # 20080704: cnw(4) removed OLD_FILES+=usr/share/man/man4/if_cnw.4.gz OLD_FILES+=usr/share/man/man4/cnw.4.gz # 20080704: oltr(4) removed .if ${TARGET_ARCH} == "i386" OLD_FILES+=usr/share/man/man4/i386/if_oltr.4.gz OLD_FILES+=usr/share/man/man4/i386/oltr.4.gz .endif # 20080704: arl(4) removed .if ${TARGET_ARCH} == "i386" OLD_FILES+=usr/sbin/arlcontrol OLD_FILES+=usr/share/man/man4/i386/arl.4.gz OLD_FILES+=usr/share/man/man8/arlcontrol.8.gz .endif # 20080703: sunlabel only for sparc64 .if ${TARGET_ARCH} != "sparc64" OLD_FILES+=sbin/sunlabel OLD_FILES+=usr/share/man/man8/sunlabel.8.gz .endif # 20080701: wpa_supplicant.conf moved to share/examples/etc/ OLD_FILES+=usr/share/examples/wpa_supplicant/wpa_supplicant.conf OLD_DIRS+=usr/share/examples/wpa_supplicant # 20080614: pecoff image activator removed .if ${TARGET_ARCH} == "i386" OLD_FILES+=usr/include/machine/pecoff_machdep.h .endif # 20080614: sgtty removed OLD_FILES+=usr/include/sys/ttychars.h OLD_FILES+=usr/include/sys/ttydev.h OLD_FILES+=usr/share/man/man3/gtty.3.gz OLD_FILES+=usr/share/man/man3/stty.3.gz # 20080609: gpt(8) removed OLD_FILES+=sbin/gpt OLD_FILES+=usr/share/man/man8/gpt.8.gz # 20080525: I4B removed OLD_FILES+=etc/isdn/answer OLD_FILES+=etc/isdn/isdntel OLD_FILES+=etc/isdn/record OLD_FILES+=etc/isdn/tell OLD_FILES+=etc/isdn/tell-record OLD_FILES+=etc/isdn/unknown_incoming OLD_FILES+=etc/isdn/holidays.D OLD_FILES+=etc/isdn/isdnd.rates.A OLD_FILES+=etc/isdn/isdnd.rates.D OLD_FILES+=etc/isdn/isdnd.rates.F OLD_FILES+=etc/isdn/isdnd.rates.L OLD_FILES+=etc/isdn/isdnd.rates.UK.BT OLD_FILES+=etc/isdn/isdnd.rc.sample OLD_FILES+=etc/isdn/isdntel.alias.sample OLD_DIRS+=etc/isdn OLD_FILES+=etc/rc.d/isdnd OLD_FILES+=usr/include/i4b/i4b_cause.h OLD_FILES+=usr/include/i4b/i4b_debug.h OLD_FILES+=usr/include/i4b/i4b_ioctl.h OLD_FILES+=usr/include/i4b/i4b_rbch_ioctl.h OLD_FILES+=usr/include/i4b/i4b_tel_ioctl.h OLD_FILES+=usr/include/i4b/i4b_trace.h OLD_DIRS+=usr/include/i4b OLD_FILES+=usr/sbin/dtmfdecode OLD_FILES+=usr/sbin/g711conv OLD_FILES+=usr/sbin/isdnd OLD_FILES+=usr/sbin/isdndebug OLD_FILES+=usr/sbin/isdndecode OLD_FILES+=usr/sbin/isdnmonitor OLD_FILES+=usr/sbin/isdnphone OLD_FILES+=usr/sbin/isdntel OLD_FILES+=usr/sbin/isdntelctl OLD_FILES+=usr/sbin/isdntrace OLD_FILES+=usr/share/isdn/0.al OLD_FILES+=usr/share/isdn/1.al OLD_FILES+=usr/share/isdn/2.al OLD_FILES+=usr/share/isdn/3.al OLD_FILES+=usr/share/isdn/4.al OLD_FILES+=usr/share/isdn/5.al OLD_FILES+=usr/share/isdn/6.al OLD_FILES+=usr/share/isdn/7.al OLD_FILES+=usr/share/isdn/8.al OLD_FILES+=usr/share/isdn/9.al OLD_FILES+=usr/share/isdn/beep.al OLD_FILES+=usr/share/isdn/msg.al OLD_DIRS+=usr/share/isdn OLD_FILES+=usr/share/man/man1/dtmfdecode.1.gz OLD_FILES+=usr/share/man/man1/g711conv.1.gz OLD_FILES+=usr/share/man/man4/i4b.4.gz OLD_FILES+=usr/share/man/man4/i4bcapi.4.gz OLD_FILES+=usr/share/man/man4/i4bctl.4.gz OLD_FILES+=usr/share/man/man4/i4bing.4.gz OLD_FILES+=usr/share/man/man4/i4bipr.4.gz OLD_FILES+=usr/share/man/man4/i4bisppp.4.gz OLD_FILES+=usr/share/man/man4/i4bq921.4.gz OLD_FILES+=usr/share/man/man4/i4bq931.4.gz OLD_FILES+=usr/share/man/man4/i4brbch.4.gz 
OLD_FILES+=usr/share/man/man4/i4btel.4.gz OLD_FILES+=usr/share/man/man4/i4btrc.4.gz OLD_FILES+=usr/share/man/man4/iavc.4.gz OLD_FILES+=usr/share/man/man4/isic.4.gz OLD_FILES+=usr/share/man/man4/ifpi.4.gz OLD_FILES+=usr/share/man/man4/ifpi2.4.gz OLD_FILES+=usr/share/man/man4/ifpnp.4.gz OLD_FILES+=usr/share/man/man4/ihfc.4.gz OLD_FILES+=usr/share/man/man4/itjc.4.gz OLD_FILES+=usr/share/man/man4/iwic.4.gz OLD_FILES+=usr/share/man/man5/isdnd.rc.5.gz OLD_FILES+=usr/share/man/man5/isdnd.rates.5.gz OLD_FILES+=usr/share/man/man5/isdnd.acct.5.gz OLD_FILES+=usr/share/man/man8/isdnd.8.gz OLD_FILES+=usr/share/man/man8/isdndebug.8.gz OLD_FILES+=usr/share/man/man8/isdndecode.8.gz OLD_FILES+=usr/share/man/man8/isdnmonitor.8.gz OLD_FILES+=usr/share/man/man8/isdnphone.8.gz OLD_FILES+=usr/share/man/man8/isdntel.8.gz OLD_FILES+=usr/share/man/man8/isdntelctl.8.gz OLD_FILES+=usr/share/man/man8/isdntrace.8.gz OLD_FILES+=usr/share/examples/isdn/contrib/README OLD_FILES+=usr/share/examples/isdn/contrib/anleitung.ppp OLD_FILES+=usr/share/examples/isdn/contrib/answer.c OLD_FILES+=usr/share/examples/isdn/contrib/answer.sh OLD_FILES+=usr/share/examples/isdn/contrib/convert.sh OLD_FILES+=usr/share/examples/isdn/contrib/hplay.c OLD_FILES+=usr/share/examples/isdn/contrib/i4b-ppp-newbie.txt OLD_FILES+=usr/share/examples/isdn/contrib/isdnctl OLD_FILES+=usr/share/examples/isdn/contrib/isdnd_acct OLD_FILES+=usr/share/examples/isdn/contrib/isdnd_acct.pl OLD_FILES+=usr/share/examples/isdn/contrib/isdntelmux.c OLD_FILES+=usr/share/examples/isdn/contrib/mrtg-isp0.sh OLD_FILES+=usr/share/examples/isdn/i4brunppp/Makefile OLD_FILES+=usr/share/examples/isdn/i4brunppp/README OLD_FILES+=usr/share/examples/isdn/i4brunppp/i4brunppp-isdnd.rc OLD_FILES+=usr/share/examples/isdn/i4brunppp/i4brunppp.8 OLD_FILES+=usr/share/examples/isdn/i4brunppp/i4brunppp.c OLD_FILES+=usr/share/examples/isdn/v21/Makefile OLD_FILES+=usr/share/examples/isdn/v21/README OLD_FILES+=usr/share/examples/isdn/v21/v21modem.c OLD_FILES+=usr/share/examples/isdn/FAQ OLD_FILES+=usr/share/examples/isdn/KERNEL OLD_FILES+=usr/share/examples/isdn/Overview OLD_FILES+=usr/share/examples/isdn/README OLD_FILES+=usr/share/examples/isdn/ROADMAP OLD_FILES+=usr/share/examples/isdn/ReleaseNotes OLD_FILES+=usr/share/examples/isdn/Resources OLD_FILES+=usr/share/examples/isdn/SupportedCards OLD_FILES+=usr/share/examples/isdn/ThankYou OLD_DIRS+=usr/share/examples/isdn/contrib OLD_DIRS+=usr/share/examples/isdn/i4brunppp OLD_DIRS+=usr/share/examples/isdn/v21 OLD_DIRS+=usr/share/examples/isdn OLD_FILES+=usr/share/examples/ppp/isdnd.rc OLD_FILES+=usr/share/examples/ppp/ppp.conf.isdn # 20080525: ng_atmpif removed OLD_FILES+=usr/include/netgraph/atm/ng_atmpif.h OLD_FILES+=usr/share/man/man4/ng_atmpif.4.gz # 20080522: pmap_addr_hint removed OLD_FILES+=usr/share/man/man9/pmap_addr_hint.9.gz # 20080517: ipsec_osdep.h removed OLD_FILES+=usr/include/netipsec/ipsec_osdep.h # 20080507: heimdal 1.1 import OLD_LIBS+=usr/lib/libasn1.so.9 OLD_LIBS+=usr/lib/libgssapi.so.9 OLD_LIBS+=usr/lib/libgssapi_krb5.so.9 OLD_LIBS+=usr/lib/libhdb.so.9 OLD_LIBS+=usr/lib/libkadm5clnt.so.9 OLD_LIBS+=usr/lib/libkadm5srv.so.9 OLD_LIBS+=usr/lib/libkafs5.so.9 OLD_LIBS+=usr/lib/libkrb5.so.9 OLD_LIBS+=usr/lib/libroken.so.9 OLD_LIBS+=usr/lib32/libgssapi.so.9 # 20080420: Symbol card support dropped OLD_FILES+=usr/include/dev/wi/spectrum24t_cf.h # 20080420: awi removal OLD_FILES+=usr/share/man/man4/awi.4.gz OLD_FILES+=usr/share/man/man4/if_awi.4.gz # 20080331: pkg_sign has been removed OLD_FILES+=usr/sbin/pkg_check 
OLD_FILES+=usr/sbin/pkg_sign OLD_FILES+=usr/share/man/man1/pkg_check.1.gz OLD_FILES+=usr/share/man/man1/pkg_sign.1.gz # 20080325: tzdata2008b import OLD_FILES+=usr/share/zoneinfo/Asia/Calcutta OLD_FILES+=usr/share/zoneinfo/Asia/Saigon # 20080314: stack_print(9) mlink fixed OLD_FILES+=usr/share/man/man9/stack_printf.9.gz # 20080312: libkse removal OLD_FILES+=usr/include/sys/kse.h OLD_FILES+=usr/lib/libkse.so OLD_LIBS+=usr/lib/libkse.so.3 OLD_FILES+=usr/share/man/man2/kse.2.gz OLD_FILES+=usr/share/man/man2/kse_create.2.gz OLD_FILES+=usr/share/man/man2/kse_exit.2.gz OLD_FILES+=usr/share/man/man2/kse_release.2.gz OLD_FILES+=usr/share/man/man2/kse_switchin.2.gz OLD_FILES+=usr/share/man/man2/kse_thr_interrupt.2.gz OLD_FILES+=usr/share/man/man2/kse_wakeup.2.gz OLD_FILES+=usr/lib32/libkse.so OLD_LIBS+=usr/lib32/libkse.so.3 # 20080225: bsdar/bsdranlib rename to ar/ranlib OLD_FILES+=usr/bin/bsdar OLD_FILES+=usr/bin/bsdranlib OLD_FILES+=usr/share/man/man1/bsdar.1.gz OLD_FILES+=usr/share/man/man1/bsdranlib.1.gz # 20080220: geom_lvm rename to geom_linux_lvm OLD_FILES+=usr/share/man/man4/geom_lvm.4.gz # 20080126: oldcard.4 removal OLD_FILES+=usr/share/man/man4/card.4.gz OLD_FILES+=usr/share/man/man4/oldcard.4.gz # 20080122: Removed from the tree OLD_FILES+=usr/share/man/man9/BUF_REFCNT.9.gz # 20080108: Moved to section 2 OLD_FILES+=usr/share/man/man3/shm_open.3.gz OLD_FILES+=usr/share/man/man3/shm_unlink.3.gz # 20071207: Merged with fortunes-o.real OLD_FILES+=usr/share/games/fortune/fortunes2-o OLD_FILES+=usr/share/games/fortune/fortunes2-o.dat # 20071201: Removal of XRPU driver OLD_FILES+=usr/include/sys/xrpuio.h # 20071129: Disabled static versions of libkse by default OLD_FILES+=usr/lib/libkse.a OLD_FILES+=usr/lib/libkse_p.a OLD_FILES+=usr/lib/libkse_pic.a OLD_FILES+=usr/lib32/libkse.a OLD_FILES+=usr/lib32/libkse_p.a OLD_FILES+=usr/lib32/libkse_pic.a # 20071129: Removed a Solaris compatibility header OLD_FILES+=usr/include/sys/_elf_solaris.h # 20071125: Renamed to pmc_get_msr() OLD_FILES+=usr/share/man/man3/pmc_x86_get_msr.3.gz # 20071108: Removed obsolete OLDCARD support file OLD_FILES+=etc/defaults/pccard.conf # 20071025: rc.d/nfslocking superseded by rc.d/lockd and rc.d/statd OLD_FILES+=etc/rc.d/nfslocking # 20070930: rename of cached to nscd OLD_FILES+=etc/cached.conf OLD_FILES+=etc/rc.d/cached OLD_FILES+=usr/sbin/cached OLD_FILES+=usr/share/man/man5/cached.conf.5.gz OLD_FILES+=usr/share/man/man8/cached.8.gz # 20070807: removal of PowerPC-specific header file.
.if ${TARGET_ARCH} == "powerpc" OLD_FILES+=usr/include/machine/interruptvar.h .endif # 20070801: fast_ipsec.4 gone OLD_FILES+=usr/share/man/man4/fast_ipsec.4.gz # 20070715: netatm temporarily disconnected (removed 20080525) OLD_FILES+=rescue/atm OLD_FILES+=rescue/fore_dnld OLD_FILES+=rescue/ilmid OLD_FILES+=sbin/atm OLD_FILES+=sbin/fore_dnld OLD_FILES+=sbin/ilmid OLD_FILES+=usr/include/libatm.h OLD_FILES+=usr/include/netatm/atm.h OLD_FILES+=usr/include/netatm/atm_cm.h OLD_FILES+=usr/include/netatm/atm_if.h OLD_FILES+=usr/include/netatm/atm_ioctl.h OLD_FILES+=usr/include/netatm/atm_pcb.h OLD_FILES+=usr/include/netatm/atm_sap.h OLD_FILES+=usr/include/netatm/atm_sigmgr.h OLD_FILES+=usr/include/netatm/atm_stack.h OLD_FILES+=usr/include/netatm/atm_sys.h OLD_FILES+=usr/include/netatm/atm_var.h OLD_FILES+=usr/include/netatm/atm_vc.h OLD_FILES+=usr/include/netatm/ipatm/ipatm.h OLD_FILES+=usr/include/netatm/ipatm/ipatm_serv.h OLD_FILES+=usr/include/netatm/ipatm/ipatm_var.h OLD_FILES+=usr/include/netatm/port.h OLD_FILES+=usr/include/netatm/queue.h OLD_FILES+=usr/include/netatm/sigpvc/sigpvc_var.h OLD_FILES+=usr/include/netatm/spans/spans_cls.h OLD_FILES+=usr/include/netatm/spans/spans_kxdr.h OLD_FILES+=usr/include/netatm/spans/spans_var.h OLD_FILES+=usr/include/netatm/uni/sscf_uni.h OLD_FILES+=usr/include/netatm/uni/sscf_uni_var.h OLD_FILES+=usr/include/netatm/uni/sscop.h OLD_FILES+=usr/include/netatm/uni/sscop_misc.h OLD_FILES+=usr/include/netatm/uni/sscop_pdu.h OLD_FILES+=usr/include/netatm/uni/sscop_var.h OLD_FILES+=usr/include/netatm/uni/uni.h OLD_FILES+=usr/include/netatm/uni/uniip_var.h OLD_FILES+=usr/include/netatm/uni/unisig.h OLD_FILES+=usr/include/netatm/uni/unisig_decode.h OLD_FILES+=usr/include/netatm/uni/unisig_mbuf.h OLD_FILES+=usr/include/netatm/uni/unisig_msg.h OLD_FILES+=usr/include/netatm/uni/unisig_print.h OLD_FILES+=usr/include/netatm/uni/unisig_var.h OLD_FILES+=usr/lib/libatm.a OLD_FILES+=usr/lib/libatm_p.a OLD_FILES+=usr/sbin/atmarpd OLD_FILES+=usr/sbin/scspd OLD_FILES+=usr/share/man/en.ISO8859-1/man8/atm.8.gz OLD_FILES+=usr/share/man/en.ISO8859-1/man8/atmarpd.8.gz OLD_FILES+=usr/share/man/en.ISO8859-1/man8/fore_dnld.8.gz OLD_FILES+=usr/share/man/en.ISO8859-1/man8/ilmid.8.gz OLD_FILES+=usr/share/man/en.ISO8859-1/man8/scspd.8.gz OLD_FILES+=usr/share/man/man8/atm.8.gz OLD_FILES+=usr/share/man/man8/atmarpd.8.gz OLD_FILES+=usr/share/man/man8/fore_dnld.8.gz OLD_FILES+=usr/share/man/man8/ilmid.8.gz OLD_FILES+=usr/share/man/man8/scspd.8.gz OLD_FILES+=usr/share/examples/atm/NOTES OLD_FILES+=usr/share/examples/atm/README OLD_FILES+=usr/share/examples/atm/Startup OLD_FILES+=usr/share/examples/atm/atm-config.sh OLD_FILES+=usr/share/examples/atm/atm-sockets.txt OLD_FILES+=usr/share/examples/atm/cpcs-design.txt OLD_FILES+=usr/share/examples/atm/fore-microcode.txt OLD_FILES+=usr/share/examples/atm/sscf-design.txt OLD_FILES+=usr/share/examples/atm/sscop-design.txt OLD_LIBS+=lib/libatm.so.5 OLD_LIBS+=usr/lib/libatm.so OLD_DIRS+=usr/include/netatm/sigpvc OLD_DIRS+=usr/include/netatm/spans OLD_DIRS+=usr/include/netatm/ipatm OLD_DIRS+=usr/include/netatm/uni OLD_DIRS+=usr/include/netatm OLD_DIRS+=usr/share/examples/atm OLD_FILES+=usr/lib32/libatm.a OLD_FILES+=usr/lib32/libatm.so OLD_LIBS+=usr/lib32/libatm.so.5 OLD_FILES+=usr/lib32/libatm_p.a # 20070705: I4B headers repo-copied to include/i4b/ .if ${TARGET_ARCH} == "i386" OLD_FILES+=usr/include/machine/i4b_cause.h OLD_FILES+=usr/include/machine/i4b_debug.h OLD_FILES+=usr/include/machine/i4b_ioctl.h OLD_FILES+=usr/include/machine/i4b_rbch_ioctl.h 
OLD_FILES+=usr/include/machine/i4b_tel_ioctl.h OLD_FILES+=usr/include/machine/i4b_trace.h .endif # 20070703: pf 4.1 import OLD_FILES+=usr/libexec/ftp-proxy # 20070701: KAME IPSec removal OLD_FILES+=usr/include/netinet6/ah.h OLD_FILES+=usr/include/netinet6/ah6.h OLD_FILES+=usr/include/netinet6/ah_aesxcbcmac.h OLD_FILES+=usr/include/netinet6/esp.h OLD_FILES+=usr/include/netinet6/esp6.h OLD_FILES+=usr/include/netinet6/esp_aesctr.h OLD_FILES+=usr/include/netinet6/esp_camellia.h OLD_FILES+=usr/include/netinet6/esp_rijndael.h OLD_FILES+=usr/include/netinet6/ipsec.h OLD_FILES+=usr/include/netinet6/ipsec6.h OLD_FILES+=usr/include/netinet6/ipcomp.h OLD_FILES+=usr/include/netinet6/ipcomp6.h OLD_FILES+=usr/include/netkey/key.h OLD_FILES+=usr/include/netkey/key_debug.h OLD_FILES+=usr/include/netkey/key_var.h OLD_FILES+=usr/include/netkey/keydb.h OLD_FILES+=usr/include/netkey/keysock.h OLD_DIRS+=usr/include/netkey # 20070701: remove wicontrol OLD_FILES+=usr/sbin/wicontrol OLD_FILES+=usr/share/man/man8/wicontrol.8.gz # 20070625: umapfs removal OLD_FILES+=rescue/mount_umapfs OLD_FILES+=sbin/mount_umapfs OLD_FILES+=usr/include/fs/umapfs/umap.h OLD_FILES+=usr/share/man/man8/mount_umapfs.8.gz OLD_DIRS+=usr/include/fs/umapfs # 20070618: Removal of the PROTO.localhost* files OLD_FILES+=etc/namedb/PROTO.localhost-v6.rev OLD_FILES+=etc/namedb/PROTO.localhost.rev OLD_FILES+=etc/namedb/make-localhost # 20070618: shared library version bump OLD_LIBS+=lib/libalias.so.5 OLD_LIBS+=lib/libbsnmp.so.3 OLD_LIBS+=lib/libncurses.so.6 OLD_LIBS+=lib/libncursesw.so.6 OLD_LIBS+=lib/libreadline.so.6 OLD_LIBS+=usr/lib/libdialog.so.5 OLD_LIBS+=usr/lib/libgnuregex.so.3 OLD_LIBS+=usr/lib/libhistory.so.6 OLD_LIBS+=usr/lib/libpam.so.3 OLD_LIBS+=usr/lib/libssh.so.3 OLD_LIBS+=usr/lib/pam_chroot.so.3 OLD_LIBS+=usr/lib/pam_deny.so.3 OLD_LIBS+=usr/lib/pam_echo.so.3 OLD_LIBS+=usr/lib/pam_exec.so.3 OLD_LIBS+=usr/lib/pam_ftpusers.so.3 OLD_LIBS+=usr/lib/pam_group.so.3 OLD_LIBS+=usr/lib/pam_guest.so.3 OLD_LIBS+=usr/lib/pam_krb5.so.3 OLD_LIBS+=usr/lib/pam_ksu.so.3 OLD_LIBS+=usr/lib/pam_lastlog.so.3 OLD_LIBS+=usr/lib/pam_login_access.so.3 OLD_LIBS+=usr/lib/pam_nologin.so.3 OLD_LIBS+=usr/lib/pam_opie.so.3 OLD_LIBS+=usr/lib/pam_opieaccess.so.3 OLD_LIBS+=usr/lib/pam_passwdqc.so.3 OLD_LIBS+=usr/lib/pam_permit.so.3 OLD_LIBS+=usr/lib/pam_radius.so.3 OLD_LIBS+=usr/lib/pam_rhosts.so.3 OLD_LIBS+=usr/lib/pam_rootok.so.3 OLD_LIBS+=usr/lib/pam_securetty.so.3 OLD_LIBS+=usr/lib/pam_self.so.3 OLD_LIBS+=usr/lib/pam_ssh.so.3 OLD_LIBS+=usr/lib/pam_tacplus.so.3 OLD_LIBS+=usr/lib/pam_unix.so.3 OLD_LIBS+=usr/lib/snmp_atm.so.4 OLD_LIBS+=usr/lib/snmp_bridge.so.4 OLD_LIBS+=usr/lib/snmp_hostres.so.4 OLD_LIBS+=usr/lib/snmp_mibII.so.4 OLD_LIBS+=usr/lib/snmp_netgraph.so.4 OLD_LIBS+=usr/lib/snmp_pf.so.4 OLD_LIBS+=usr/lib32/libalias.so.5 OLD_LIBS+=usr/lib32/libbsnmp.so.3 OLD_LIBS+=usr/lib32/libdialog.so.5 OLD_LIBS+=usr/lib32/libgnuregex.so.3 OLD_LIBS+=usr/lib32/libhistory.so.6 OLD_LIBS+=usr/lib32/libncurses.so.6 OLD_LIBS+=usr/lib32/libncursesw.so.6 OLD_LIBS+=usr/lib32/libpam.so.3 OLD_LIBS+=usr/lib32/libreadline.so.6 OLD_LIBS+=usr/lib32/libssh.so.3 OLD_LIBS+=usr/lib32/pam_chroot.so.3 OLD_LIBS+=usr/lib32/pam_deny.so.3 OLD_LIBS+=usr/lib32/pam_echo.so.3 OLD_LIBS+=usr/lib32/pam_exec.so.3 OLD_LIBS+=usr/lib32/pam_ftpusers.so.3 OLD_LIBS+=usr/lib32/pam_group.so.3 OLD_LIBS+=usr/lib32/pam_guest.so.3 OLD_LIBS+=usr/lib32/pam_krb5.so.3 OLD_LIBS+=usr/lib32/pam_ksu.so.3 OLD_LIBS+=usr/lib32/pam_lastlog.so.3 OLD_LIBS+=usr/lib32/pam_login_access.so.3 OLD_LIBS+=usr/lib32/pam_nologin.so.3 
OLD_LIBS+=usr/lib32/pam_opie.so.3 OLD_LIBS+=usr/lib32/pam_opieaccess.so.3 OLD_LIBS+=usr/lib32/pam_passwdqc.so.3 OLD_LIBS+=usr/lib32/pam_permit.so.3 OLD_LIBS+=usr/lib32/pam_radius.so.3 OLD_LIBS+=usr/lib32/pam_rhosts.so.3 OLD_LIBS+=usr/lib32/pam_rootok.so.3 OLD_LIBS+=usr/lib32/pam_securetty.so.3 OLD_LIBS+=usr/lib32/pam_self.so.3 OLD_LIBS+=usr/lib32/pam_ssh.so.3 OLD_LIBS+=usr/lib32/pam_tacplus.so.3 OLD_LIBS+=usr/lib32/pam_unix.so.3 # 20070613: IPX over IP tunnel removal OLD_FILES+=usr/include/netipx/ipx_ip.h # 20070605: sched_core removal OLD_FILES+=usr/share/man/man4/sched_core.4.gz # 20070603: BIND 9.4.1 import OLD_LIBS+=usr/lib/liblwres.so.10 # 20070521: shared library version bump OLD_LIBS+=lib/libatm.so.4 OLD_LIBS+=lib/libbegemot.so.2 OLD_LIBS+=lib/libbsdxml.so.2 OLD_LIBS+=lib/libcam.so.3 OLD_LIBS+=lib/libcrypt.so.3 OLD_LIBS+=lib/libdevstat.so.5 OLD_LIBS+=lib/libedit.so.5 OLD_LIBS+=lib/libgeom.so.3 OLD_LIBS+=lib/libipsec.so.2 OLD_LIBS+=lib/libipx.so.3 OLD_LIBS+=lib/libkiconv.so.2 OLD_LIBS+=lib/libkse.so.2 OLD_LIBS+=lib/libkvm.so.3 OLD_LIBS+=lib/libm.so.4 OLD_LIBS+=lib/libmd.so.3 OLD_LIBS+=lib/libpcap.so.4 OLD_LIBS+=lib/libpthread.so.2 OLD_LIBS+=lib/libsbuf.so.3 OLD_LIBS+=lib/libthr.so.2 OLD_LIBS+=lib/libufs.so.3 OLD_LIBS+=lib/libutil.so.6 OLD_LIBS+=lib/libz.so.3 OLD_LIBS+=usr/lib/libbluetooth.so.2 OLD_LIBS+=usr/lib/libbsm.so.1 OLD_LIBS+=usr/lib/libbz2.so.2 OLD_LIBS+=usr/lib/libcalendar.so.3 OLD_LIBS+=usr/lib/libcom_err.so.3 OLD_LIBS+=usr/lib/libdevinfo.so.3 OLD_LIBS+=usr/lib/libfetch.so.4 OLD_LIBS+=usr/lib/libform.so.3 OLD_LIBS+=usr/lib/libformw.so.3 OLD_LIBS+=usr/lib/libftpio.so.6 OLD_LIBS+=usr/lib/libgpib.so.1 OLD_LIBS+=usr/lib/libkse.so.2 OLD_LIBS+=usr/lib/libmagic.so.2 OLD_LIBS+=usr/lib/libmemstat.so.1 OLD_LIBS+=usr/lib/libmenu.so.3 OLD_LIBS+=usr/lib/libmenuw.so.3 OLD_LIBS+=usr/lib/libmilter.so.3 OLD_LIBS+=usr/lib/libmp.so.5 OLD_LIBS+=usr/lib/libncp.so.2 OLD_LIBS+=usr/lib/libnetgraph.so.2 OLD_LIBS+=usr/lib/libngatm.so.2 OLD_LIBS+=usr/lib/libopie.so.4 OLD_LIBS+=usr/lib/libpanel.so.3 OLD_LIBS+=usr/lib/libpanelw.so.3 OLD_LIBS+=usr/lib/libpmc.so.3 OLD_LIBS+=usr/lib/libradius.so.2 OLD_LIBS+=usr/lib/librpcsvc.so.3 OLD_LIBS+=usr/lib/libsdp.so.2 OLD_LIBS+=usr/lib/libsmb.so.2 OLD_LIBS+=usr/lib/libstdc++.so.5 OLD_LIBS+=usr/lib/libtacplus.so.2 OLD_LIBS+=usr/lib/libthr.so.2 OLD_LIBS+=usr/lib/libthread_db.so.2 OLD_LIBS+=usr/lib/libugidfw.so.2 OLD_LIBS+=usr/lib/libusbhid.so.2 OLD_LIBS+=usr/lib/libvgl.so.4 OLD_LIBS+=usr/lib/libwrap.so.4 OLD_LIBS+=usr/lib/libypclnt.so.2 OLD_LIBS+=usr/lib/snmp_bridge.so.3 OLD_LIBS+=usr/lib/snmp_hostres.so.3 OLD_LIBS+=usr/lib32/libatm.so.4 OLD_LIBS+=usr/lib32/libbegemot.so.2 OLD_LIBS+=usr/lib32/libbluetooth.so.2 OLD_LIBS+=usr/lib32/libbsdxml.so.2 OLD_LIBS+=usr/lib32/libbsm.so.1 OLD_LIBS+=usr/lib32/libbz2.so.2 OLD_LIBS+=usr/lib32/libcalendar.so.3 OLD_LIBS+=usr/lib32/libcam.so.3 OLD_LIBS+=usr/lib32/libcom_err.so.3 OLD_LIBS+=usr/lib32/libcrypt.so.3 OLD_LIBS+=usr/lib32/libdevinfo.so.3 OLD_LIBS+=usr/lib32/libdevstat.so.5 OLD_LIBS+=usr/lib32/libedit.so.5 OLD_LIBS+=usr/lib32/libfetch.so.4 OLD_LIBS+=usr/lib32/libform.so.3 OLD_LIBS+=usr/lib32/libformw.so.3 OLD_LIBS+=usr/lib32/libftpio.so.6 OLD_LIBS+=usr/lib32/libgeom.so.3 OLD_LIBS+=usr/lib32/libgpib.so.1 OLD_LIBS+=usr/lib32/libipsec.so.2 OLD_LIBS+=usr/lib32/libipx.so.3 OLD_LIBS+=usr/lib32/libkiconv.so.2 OLD_LIBS+=usr/lib32/libkse.so.2 OLD_LIBS+=usr/lib32/libkvm.so.3 OLD_LIBS+=usr/lib32/libm.so.4 OLD_LIBS+=usr/lib32/libmagic.so.2 OLD_LIBS+=usr/lib32/libmd.so.3 OLD_LIBS+=usr/lib32/libmemstat.so.1 OLD_LIBS+=usr/lib32/libmenu.so.3 
OLD_LIBS+=usr/lib32/libmenuw.so.3 OLD_LIBS+=usr/lib32/libmilter.so.3 OLD_LIBS+=usr/lib32/libmp.so.5 OLD_LIBS+=usr/lib32/libncp.so.2 OLD_LIBS+=usr/lib32/libnetgraph.so.2 OLD_LIBS+=usr/lib32/libngatm.so.2 OLD_LIBS+=usr/lib32/libopie.so.4 OLD_LIBS+=usr/lib32/libpanel.so.3 OLD_LIBS+=usr/lib32/libpanelw.so.3 OLD_LIBS+=usr/lib32/libpcap.so.4 OLD_LIBS+=usr/lib32/libpmc.so.3 OLD_LIBS+=usr/lib32/libpthread.so.2 OLD_LIBS+=usr/lib32/libradius.so.2 OLD_LIBS+=usr/lib32/librpcsvc.so.3 OLD_LIBS+=usr/lib32/libsbuf.so.3 OLD_LIBS+=usr/lib32/libsdp.so.2 OLD_LIBS+=usr/lib32/libsmb.so.2 OLD_LIBS+=usr/lib32/libstdc++.so.5 OLD_LIBS+=usr/lib32/libtacplus.so.2 OLD_LIBS+=usr/lib32/libthr.so.2 OLD_LIBS+=usr/lib32/libthread_db.so.2 OLD_LIBS+=usr/lib32/libufs.so.3 OLD_LIBS+=usr/lib32/libugidfw.so.2 OLD_LIBS+=usr/lib32/libusbhid.so.2 OLD_LIBS+=usr/lib32/libutil.so.6 OLD_LIBS+=usr/lib32/libvgl.so.4 OLD_LIBS+=usr/lib32/libwrap.so.4 OLD_LIBS+=usr/lib32/libypclnt.so.2 OLD_LIBS+=usr/lib32/libz.so.3 # 20070519: GCC 4.2 OLD_FILES+=usr/bin/f77 OLD_FILES+=usr/bin/protoize OLD_FILES+=usr/include/g2c.h OLD_FILES+=usr/libexec/f771 OLD_FILES+=usr/share/info/g77.info.gz OLD_FILES+=usr/share/man/man1/f77.1.gz OLD_FILES+=usr/include/c++/3.4/algorithm OLD_FILES+=usr/include/c++/3.4/backward/algo.h OLD_FILES+=usr/include/c++/3.4/backward/algobase.h OLD_FILES+=usr/include/c++/3.4/backward/alloc.h OLD_FILES+=usr/include/c++/3.4/backward/backward_warning.h OLD_FILES+=usr/include/c++/3.4/backward/bvector.h OLD_FILES+=usr/include/c++/3.4/backward/complex.h OLD_FILES+=usr/include/c++/3.4/backward/defalloc.h OLD_FILES+=usr/include/c++/3.4/backward/deque.h OLD_FILES+=usr/include/c++/3.4/backward/fstream.h OLD_FILES+=usr/include/c++/3.4/backward/function.h OLD_FILES+=usr/include/c++/3.4/backward/hash_map.h OLD_FILES+=usr/include/c++/3.4/backward/hash_set.h OLD_FILES+=usr/include/c++/3.4/backward/hashtable.h OLD_FILES+=usr/include/c++/3.4/backward/heap.h OLD_FILES+=usr/include/c++/3.4/backward/iomanip.h OLD_FILES+=usr/include/c++/3.4/backward/iostream.h OLD_FILES+=usr/include/c++/3.4/backward/istream.h OLD_FILES+=usr/include/c++/3.4/backward/iterator.h OLD_FILES+=usr/include/c++/3.4/backward/list.h OLD_FILES+=usr/include/c++/3.4/backward/map.h OLD_FILES+=usr/include/c++/3.4/backward/multimap.h OLD_FILES+=usr/include/c++/3.4/backward/multiset.h OLD_FILES+=usr/include/c++/3.4/backward/new.h OLD_FILES+=usr/include/c++/3.4/backward/ostream.h OLD_FILES+=usr/include/c++/3.4/backward/pair.h OLD_FILES+=usr/include/c++/3.4/backward/queue.h OLD_FILES+=usr/include/c++/3.4/backward/rope.h OLD_FILES+=usr/include/c++/3.4/backward/set.h OLD_FILES+=usr/include/c++/3.4/backward/slist.h OLD_FILES+=usr/include/c++/3.4/backward/stack.h OLD_FILES+=usr/include/c++/3.4/backward/stream.h OLD_FILES+=usr/include/c++/3.4/backward/streambuf.h OLD_FILES+=usr/include/c++/3.4/backward/strstream OLD_FILES+=usr/include/c++/3.4/backward/tempbuf.h OLD_FILES+=usr/include/c++/3.4/backward/tree.h OLD_FILES+=usr/include/c++/3.4/backward/vector.h OLD_FILES+=usr/include/c++/3.4/bits/allocator.h OLD_FILES+=usr/include/c++/3.4/bits/atomic_word.h OLD_FILES+=usr/include/c++/3.4/bits/atomicity.h OLD_FILES+=usr/include/c++/3.4/bits/basic_file.h OLD_FILES+=usr/include/c++/3.4/bits/basic_ios.h OLD_FILES+=usr/include/c++/3.4/bits/basic_ios.tcc OLD_FILES+=usr/include/c++/3.4/bits/basic_string.h OLD_FILES+=usr/include/c++/3.4/bits/basic_string.tcc OLD_FILES+=usr/include/c++/3.4/bits/boost_concept_check.h OLD_FILES+=usr/include/c++/3.4/bits/c++allocator.h 
OLD_FILES+=usr/include/c++/3.4/bits/c++config.h OLD_FILES+=usr/include/c++/3.4/bits/c++io.h OLD_FILES+=usr/include/c++/3.4/bits/c++locale.h OLD_FILES+=usr/include/c++/3.4/bits/c++locale_internal.h OLD_FILES+=usr/include/c++/3.4/bits/char_traits.h OLD_FILES+=usr/include/c++/3.4/bits/cmath.tcc OLD_FILES+=usr/include/c++/3.4/bits/codecvt.h OLD_FILES+=usr/include/c++/3.4/bits/codecvt_specializations.h OLD_FILES+=usr/include/c++/3.4/bits/concept_check.h OLD_FILES+=usr/include/c++/3.4/bits/concurrence.h OLD_FILES+=usr/include/c++/3.4/bits/cpp_type_traits.h OLD_FILES+=usr/include/c++/3.4/bits/ctype_base.h OLD_FILES+=usr/include/c++/3.4/bits/ctype_inline.h OLD_FILES+=usr/include/c++/3.4/bits/ctype_noninline.h OLD_FILES+=usr/include/c++/3.4/bits/deque.tcc OLD_FILES+=usr/include/c++/3.4/bits/fstream.tcc OLD_FILES+=usr/include/c++/3.4/bits/functexcept.h OLD_FILES+=usr/include/c++/3.4/bits/gslice.h OLD_FILES+=usr/include/c++/3.4/bits/gslice_array.h OLD_FILES+=usr/include/c++/3.4/bits/gthr-default.h OLD_FILES+=usr/include/c++/3.4/bits/gthr-posix.h OLD_FILES+=usr/include/c++/3.4/bits/gthr-single.h OLD_FILES+=usr/include/c++/3.4/bits/gthr.h OLD_FILES+=usr/include/c++/3.4/bits/indirect_array.h OLD_FILES+=usr/include/c++/3.4/bits/ios_base.h OLD_FILES+=usr/include/c++/3.4/bits/istream.tcc OLD_FILES+=usr/include/c++/3.4/bits/list.tcc OLD_FILES+=usr/include/c++/3.4/bits/locale_classes.h OLD_FILES+=usr/include/c++/3.4/bits/locale_facets.h OLD_FILES+=usr/include/c++/3.4/bits/locale_facets.tcc OLD_FILES+=usr/include/c++/3.4/bits/localefwd.h OLD_FILES+=usr/include/c++/3.4/bits/mask_array.h OLD_FILES+=usr/include/c++/3.4/bits/messages_members.h OLD_FILES+=usr/include/c++/3.4/bits/os_defines.h OLD_FILES+=usr/include/c++/3.4/bits/ostream.tcc OLD_FILES+=usr/include/c++/3.4/bits/postypes.h OLD_FILES+=usr/include/c++/3.4/bits/slice_array.h OLD_FILES+=usr/include/c++/3.4/bits/sstream.tcc OLD_FILES+=usr/include/c++/3.4/bits/stl_algo.h OLD_FILES+=usr/include/c++/3.4/bits/stl_algobase.h OLD_FILES+=usr/include/c++/3.4/bits/stl_bvector.h OLD_FILES+=usr/include/c++/3.4/bits/stl_construct.h OLD_FILES+=usr/include/c++/3.4/bits/stl_deque.h OLD_FILES+=usr/include/c++/3.4/bits/stl_function.h OLD_FILES+=usr/include/c++/3.4/bits/stl_heap.h OLD_FILES+=usr/include/c++/3.4/bits/stl_iterator.h OLD_FILES+=usr/include/c++/3.4/bits/stl_iterator_base_funcs.h OLD_FILES+=usr/include/c++/3.4/bits/stl_iterator_base_types.h OLD_FILES+=usr/include/c++/3.4/bits/stl_list.h OLD_FILES+=usr/include/c++/3.4/bits/stl_map.h OLD_FILES+=usr/include/c++/3.4/bits/stl_multimap.h OLD_FILES+=usr/include/c++/3.4/bits/stl_multiset.h OLD_FILES+=usr/include/c++/3.4/bits/stl_numeric.h OLD_FILES+=usr/include/c++/3.4/bits/stl_pair.h OLD_FILES+=usr/include/c++/3.4/bits/stl_queue.h OLD_FILES+=usr/include/c++/3.4/bits/stl_raw_storage_iter.h OLD_FILES+=usr/include/c++/3.4/bits/stl_relops.h OLD_FILES+=usr/include/c++/3.4/bits/stl_set.h OLD_FILES+=usr/include/c++/3.4/bits/stl_stack.h OLD_FILES+=usr/include/c++/3.4/bits/stl_tempbuf.h OLD_FILES+=usr/include/c++/3.4/bits/stl_threads.h OLD_FILES+=usr/include/c++/3.4/bits/stl_tree.h OLD_FILES+=usr/include/c++/3.4/bits/stl_uninitialized.h OLD_FILES+=usr/include/c++/3.4/bits/stl_vector.h OLD_FILES+=usr/include/c++/3.4/bits/stream_iterator.h OLD_FILES+=usr/include/c++/3.4/bits/streambuf.tcc OLD_FILES+=usr/include/c++/3.4/bits/streambuf_iterator.h OLD_FILES+=usr/include/c++/3.4/bits/stringfwd.h OLD_FILES+=usr/include/c++/3.4/bits/time_members.h OLD_FILES+=usr/include/c++/3.4/bits/type_traits.h 
OLD_FILES+=usr/include/c++/3.4/bits/valarray_after.h OLD_FILES+=usr/include/c++/3.4/bits/valarray_array.h OLD_FILES+=usr/include/c++/3.4/bits/valarray_array.tcc OLD_FILES+=usr/include/c++/3.4/bits/valarray_before.h OLD_FILES+=usr/include/c++/3.4/bits/vector.tcc OLD_FILES+=usr/include/c++/3.4/bitset OLD_FILES+=usr/include/c++/3.4/cassert OLD_FILES+=usr/include/c++/3.4/cctype OLD_FILES+=usr/include/c++/3.4/cerrno OLD_FILES+=usr/include/c++/3.4/cfloat OLD_FILES+=usr/include/c++/3.4/ciso646 OLD_FILES+=usr/include/c++/3.4/climits OLD_FILES+=usr/include/c++/3.4/clocale OLD_FILES+=usr/include/c++/3.4/cmath OLD_FILES+=usr/include/c++/3.4/complex OLD_FILES+=usr/include/c++/3.4/csetjmp OLD_FILES+=usr/include/c++/3.4/csignal OLD_FILES+=usr/include/c++/3.4/cstdarg OLD_FILES+=usr/include/c++/3.4/cstddef OLD_FILES+=usr/include/c++/3.4/cstdio OLD_FILES+=usr/include/c++/3.4/cstdlib OLD_FILES+=usr/include/c++/3.4/cstring OLD_FILES+=usr/include/c++/3.4/ctime OLD_FILES+=usr/include/c++/3.4/cwchar OLD_FILES+=usr/include/c++/3.4/cwctype OLD_FILES+=usr/include/c++/3.4/cxxabi.h OLD_FILES+=usr/include/c++/3.4/debug/bitset OLD_FILES+=usr/include/c++/3.4/debug/debug.h OLD_FILES+=usr/include/c++/3.4/debug/deque OLD_FILES+=usr/include/c++/3.4/debug/formatter.h OLD_FILES+=usr/include/c++/3.4/debug/hash_map OLD_FILES+=usr/include/c++/3.4/debug/hash_map.h OLD_FILES+=usr/include/c++/3.4/debug/hash_multimap.h OLD_FILES+=usr/include/c++/3.4/debug/hash_multiset.h OLD_FILES+=usr/include/c++/3.4/debug/hash_set OLD_FILES+=usr/include/c++/3.4/debug/hash_set.h OLD_FILES+=usr/include/c++/3.4/debug/list OLD_FILES+=usr/include/c++/3.4/debug/map OLD_FILES+=usr/include/c++/3.4/debug/map.h OLD_FILES+=usr/include/c++/3.4/debug/multimap.h OLD_FILES+=usr/include/c++/3.4/debug/multiset.h OLD_FILES+=usr/include/c++/3.4/debug/safe_base.h OLD_FILES+=usr/include/c++/3.4/debug/safe_iterator.h OLD_FILES+=usr/include/c++/3.4/debug/safe_iterator.tcc OLD_FILES+=usr/include/c++/3.4/debug/safe_sequence.h OLD_FILES+=usr/include/c++/3.4/debug/set OLD_FILES+=usr/include/c++/3.4/debug/set.h OLD_FILES+=usr/include/c++/3.4/debug/string OLD_FILES+=usr/include/c++/3.4/debug/vector OLD_FILES+=usr/include/c++/3.4/deque OLD_FILES+=usr/include/c++/3.4/exception OLD_FILES+=usr/include/c++/3.4/exception_defines.h OLD_FILES+=usr/include/c++/3.4/ext/algorithm OLD_FILES+=usr/include/c++/3.4/ext/bitmap_allocator.h OLD_FILES+=usr/include/c++/3.4/ext/debug_allocator.h OLD_FILES+=usr/include/c++/3.4/ext/enc_filebuf.h OLD_FILES+=usr/include/c++/3.4/ext/functional OLD_FILES+=usr/include/c++/3.4/ext/hash_fun.h OLD_FILES+=usr/include/c++/3.4/ext/hash_map OLD_FILES+=usr/include/c++/3.4/ext/hash_set OLD_FILES+=usr/include/c++/3.4/ext/hashtable.h OLD_FILES+=usr/include/c++/3.4/ext/iterator OLD_FILES+=usr/include/c++/3.4/ext/malloc_allocator.h OLD_FILES+=usr/include/c++/3.4/ext/memory OLD_FILES+=usr/include/c++/3.4/ext/mt_allocator.h OLD_FILES+=usr/include/c++/3.4/ext/new_allocator.h OLD_FILES+=usr/include/c++/3.4/ext/numeric OLD_FILES+=usr/include/c++/3.4/ext/pod_char_traits.h OLD_FILES+=usr/include/c++/3.4/ext/pool_allocator.h OLD_FILES+=usr/include/c++/3.4/ext/rb_tree OLD_FILES+=usr/include/c++/3.4/ext/rope OLD_FILES+=usr/include/c++/3.4/ext/ropeimpl.h OLD_FILES+=usr/include/c++/3.4/ext/slist OLD_FILES+=usr/include/c++/3.4/ext/stdio_filebuf.h OLD_FILES+=usr/include/c++/3.4/ext/stdio_sync_filebuf.h OLD_FILES+=usr/include/c++/3.4/fstream OLD_FILES+=usr/include/c++/3.4/functional OLD_FILES+=usr/include/c++/3.4/iomanip OLD_FILES+=usr/include/c++/3.4/ios 
OLD_FILES+=usr/include/c++/3.4/iosfwd OLD_FILES+=usr/include/c++/3.4/iostream OLD_FILES+=usr/include/c++/3.4/istream OLD_FILES+=usr/include/c++/3.4/iterator OLD_FILES+=usr/include/c++/3.4/limits OLD_FILES+=usr/include/c++/3.4/list OLD_FILES+=usr/include/c++/3.4/locale OLD_FILES+=usr/include/c++/3.4/map OLD_FILES+=usr/include/c++/3.4/memory OLD_FILES+=usr/include/c++/3.4/new OLD_FILES+=usr/include/c++/3.4/numeric OLD_FILES+=usr/include/c++/3.4/ostream OLD_FILES+=usr/include/c++/3.4/queue OLD_FILES+=usr/include/c++/3.4/set OLD_FILES+=usr/include/c++/3.4/sstream OLD_FILES+=usr/include/c++/3.4/stack OLD_FILES+=usr/include/c++/3.4/stdexcept OLD_FILES+=usr/include/c++/3.4/streambuf OLD_FILES+=usr/include/c++/3.4/string OLD_FILES+=usr/include/c++/3.4/typeinfo OLD_FILES+=usr/include/c++/3.4/utility OLD_FILES+=usr/include/c++/3.4/valarray OLD_FILES+=usr/include/c++/3.4/vector OLD_DIRS+=usr/include/c++/3.4/backward OLD_DIRS+=usr/include/c++/3.4/bits OLD_DIRS+=usr/include/c++/3.4/debug OLD_DIRS+=usr/include/c++/3.4/ext OLD_DIRS+=usr/include/c++/3.4 # 20070510: zpool/zfs moved to /sbin OLD_FILES+=usr/sbin/zfs OLD_FILES+=usr/sbin/zpool # 20070423: rc.bluetooth (examples) removed OLD_FILES+=usr/share/examples/netgraph/bluetooth/rc.bluetooth OLD_DIRS+=usr/share/examples/netgraph/bluetooth # 20070421: worm.4 removed OLD_FILES+=usr/share/man/man4/worm.4.gz # 20070417: trunk(4) renamed to lagg(4) OLD_FILES+=usr/include/net/if_trunk.h # 20070409: uuidgen moved to /bin/ OLD_FILES+=usr/bin/uuidgen # 20070328: bzip2 1.0.4 OLD_FILES+=usr/share/info/bzip2.info.gz # 20070303: libarchive 2.0 OLD_LIBS+=usr/lib/libarchive.so.3 OLD_LIBS+=usr/lib32/libarchive.so.3 # 20070301: remove addr2ascii and ascii2addr OLD_FILES+=usr/share/man/man3/addr2ascii.3.gz OLD_FILES+=usr/share/man/man3/ascii2addr.3.gz # 20070225: vm_page_unmanage() removed OLD_FILES+=usr/share/man/man9/vm_page_unmanage.9.gz # 20070216: VFS_VPTOFH(9) -> VOP_VPTOFH(9) OLD_FILES+=usr/share/man/man9/VFS_VPTOFH.9.gz # 20070212: kame.4 removed OLD_FILES+=usr/share/man/man4/kame.4.gz # 20070201: remove libmytinfo link OLD_FILES+=usr/lib/libmytinfo.a OLD_FILES+=usr/lib/libmytinfo.so OLD_FILES+=usr/lib/libmytinfo_p.a OLD_FILES+=usr/lib/libmytinfow.a OLD_FILES+=usr/lib/libmytinfow.so OLD_FILES+=usr/lib/libmytinfow_p.a OLD_FILES+=usr/lib32/libmytinfo.a OLD_FILES+=usr/lib32/libmytinfo.so OLD_FILES+=usr/lib32/libmytinfo_p.a OLD_FILES+=usr/lib32/libmytinfow.a OLD_FILES+=usr/lib32/libmytinfow.so OLD_FILES+=usr/lib32/libmytinfow_p.a # 20070128: remove vnconfig OLD_FILES+=usr/sbin/vnconfig # 20070127: remove bpf_compat.h OLD_FILES+=usr/include/net/bpf_compat.h # 20070125: objformat bites the dust OLD_FILES+=usr/bin/objformat OLD_FILES+=usr/share/man/man1/objformat.1.gz OLD_FILES+=usr/include/objformat.h OLD_FILES+=usr/share/man/man3/getobjformat.3.gz # 20061201: remove symlink to *.so.4 libalias modules OLD_FILES+=usr/lib/libalias_cuseeme.so OLD_FILES+=usr/lib/libalias_dummy.so OLD_FILES+=usr/lib/libalias_ftp.so OLD_FILES+=usr/lib/libalias_irc.so OLD_FILES+=usr/lib/libalias_nbt.so OLD_FILES+=usr/lib/libalias_pptp.so OLD_FILES+=usr/lib/libalias_skinny.so OLD_FILES+=usr/lib/libalias_smedia.so # 20061201: remove old *.so.4 libalias modules OLD_FILES+=lib/libalias_cuseeme.so.4 OLD_FILES+=lib/libalias_dummy.so.4 OLD_FILES+=lib/libalias_ftp.so.4 OLD_FILES+=lib/libalias_irc.so.4 OLD_FILES+=lib/libalias_nbt.so.4 OLD_FILES+=lib/libalias_pptp.so.4 OLD_FILES+=lib/libalias_skinny.so.4 OLD_FILES+=lib/libalias_smedia.so.4 # 20061126: remove old man page 
OLD_FILES+=usr/share/man/man3/archive_read_set_bytes_per_block.3.gz # 20061125: remove old man page OLD_FILES+=usr/share/man/man9/devsw.9.gz # 20061122: remove obsolete mount programs OLD_FILES+=sbin/mount_devfs OLD_FILES+=sbin/mount_ext2fs OLD_FILES+=sbin/mount_fdescfs OLD_FILES+=sbin/mount_linprocfs OLD_FILES+=sbin/mount_procfs OLD_FILES+=sbin/mount_std OLD_FILES+=rescue/mount_devfs OLD_FILES+=rescue/mount_ext2fs OLD_FILES+=rescue/mount_fdescfs OLD_FILES+=rescue/mount_linprocfs OLD_FILES+=rescue/mount_procfs OLD_FILES+=rescue/mount_std OLD_FILES+=usr/share/man/man8/mount_devfs.8.gz OLD_FILES+=usr/share/man/man8/mount_ext2fs.8.gz OLD_FILES+=usr/share/man/man8/mount_fdescfs.8.gz OLD_FILES+=usr/share/man/man8/mount_linprocfs.8.gz OLD_FILES+=usr/share/man/man8/mount_procfs.8.gz OLD_FILES+=usr/share/man/man8/mount_std.8.gz # 20061116: uhidev.4 removed OLD_FILES+=usr/share/man/man4/uhidev.4.gz # 20061106: archive_write_prepare.3 removed OLD_FILES+=usr/share/man/man3/archive_write_prepare.3.gz # 20061018: pccardc removed OLD_FILES+=usr/sbin/pccardc usr/share/man/man8/pccardc.8.gz # 20060930: demangle.h from contrib/libstdc++/include/ext/ OLD_FILES+=usr/include/c++/3.4/ext/demangle.h # 20060929: mrouted removed OLD_FILES+=usr/sbin/map-mbone OLD_FILES+=usr/sbin/mrinfo OLD_FILES+=usr/sbin/mrouted OLD_FILES+=usr/sbin/mtrace OLD_FILES+=usr/share/man/man8/map-mbone.8.gz OLD_FILES+=usr/share/man/man8/mrinfo.8.gz OLD_FILES+=usr/share/man/man8/mrouted.8.gz OLD_FILES+=usr/share/man/man8/mtrace.8.gz # 20060924: tcpslice removed OLD_FILES+=usr/sbin/tcpslice OLD_FILES+=usr/share/man/man1/tcpslice.1.gz # 20060829: kvmdb cleanup script removed OLD_FILES+=etc/periodic/weekly/120.clean-kvmdb # 20060822: ramdisk{,-own} have been replaced by mdconfig{,2} OLD_FILES+=etc/rc.d/ramdisk OLD_FILES+=etc/rc.d/ramdisk-own # 20060729: OpenSSL 0.9.7e -> 0.9.8b upgrade OLD_FILES+=usr/include/openssl/eng_int.h OLD_FILES+=usr/include/openssl/hw_4758_cca_err.h OLD_FILES+=usr/include/openssl/hw_aep_err.h OLD_FILES+=usr/include/openssl/hw_atalla_err.h OLD_FILES+=usr/include/openssl/hw_cswift_err.h OLD_FILES+=usr/include/openssl/hw_ncipher_err.h OLD_FILES+=usr/include/openssl/hw_nuron_err.h OLD_FILES+=usr/include/openssl/hw_sureware_err.h OLD_FILES+=usr/include/openssl/hw_ubsec_err.h # 20060713: mount_linsysfs(8) never existed in 7.x OLD_FILES+=sbin/mount_linsysfs OLD_FILES+=usr/share/man/man8/mount_linsysfs.8.gz # 20060704: KAME compat file net_osdep.h removed OLD_FILES+=usr/include/net/net_osdep.h # 20060605: man page links removed by OpenBSM 1.0 alpha 6 import OLD_FILES+=usr/share/man/man3/au_to_socket.3.gz OLD_FILES+=usr/share/man/man3/au_to_socket_ex_128.3.gz OLD_FILES+=usr/share/man/man3/au_to_socket_ex_32.3.gz # 20060517: pcvt removed OLD_FILES+=usr/share/pcvt/README.FIRST OLD_FILES+=usr/share/pcvt/Etc/xmodmap-german OLD_FILES+=usr/share/pcvt/Etc/pcvt.sh OLD_FILES+=usr/share/pcvt/Etc/pcvt.el OLD_FILES+=usr/share/pcvt/Etc/Terminfo OLD_FILES+=usr/share/pcvt/Etc/Termcap OLD_DIRS+=usr/share/pcvt/Etc OLD_FILES+=usr/share/pcvt/Doc/NotesAndHints OLD_FILES+=usr/share/pcvt/Doc/Keyboard.VT OLD_FILES+=usr/share/pcvt/Doc/Keyboard.HP OLD_FILES+=usr/share/pcvt/Doc/EscapeSequences OLD_FILES+=usr/share/pcvt/Doc/Charsets OLD_FILES+=usr/share/pcvt/Doc/CharGen OLD_FILES+=usr/share/pcvt/Doc/Bibliography OLD_FILES+=usr/share/pcvt/Doc/Acknowledgements OLD_DIRS+=usr/share/pcvt/Doc OLD_DIRS+=usr/share/pcvt OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.816 OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.814 OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.810 
OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.808 OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.816 OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.814 OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.810 OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.808 OLD_DIRS+=usr/share/misc/pcvtfonts OLD_FILES+=usr/share/misc/keycap.pcvt OLD_FILES+=usr/share/man/man8/ispcvt.8.gz OLD_FILES+=usr/share/man/man5/keycap.5.gz OLD_FILES+=usr/share/man/man4/pcvt.4.gz OLD_FILES+=usr/share/man/man3/kgetstr.3.gz OLD_FILES+=usr/share/man/man3/kgetnum.3.gz OLD_FILES+=usr/share/man/man3/kgetflag.3.gz OLD_FILES+=usr/share/man/man3/kgetent.3.gz OLD_FILES+=usr/share/man/man3/keycap.3.gz OLD_FILES+=usr/share/man/man1/vt220keys.1.gz OLD_FILES+=usr/share/man/man1/scon.1.gz OLD_FILES+=usr/share/man/man1/loadfont.1.gz OLD_FILES+=usr/share/man/man1/kcon.1.gz OLD_FILES+=usr/share/man/man1/fontedit.1.gz OLD_FILES+=usr/share/man/man1/cursor.1.gz OLD_FILES+=usr/sbin/vt220keys OLD_FILES+=usr/sbin/scon OLD_FILES+=usr/sbin/loadfont OLD_FILES+=usr/sbin/kcon OLD_FILES+=usr/sbin/ispcvt OLD_FILES+=usr/sbin/fontedit OLD_FILES+=usr/sbin/cursor OLD_FILES+=usr/lib/libkeycap_p.a OLD_FILES+=usr/lib/libkeycap.a OLD_FILES+=usr/include/machine/pcvt_ioctl.h # 20060514: lnc(4) replaced by le(4) OLD_FILES+=usr/share/man/man4/i386/lnc.4.gz # 20060512: remove ip6fw OLD_FILES+=etc/periodic/security/600.ip6fwdenied OLD_FILES+=etc/periodic/security/650.ip6fwlimit OLD_FILES+=sbin/ip6fw OLD_FILES+=usr/include/netinet6/ip6_fw.h OLD_FILES+=usr/share/man/man8/ip6fw.8.gz # 20060424: sab(4) removed OLD_FILES+=usr/share/man/man4/sab.4.gz # 20060328: remove redundant rc.d script OLD_FILES+=etc/rc.d/ike # 20060127: revert libdisk to static-only OLD_FILES+=usr/lib/libdisk.so # 20060115: sys/pccard includes cleanup OLD_FILES+=usr/include/pccard/driver.h OLD_FILES+=usr/include/pccard/i82365.h OLD_FILES+=usr/include/pccard/meciareg.h OLD_FILES+=usr/include/pccard/pccard_nbk.h OLD_FILES+=usr/include/pccard/pcic_pci.h OLD_FILES+=usr/include/pccard/pcicvar.h OLD_FILES+=usr/include/pccard/slot.h # 20051215: rescue/nextboot.sh renamed to rescue/nextboot OLD_FILES+=rescue/nextboot.sh # 20051214: usbd(8) removed OLD_FILES+=etc/rc.d/usbd OLD_FILES+=etc/usbd.conf OLD_FILES+=usr/sbin/usbd OLD_FILES+=usr/share/man/man8/usbd.8.gz # 20051029: rc.d/ppp-user renamed to rc.d/ppp for convenience OLD_FILES+=etc/rc.d/ppp-user # 20051012: setkey(8) moved to /sbin/ OLD_FILES+=usr/sbin/setkey # 20050930: pccardd(8) removed OLD_FILES+=usr/sbin/pccardd OLD_FILES+=usr/share/man/man5/pccard.conf.5.gz OLD_FILES+=usr/share/man/man8/pccardd.8.gz # 20050927: bridge(4) replaced by if_bridge(4) OLD_FILES+=usr/include/net/bridge.h # 20050831: not implemented OLD_FILES+=usr/share/man/man3/getino.3.gz OLD_FILES+=usr/share/man/man3/putino.3.gz # 20050825: T/TCP retired several months ago OLD_FILES+=usr/share/man/man4/ttcp.4.gz # 20050805: tn3270 retired long ago OLD_FILES+=usr/share/misc/map3270 # 20050801: too old to be interesting here OLD_FILES+=usr/share/doc/papers/px.ps.gz # 20050721: moved to ports OLD_FILES+=usr/sbin/vttest OLD_FILES+=usr/share/man/man1/vttest.1.gz # 20050617: wpa man pages moved to section 8 OLD_FILES+=usr/share/man/man1/hostapd.1.gz OLD_FILES+=usr/share/man/man1/hostapd_cli.1.gz OLD_FILES+=usr/share/man/man1/wpa_cli.1.gz OLD_FILES+=usr/share/man/man1/wpa_supplicant.1.gz # 20050610: rexecd (insecure by design) OLD_FILES+=etc/pam.d/rexecd OLD_FILES+=usr/share/man/man8/rexecd.8.gz OLD_FILES+=usr/libexec/rexecd # 20050606: OpenBSD dhclient replaces ISC one OLD_FILES+=bin/omshell
OLD_FILES+=sbin/omshell OLD_FILES+=usr/share/man/man1/omshell.1.gz OLD_FILES+=usr/share/man/man5/dhcp-eval.5.gz # 200504XX: ipf tools moved from /usr to / OLD_FILES+=rescue/ipfs OLD_FILES+=rescue/ipfstat OLD_FILES+=rescue/ipmon OLD_FILES+=rescue/ipnat OLD_FILES+=usr/sbin/ipftest OLD_FILES+=usr/sbin/ipresend OLD_FILES+=usr/sbin/ipsend OLD_FILES+=usr/sbin/iptest OLD_FILES+=usr/share/man/man1/ipnat.1.gz OLD_FILES+=usr/share/man/man1/ipsend.1.gz OLD_FILES+=usr/share/man/man1/iptest.1.gz OLD_FILES+=usr/share/man/man5/ipsend.5.gz # 200503XX: bsdtar takes over gtar OLD_FILES+=usr/bin/gtar OLD_FILES+=usr/share/man/man1/gtar.1.gz # 200503XX OLD_FILES+=usr/share/man/man3/exp10.3.gz OLD_FILES+=usr/share/man/man3/exp10f.3.gz OLD_FILES+=usr/share/man/man3/fpsetsticky.3.gz # 20050324: updated release infrastructure OLD_FILES+=usr/share/man/man5/drivers.conf.5.gz # 20050317: removed from BIND 9 distribution OLD_FILES+=usr/share/doc/bind9/KNOWN_DEFECTS # 2005XXXX: OLD_FILES+=sbin/mount_autofs OLD_FILES+=usr/lib/libautofs.a OLD_FILES+=usr/lib/libautofs.so OLD_FILES+=usr/share/man/man8/mount_autofs.8.gz # 20050203: Merged with fortunes OLD_FILES+=usr/share/games/fortune/fortunes2 OLD_FILES+=usr/share/games/fortune/fortunes2.dat # 200501XX: OLD_FILES+=usr/libexec/getNAME # 200411XX: gvinum replaces vinum OLD_FILES+=bin/vinum OLD_FILES+=rescue/vinum OLD_FILES+=sbin/vinum OLD_FILES+=usr/share/man/man8/vinum.8.gz # 200411XX: libxpg4 removal OLD_FILES+=usr/lib/libxpg4.a OLD_FILES+=usr/lib/libxpg4.so OLD_FILES+=usr/lib/libxpg4_p.a # 20041109: replaced by em(4) OLD_FILES+=usr/share/man/man4/gx.4.gz OLD_FILES+=usr/share/man/man4/if_gx.4.gz # 20041017: rune interface removed OLD_FILES+=usr/include/rune.h OLD_FILES+=usr/share/man/man3/fgetrune.3.gz OLD_FILES+=usr/share/man/man3/fputrune.3.gz OLD_FILES+=usr/share/man/man3/fungetrune.3.gz OLD_FILES+=usr/share/man/man3/mbrrune.3.gz OLD_FILES+=usr/share/man/man3/mbrune.3.gz OLD_FILES+=usr/share/man/man3/rune.3.gz OLD_FILES+=usr/share/man/man3/setinvalidrune.3.gz OLD_FILES+=usr/share/man/man3/sgetrune.3.gz OLD_FILES+=usr/share/man/man3/sputrune.3.gz # 20040925: bind9 import OLD_FILES+=usr/bin/dnskeygen OLD_FILES+=usr/bin/dnsquery OLD_FILES+=usr/lib/libisc.a OLD_FILES+=usr/lib/libisc.so OLD_FILES+=usr/lib/libisc_p.a OLD_FILES+=usr/libexec/named-xfer OLD_FILES+=usr/sbin/named.restart OLD_FILES+=usr/sbin/ndc OLD_FILES+=usr/sbin/nslookup OLD_FILES+=usr/sbin/nsupdate OLD_FILES+=usr/share/doc/bind/html/acl.html OLD_FILES+=usr/share/doc/bind/html/address_list.html OLD_FILES+=usr/share/doc/bind/html/comments.html OLD_FILES+=usr/share/doc/bind/html/config.html OLD_FILES+=usr/share/doc/bind/html/controls.html OLD_FILES+=usr/share/doc/bind/html/docdef.html OLD_FILES+=usr/share/doc/bind/html/example.html OLD_FILES+=usr/share/doc/bind/html/include.html OLD_FILES+=usr/share/doc/bind/html/index.html OLD_FILES+=usr/share/doc/bind/html/key.html OLD_FILES+=usr/share/doc/bind/html/logging.html OLD_FILES+=usr/share/doc/bind/html/master.html OLD_FILES+=usr/share/doc/bind/html/options.html OLD_FILES+=usr/share/doc/bind/html/server.html OLD_FILES+=usr/share/doc/bind/html/trusted-keys.html OLD_FILES+=usr/share/doc/bind/html/zone.html OLD_FILES+=usr/share/doc/bind/misc/DynamicUpdate OLD_FILES+=usr/share/doc/bind/misc/FAQ.1of2 OLD_FILES+=usr/share/doc/bind/misc/FAQ.2of2 OLD_FILES+=usr/share/doc/bind/misc/rfc2317-notes.txt OLD_FILES+=usr/share/doc/bind/misc/style.txt OLD_FILES+=usr/share/man/man1/dnskeygen.1.gz OLD_FILES+=usr/share/man/man1/dnsquery.1.gz 
OLD_FILES+=usr/share/man/man8/named-bootconf.8.gz OLD_FILES+=usr/share/man/man8/named-xfer.8.gz OLD_FILES+=usr/share/man/man8/named.restart.8.gz OLD_FILES+=usr/share/man/man8/ndc.8.gz OLD_FILES+=usr/share/man/man8/nslookup.8.gz # 200409XX OLD_FILES+=usr/share/man/man3/ENSURE.3.gz OLD_FILES+=usr/share/man/man3/ENSURE_ERR.3.gz OLD_FILES+=usr/share/man/man3/INSIST.3.gz OLD_FILES+=usr/share/man/man3/INSIST_ERR.3.gz OLD_FILES+=usr/share/man/man3/INVARIANT.3.gz OLD_FILES+=usr/share/man/man3/INVARIANT_ERR.3.gz OLD_FILES+=usr/share/man/man3/REQUIRE.3.gz OLD_FILES+=usr/share/man/man3/REQUIRE_ERR.3.gz OLD_FILES+=usr/share/man/man3/assertion_type_to_text.3.gz OLD_FILES+=usr/share/man/man3/assertions.3.gz OLD_FILES+=usr/share/man/man3/bitncmp.3.gz OLD_FILES+=usr/share/man/man3/evAddTime.3.gz OLD_FILES+=usr/share/man/man3/evCancelConn.3.gz OLD_FILES+=usr/share/man/man3/evCancelRW.3.gz OLD_FILES+=usr/share/man/man3/evClearIdleTimer.3.gz OLD_FILES+=usr/share/man/man3/evClearTimer.3.gz OLD_FILES+=usr/share/man/man3/evCmpTime.3.gz OLD_FILES+=usr/share/man/man3/evConnFunc.3.gz OLD_FILES+=usr/share/man/man3/evConnect.3.gz OLD_FILES+=usr/share/man/man3/evConsIovec.3.gz OLD_FILES+=usr/share/man/man3/evConsTime.3.gz OLD_FILES+=usr/share/man/man3/evCreate.3.gz OLD_FILES+=usr/share/man/man3/evDefer.3.gz OLD_FILES+=usr/share/man/man3/evDeselectFD.3.gz OLD_FILES+=usr/share/man/man3/evDestroy.3.gz OLD_FILES+=usr/share/man/man3/evDispatch.3.gz OLD_FILES+=usr/share/man/man3/evDo.3.gz OLD_FILES+=usr/share/man/man3/evDrop.3.gz OLD_FILES+=usr/share/man/man3/evFileFunc.3.gz OLD_FILES+=usr/share/man/man3/evGetNext.3.gz OLD_FILES+=usr/share/man/man3/evHold.3.gz OLD_FILES+=usr/share/man/man3/evInitID.3.gz OLD_FILES+=usr/share/man/man3/evLastEventTime.3.gz OLD_FILES+=usr/share/man/man3/evListen.3.gz OLD_FILES+=usr/share/man/man3/evMainLoop.3.gz OLD_FILES+=usr/share/man/man3/evNowTime.3.gz OLD_FILES+=usr/share/man/man3/evPrintf.3.gz OLD_FILES+=usr/share/man/man3/evRead.3.gz OLD_FILES+=usr/share/man/man3/evResetTimer.3.gz OLD_FILES+=usr/share/man/man3/evSelectFD.3.gz OLD_FILES+=usr/share/man/man3/evSetDebug.3.gz OLD_FILES+=usr/share/man/man3/evSetIdleTimer.3.gz OLD_FILES+=usr/share/man/man3/evSetTimer.3.gz OLD_FILES+=usr/share/man/man3/evStreamFunc.3.gz OLD_FILES+=usr/share/man/man3/evSubTime.3.gz OLD_FILES+=usr/share/man/man3/evTestID.3.gz OLD_FILES+=usr/share/man/man3/evTimeRW.3.gz OLD_FILES+=usr/share/man/man3/evTimeSpec.3.gz OLD_FILES+=usr/share/man/man3/evTimeVal.3.gz OLD_FILES+=usr/share/man/man3/evTimerFunc.3.gz OLD_FILES+=usr/share/man/man3/evTouchIdleTimer.3.gz OLD_FILES+=usr/share/man/man3/evTryAccept.3.gz OLD_FILES+=usr/share/man/man3/evUnhold.3.gz OLD_FILES+=usr/share/man/man3/evUntimeRW.3.gz OLD_FILES+=usr/share/man/man3/evUnwait.3.gz OLD_FILES+=usr/share/man/man3/evWaitFor.3.gz OLD_FILES+=usr/share/man/man3/evWaitFunc.3.gz OLD_FILES+=usr/share/man/man3/evWrite.3.gz OLD_FILES+=usr/share/man/man3/eventlib.3.gz OLD_FILES+=usr/share/man/man3/heap.3.gz OLD_FILES+=usr/share/man/man3/heap_decreased.3.gz OLD_FILES+=usr/share/man/man3/heap_delete.3.gz OLD_FILES+=usr/share/man/man3/heap_element.3.gz OLD_FILES+=usr/share/man/man3/heap_for_each.3.gz OLD_FILES+=usr/share/man/man3/heap_free.3.gz OLD_FILES+=usr/share/man/man3/heap_increased.3.gz OLD_FILES+=usr/share/man/man3/heap_insert.3.gz OLD_FILES+=usr/share/man/man3/heap_new.3.gz OLD_FILES+=usr/share/man/man3/log_add_channel.3.gz OLD_FILES+=usr/share/man/man3/log_category_is_active.3.gz OLD_FILES+=usr/share/man/man3/log_close_stream.3.gz 
OLD_FILES+=usr/share/man/man3/log_dec_references.3.gz OLD_FILES+=usr/share/man/man3/log_free_channel.3.gz OLD_FILES+=usr/share/man/man3/log_free_context.3.gz OLD_FILES+=usr/share/man/man3/log_get_filename.3.gz OLD_FILES+=usr/share/man/man3/log_get_stream.3.gz OLD_FILES+=usr/share/man/man3/log_inc_references.3.gz OLD_FILES+=usr/share/man/man3/log_new_context.3.gz OLD_FILES+=usr/share/man/man3/log_new_file_channel.3.gz OLD_FILES+=usr/share/man/man3/log_new_null_channel.3.gz OLD_FILES+=usr/share/man/man3/log_new_syslog_channel.3.gz OLD_FILES+=usr/share/man/man3/log_open_stream.3.gz OLD_FILES+=usr/share/man/man3/log_option.3.gz OLD_FILES+=usr/share/man/man3/log_remove_channel.3.gz OLD_FILES+=usr/share/man/man3/log_set_file_owner.3.gz OLD_FILES+=usr/share/man/man3/log_vwrite.3.gz OLD_FILES+=usr/share/man/man3/log_write.3.gz OLD_FILES+=usr/share/man/man3/logging.3.gz OLD_FILES+=usr/share/man/man3/memcluster.3.gz OLD_FILES+=usr/share/man/man3/memget.3.gz OLD_FILES+=usr/share/man/man3/memput.3.gz OLD_FILES+=usr/share/man/man3/memstats.3.gz OLD_FILES+=usr/share/man/man3/set_assertion_failure_callback.3.gz OLD_FILES+=usr/share/man/man3/sigwait.3.gz OLD_FILES+=usr/share/man/man3/tree_add.3.gz OLD_FILES+=usr/share/man/man3/tree_delete.3.gz OLD_FILES+=usr/share/man/man3/tree_init.3.gz OLD_FILES+=usr/share/man/man3/tree_mung.3.gz OLD_FILES+=usr/share/man/man3/tree_srch.3.gz OLD_FILES+=usr/share/man/man3/tree_trav.3.gz # 2004XXYY: OS internal libs, no ports use them, no need to use OLD_LIBS OLD_FILES+=lib/geom/geom_concat.so.1 OLD_FILES+=lib/geom/geom_label.so.1 OLD_FILES+=lib/geom/geom_nop.so.1 OLD_FILES+=lib/geom/geom_stripe.so.1 # 20040728: GCC 3.4.2 OLD_DIRS+=usr/include/c++/3.3 OLD_FILES+=usr/include/c++/3.3/FlexLexer.h OLD_FILES+=usr/include/c++/3.3/algorithm OLD_FILES+=usr/include/c++/3.3/backward/algo.h OLD_FILES+=usr/include/c++/3.3/backward/algobase.h OLD_FILES+=usr/include/c++/3.3/backward/alloc.h OLD_FILES+=usr/include/c++/3.3/backward/backward_warning.h OLD_FILES+=usr/include/c++/3.3/backward/bvector.h OLD_FILES+=usr/include/c++/3.3/backward/complex.h OLD_FILES+=usr/include/c++/3.3/backward/defalloc.h OLD_FILES+=usr/include/c++/3.3/backward/deque.h OLD_FILES+=usr/include/c++/3.3/backward/fstream.h OLD_FILES+=usr/include/c++/3.3/backward/function.h OLD_FILES+=usr/include/c++/3.3/backward/hash_map.h OLD_FILES+=usr/include/c++/3.3/backward/hash_set.h OLD_FILES+=usr/include/c++/3.3/backward/hashtable.h OLD_FILES+=usr/include/c++/3.3/backward/heap.h OLD_FILES+=usr/include/c++/3.3/backward/iomanip.h OLD_FILES+=usr/include/c++/3.3/backward/iostream.h OLD_FILES+=usr/include/c++/3.3/backward/istream.h OLD_FILES+=usr/include/c++/3.3/backward/iterator.h OLD_FILES+=usr/include/c++/3.3/backward/list.h OLD_FILES+=usr/include/c++/3.3/backward/map.h OLD_FILES+=usr/include/c++/3.3/backward/multimap.h OLD_FILES+=usr/include/c++/3.3/backward/multiset.h OLD_FILES+=usr/include/c++/3.3/backward/new.h OLD_FILES+=usr/include/c++/3.3/backward/ostream.h OLD_FILES+=usr/include/c++/3.3/backward/pair.h OLD_FILES+=usr/include/c++/3.3/backward/queue.h OLD_FILES+=usr/include/c++/3.3/backward/rope.h OLD_FILES+=usr/include/c++/3.3/backward/set.h OLD_FILES+=usr/include/c++/3.3/backward/slist.h OLD_FILES+=usr/include/c++/3.3/backward/stack.h OLD_FILES+=usr/include/c++/3.3/backward/stream.h OLD_FILES+=usr/include/c++/3.3/backward/streambuf.h OLD_FILES+=usr/include/c++/3.3/backward/strstream OLD_FILES+=usr/include/c++/3.3/backward/strstream.h OLD_FILES+=usr/include/c++/3.3/backward/tempbuf.h
OLD_FILES+=usr/include/c++/3.3/backward/tree.h OLD_FILES+=usr/include/c++/3.3/backward/vector.h OLD_DIRS+=usr/include/c++/3.3/backward OLD_FILES+=usr/include/c++/3.3/bits/atomicity.h OLD_FILES+=usr/include/c++/3.3/bits/basic_file.h OLD_FILES+=usr/include/c++/3.3/bits/basic_ios.h OLD_FILES+=usr/include/c++/3.3/bits/basic_ios.tcc OLD_FILES+=usr/include/c++/3.3/bits/basic_string.h OLD_FILES+=usr/include/c++/3.3/bits/basic_string.tcc OLD_FILES+=usr/include/c++/3.3/bits/boost_concept_check.h OLD_FILES+=usr/include/c++/3.3/bits/c++config.h OLD_FILES+=usr/include/c++/3.3/bits/c++io.h OLD_FILES+=usr/include/c++/3.3/bits/c++locale.h OLD_FILES+=usr/include/c++/3.3/bits/c++locale_internal.h OLD_FILES+=usr/include/c++/3.3/bits/char_traits.h OLD_FILES+=usr/include/c++/3.3/bits/cmath.tcc OLD_FILES+=usr/include/c++/3.3/bits/codecvt.h OLD_FILES+=usr/include/c++/3.3/bits/codecvt_specializations.h OLD_FILES+=usr/include/c++/3.3/bits/concept_check.h OLD_FILES+=usr/include/c++/3.3/bits/cpp_type_traits.h OLD_FILES+=usr/include/c++/3.3/bits/ctype_base.h OLD_FILES+=usr/include/c++/3.3/bits/ctype_inline.h OLD_FILES+=usr/include/c++/3.3/bits/ctype_noninline.h OLD_FILES+=usr/include/c++/3.3/bits/deque.tcc OLD_FILES+=usr/include/c++/3.3/bits/fpos.h OLD_FILES+=usr/include/c++/3.3/bits/fstream.tcc OLD_FILES+=usr/include/c++/3.3/bits/functexcept.h OLD_FILES+=usr/include/c++/3.3/bits/generic_shadow.h OLD_FILES+=usr/include/c++/3.3/bits/gslice.h OLD_FILES+=usr/include/c++/3.3/bits/gslice_array.h OLD_FILES+=usr/include/c++/3.3/bits/gthr-default.h OLD_FILES+=usr/include/c++/3.3/bits/gthr-posix.h OLD_FILES+=usr/include/c++/3.3/bits/gthr-single.h OLD_FILES+=usr/include/c++/3.3/bits/gthr.h OLD_FILES+=usr/include/c++/3.3/bits/indirect_array.h OLD_FILES+=usr/include/c++/3.3/bits/ios_base.h OLD_FILES+=usr/include/c++/3.3/bits/istream.tcc OLD_FILES+=usr/include/c++/3.3/bits/list.tcc OLD_FILES+=usr/include/c++/3.3/bits/locale_classes.h OLD_FILES+=usr/include/c++/3.3/bits/locale_facets.h OLD_FILES+=usr/include/c++/3.3/bits/locale_facets.tcc OLD_FILES+=usr/include/c++/3.3/bits/localefwd.h OLD_FILES+=usr/include/c++/3.3/bits/mask_array.h OLD_FILES+=usr/include/c++/3.3/bits/messages_members.h OLD_FILES+=usr/include/c++/3.3/bits/os_defines.h OLD_FILES+=usr/include/c++/3.3/bits/ostream.tcc OLD_FILES+=usr/include/c++/3.3/bits/pthread_allocimpl.h OLD_FILES+=usr/include/c++/3.3/bits/slice.h OLD_FILES+=usr/include/c++/3.3/bits/slice_array.h OLD_FILES+=usr/include/c++/3.3/bits/sstream.tcc OLD_FILES+=usr/include/c++/3.3/bits/stl_algo.h OLD_FILES+=usr/include/c++/3.3/bits/stl_algobase.h OLD_FILES+=usr/include/c++/3.3/bits/stl_alloc.h OLD_FILES+=usr/include/c++/3.3/bits/stl_bvector.h OLD_FILES+=usr/include/c++/3.3/bits/stl_construct.h OLD_FILES+=usr/include/c++/3.3/bits/stl_deque.h OLD_FILES+=usr/include/c++/3.3/bits/stl_function.h OLD_FILES+=usr/include/c++/3.3/bits/stl_heap.h OLD_FILES+=usr/include/c++/3.3/bits/stl_iterator.h OLD_FILES+=usr/include/c++/3.3/bits/stl_iterator_base_funcs.h OLD_FILES+=usr/include/c++/3.3/bits/stl_iterator_base_types.h OLD_FILES+=usr/include/c++/3.3/bits/stl_list.h OLD_FILES+=usr/include/c++/3.3/bits/stl_map.h OLD_FILES+=usr/include/c++/3.3/bits/stl_multimap.h OLD_FILES+=usr/include/c++/3.3/bits/stl_multiset.h OLD_FILES+=usr/include/c++/3.3/bits/stl_numeric.h OLD_FILES+=usr/include/c++/3.3/bits/stl_pair.h OLD_FILES+=usr/include/c++/3.3/bits/stl_pthread_alloc.h OLD_FILES+=usr/include/c++/3.3/bits/stl_queue.h OLD_FILES+=usr/include/c++/3.3/bits/stl_raw_storage_iter.h OLD_FILES+=usr/include/c++/3.3/bits/stl_relops.h 
OLD_FILES+=usr/include/c++/3.3/bits/stl_set.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_stack.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_tempbuf.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_threads.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_tree.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_uninitialized.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_vector.h
OLD_FILES+=usr/include/c++/3.3/bits/stream_iterator.h
OLD_FILES+=usr/include/c++/3.3/bits/streambuf.tcc
OLD_FILES+=usr/include/c++/3.3/bits/streambuf_iterator.h
OLD_FILES+=usr/include/c++/3.3/bits/stringfwd.h
OLD_FILES+=usr/include/c++/3.3/bits/time_members.h
OLD_FILES+=usr/include/c++/3.3/bits/type_traits.h
OLD_FILES+=usr/include/c++/3.3/bits/valarray_array.h
OLD_FILES+=usr/include/c++/3.3/bits/valarray_array.tcc
OLD_FILES+=usr/include/c++/3.3/bits/valarray_meta.h
OLD_FILES+=usr/include/c++/3.3/bits/vector.tcc
OLD_DIRS+=usr/include/c++/3.3/bits
OLD_FILES+=usr/include/c++/3.3/bitset
OLD_FILES+=usr/include/c++/3.3/cassert
OLD_FILES+=usr/include/c++/3.3/cctype
OLD_FILES+=usr/include/c++/3.3/cerrno
OLD_FILES+=usr/include/c++/3.3/cfloat
OLD_FILES+=usr/include/c++/3.3/ciso646
OLD_FILES+=usr/include/c++/3.3/climits
OLD_FILES+=usr/include/c++/3.3/clocale
OLD_FILES+=usr/include/c++/3.3/cmath
OLD_FILES+=usr/include/c++/3.3/complex
OLD_FILES+=usr/include/c++/3.3/csetjmp
OLD_FILES+=usr/include/c++/3.3/csignal
OLD_FILES+=usr/include/c++/3.3/cstdarg
OLD_FILES+=usr/include/c++/3.3/cstddef
OLD_FILES+=usr/include/c++/3.3/cstdio
OLD_FILES+=usr/include/c++/3.3/cstdlib
OLD_FILES+=usr/include/c++/3.3/cstring
OLD_FILES+=usr/include/c++/3.3/ctime
OLD_FILES+=usr/include/c++/3.3/cwchar
OLD_FILES+=usr/include/c++/3.3/cwctype
OLD_FILES+=usr/include/c++/3.3/cxxabi.h
OLD_FILES+=usr/include/c++/3.3/deque
OLD_FILES+=usr/include/c++/3.3/exception
OLD_FILES+=usr/include/c++/3.3/exception_defines.h
OLD_FILES+=usr/include/c++/3.3/ext/algorithm
OLD_FILES+=usr/include/c++/3.3/ext/enc_filebuf.h
OLD_FILES+=usr/include/c++/3.3/ext/functional
OLD_FILES+=usr/include/c++/3.3/ext/hash_map
OLD_FILES+=usr/include/c++/3.3/ext/hash_set
OLD_FILES+=usr/include/c++/3.3/ext/iterator
OLD_FILES+=usr/include/c++/3.3/ext/memory
OLD_FILES+=usr/include/c++/3.3/ext/numeric
OLD_FILES+=usr/include/c++/3.3/ext/rb_tree
OLD_FILES+=usr/include/c++/3.3/ext/rope
OLD_FILES+=usr/include/c++/3.3/ext/ropeimpl.h
OLD_FILES+=usr/include/c++/3.3/ext/slist
OLD_FILES+=usr/include/c++/3.3/ext/stdio_filebuf.h
OLD_FILES+=usr/include/c++/3.3/ext/stl_hash_fun.h
OLD_FILES+=usr/include/c++/3.3/ext/stl_hashtable.h
OLD_FILES+=usr/include/c++/3.3/ext/stl_rope.h
OLD_DIRS+=usr/include/c++/3.3/ext
OLD_FILES+=usr/include/c++/3.3/fstream
OLD_FILES+=usr/include/c++/3.3/functional
OLD_FILES+=usr/include/c++/3.3/iomanip
OLD_FILES+=usr/include/c++/3.3/ios
OLD_FILES+=usr/include/c++/3.3/iosfwd
OLD_FILES+=usr/include/c++/3.3/iostream
OLD_FILES+=usr/include/c++/3.3/istream
OLD_FILES+=usr/include/c++/3.3/iterator
OLD_FILES+=usr/include/c++/3.3/limits
OLD_FILES+=usr/include/c++/3.3/list
OLD_FILES+=usr/include/c++/3.3/locale
OLD_FILES+=usr/include/c++/3.3/map
OLD_FILES+=usr/include/c++/3.3/memory
OLD_FILES+=usr/include/c++/3.3/new
OLD_FILES+=usr/include/c++/3.3/numeric
OLD_FILES+=usr/include/c++/3.3/ostream
OLD_FILES+=usr/include/c++/3.3/queue
OLD_FILES+=usr/include/c++/3.3/set
OLD_FILES+=usr/include/c++/3.3/sstream
OLD_FILES+=usr/include/c++/3.3/stack
OLD_FILES+=usr/include/c++/3.3/stdexcept
OLD_FILES+=usr/include/c++/3.3/streambuf
OLD_FILES+=usr/include/c++/3.3/string
OLD_FILES+=usr/include/c++/3.3/typeinfo
OLD_FILES+=usr/include/c++/3.3/utility
OLD_FILES+=usr/include/c++/3.3/valarray
OLD_FILES+=usr/include/c++/3.3/vector
# 20040713: fla(4) removed.
OLD_FILES+=usr/share/man/man4/fla.4.gz
# 200407XX
OLD_FILES+=usr/sbin/kernbb
OLD_FILES+=usr/sbin/ntp-genkeys
OLD_FILES+=usr/sbin/ntptimeset
OLD_FILES+=usr/share/man/man8/kernbb.8.gz
OLD_FILES+=usr/share/man/man8/ntp-genkeys.8.gz
# 20040627: usbdevs.h and usbdevs_data.h removal
OLD_FILES+=usr/include/dev/usb/usbdevs.h
OLD_FILES+=usr/include/dev/usb/usbdevs_data.h
# 200406XX
OLD_FILES+=usr/bin/gasp
OLD_FILES+=usr/bin/gdbreplay
OLD_FILES+=usr/share/man/man1/gasp.1.gz
OLD_FILES+=sbin/mountd
OLD_FILES+=sbin/mount_fdesc
OLD_FILES+=sbin/mount_umap
OLD_FILES+=sbin/mount_union
OLD_FILES+=sbin/mount_msdos
OLD_FILES+=sbin/mount_null
OLD_FILES+=sbin/mount_kernfs
# 200405XX: arl
OLD_FILES+=usr/sbin/arlconfig
OLD_FILES+=usr/share/man/man8/arlconfig.8.gz
# 200403XX
OLD_FILES+=bin/raidctl
OLD_FILES+=sbin/raidctl
OLD_FILES+=usr/bin/sasc
OLD_FILES+=usr/sbin/sgsc
OLD_FILES+=usr/sbin/stlload
OLD_FILES+=usr/sbin/stlstats
OLD_FILES+=usr/share/man/man1/sasc.1.gz
OLD_FILES+=usr/share/man/man1/sgsc.1.gz
OLD_FILES+=usr/share/man/man4/i386/stl.4.gz
OLD_FILES+=usr/share/man/man8/raidctl.8.gz
# 20040229: clean_environment() was removed after 3 days
OLD_FILES+=usr/share/man/man3/clean_environment.3.gz
# 20040119: installed as `isdntel' in newer systems
OLD_FILES+=etc/isdn/isdntel.sh
# 200XYYZZ: /lib transition glitches
OLD_FILES+=lib/libalias.so
OLD_FILES+=lib/libatm.so
OLD_FILES+=lib/libbsdxml.so
OLD_FILES+=lib/libc.so
OLD_FILES+=lib/libcam.so
OLD_FILES+=lib/libcrypt.so
OLD_FILES+=lib/libcrypto.so
OLD_FILES+=lib/libdevstat.so
OLD_FILES+=lib/libedit.so
OLD_FILES+=lib/libgeom.so
OLD_FILES+=lib/libipsec.so
OLD_FILES+=lib/libipx.so
OLD_FILES+=lib/libkvm.so
OLD_FILES+=lib/libm.so
OLD_FILES+=lib/libmd.so
OLD_FILES+=lib/libncurses.so
OLD_FILES+=lib/libreadline.so
OLD_FILES+=lib/libsbuf.so
OLD_FILES+=lib/libufs.so
OLD_FILES+=lib/libz.so
# 200312XX
OLD_FILES+=bin/cxconfig
OLD_FILES+=sbin/cxconfig
OLD_FILES+=usr/share/man/man8/cxconfig.8.gz
# 20031016: MULTI_DRIVER_MODULE macro removed
OLD_FILES+=usr/share/man/man9/MULTI_DRIVER_MODULE.9.gz
# 200309XX
OLD_FILES+=usr/bin/symorder
OLD_FILES+=usr/share/man/man1/symorder.1.gz
# 200308XX
OLD_FILES+=usr/sbin/amldb
OLD_FILES+=usr/share/man/man8/amldb.8.gz
# 200307XX
OLD_FILES+=sbin/mount_nwfs
OLD_FILES+=sbin/mount_portalfs
OLD_FILES+=sbin/mount_smbfs
# 200306XX
OLD_FILES+=usr/sbin/dev_mkdb
OLD_FILES+=usr/share/man/man8/dev_mkdb.8.gz
# 200304XX
OLD_FILES+=usr/lib/libcipher.a
OLD_FILES+=usr/lib/libcipher.so
OLD_FILES+=usr/lib/libcipher_p.a
OLD_FILES+=usr/lib/libgmp.a
OLD_FILES+=usr/lib/libgmp.so
OLD_FILES+=usr/lib/libgmp_p.a
OLD_FILES+=usr/lib/libperl.a
OLD_FILES+=usr/lib/libperl.so
OLD_FILES+=usr/lib/libperl_p.a
OLD_FILES+=usr/lib/libposix1e.a
OLD_FILES+=usr/lib/libposix1e.so
OLD_FILES+=usr/lib/libposix1e_p.a
OLD_FILES+=usr/lib/libskey.a
OLD_FILES+=usr/lib/libskey.so
OLD_FILES+=usr/lib/libskey_p.a
OLD_FILES+=usr/libexec/tradcpp0
OLD_FILES+=usr/libexec/cpp0
# 200304XX: removal of xten
OLD_FILES+=usr/libexec/xtend
OLD_FILES+=usr/sbin/xten
OLD_FILES+=usr/share/man/man1/xten.1.gz
OLD_FILES+=usr/share/man/man8/xtend.8.gz
# 200303XX
OLD_FILES+=usr/lib/libacl.so
OLD_FILES+=usr/lib/libdescrypt.so
OLD_FILES+=usr/lib/libf2c.so
OLD_FILES+=usr/lib/libg++.so
OLD_FILES+=usr/lib/libkdb.so
OLD_FILES+=usr/lib/librsaINTL.so
OLD_FILES+=usr/lib/libscrypt.so
OLD_FILES+=usr/lib/libss.so
# 200302XX
OLD_FILES+=usr/lib/libacl.a
OLD_FILES+=usr/lib/libacl_p.a
OLD_FILES+=usr/lib/libkadm.a
OLD_FILES+=usr/lib/libkadm.so
OLD_FILES+=usr/lib/libkadm_p.a
OLD_FILES+=usr/lib/libkafs.a
OLD_FILES+=usr/lib/libkafs.so
OLD_FILES+=usr/lib/libkafs_p.a
OLD_FILES+=usr/lib/libkdb.a
OLD_FILES+=usr/lib/libkdb_p.a
OLD_FILES+=usr/lib/libkrb.a
OLD_FILES+=usr/lib/libkrb.so
OLD_FILES+=usr/lib/libkrb_p.a
OLD_FILES+=usr/share/man/man3/SSL_CIPHER_get_name.3.gz
OLD_FILES+=usr/share/man/man3/SSL_COMP_add_compression_method.3
OLD_FILES+=usr/share/man/man3/SSL_CTX_add_extra_chain_cert.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_add_session.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_ctrl.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_flush_sessions.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_free.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_get_verify_mode.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_load_verify_locations.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_new.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_sess_number.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_sess_set_cache_size.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_sess_set_get_cb.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_sessions.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_cert_store.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_cert_verify_callback.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_cipher_list.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_client_CA_list.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_client_cert_cb.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_default_passwd_cb.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_generate_session_id.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_info_callback.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_max_cert_list.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_mode.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_msg_callback.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_options.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_quiet_shutdown.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_session_cache_mode.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_session_id_context.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_ssl_version.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_timeout.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_tmp_dh_callback.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_tmp_rsa_callback.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_verify.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_use_certificate.3.gz
OLD_FILES+=usr/share/man/man3/SSL_SESSION_free.3.gz
OLD_FILES+=usr/share/man/man3/SSL_SESSION_get_ex_new_index.3.gz
OLD_FILES+=usr/share/man/man3/SSL_SESSION_get_time.3.gz
OLD_FILES+=usr/share/man/man3/SSL_accept.3.gz
OLD_FILES+=usr/share/man/man3/SSL_alert_type_string.3.gz
OLD_FILES+=usr/share/man/man3/SSL_clear.3.gz
OLD_FILES+=usr/share/man/man3/SSL_connect.3.gz
OLD_FILES+=usr/share/man/man3/SSL_do_handshake.3.gz
OLD_FILES+=usr/share/man/man3/SSL_free.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_SSL_CTX.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_ciphers.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_client_CA_list.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_current_cipher.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_default_timeout.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_error.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_ex_data_X509_STORE_CTX_idx.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_ex_new_index.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_fd.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_peer_cert_chain.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_peer_certificate.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_rbio.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_session.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_verify_result.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_version.3.gz
OLD_FILES+=usr/share/man/man3/SSL_library_init.3.gz
OLD_FILES+=usr/share/man/man3/SSL_load_client_CA_file.3.gz
OLD_FILES+=usr/share/man/man3/SSL_new.3.gz
OLD_FILES+=usr/share/man/man3/SSL_pending.3.gz
OLD_FILES+=usr/share/man/man3/SSL_read.3.gz
OLD_FILES+=usr/share/man/man3/SSL_rstate_string.3.gz
OLD_FILES+=usr/share/man/man3/SSL_session_reused.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_bio.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_connect_state.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_fd.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_session.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_shutdown.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_verify_result.3.gz
OLD_FILES+=usr/share/man/man3/SSL_shutdown.3.gz
OLD_FILES+=usr/share/man/man3/SSL_state_string.3.gz
OLD_FILES+=usr/share/man/man3/SSL_want.3.gz
OLD_FILES+=usr/share/man/man3/SSL_write.3.gz
OLD_FILES+=usr/share/man/man3/d2i_SSL_SESSION.3.gz
# 200301XX
OLD_FILES+=usr/share/man/man3/des_3cbc_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_3ecb_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_cbc_cksum.3.gz
OLD_FILES+=usr/share/man/man3/des_cbc_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_cfb_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_ecb_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_enc_read.3.gz
OLD_FILES+=usr/share/man/man3/des_enc_write.3.gz
OLD_FILES+=usr/share/man/man3/des_is_weak_key.3.gz
OLD_FILES+=usr/share/man/man3/des_key_sched.3.gz
OLD_FILES+=usr/share/man/man3/des_ofb_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_pcbc_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_quad_cksum.3.gz
OLD_FILES+=usr/share/man/man3/des_random_key.3.gz
OLD_FILES+=usr/share/man/man3/des_read_2password.3.gz
OLD_FILES+=usr/share/man/man3/des_read_password.3.gz
OLD_FILES+=usr/share/man/man3/des_read_pw_string.3.gz
OLD_FILES+=usr/share/man/man3/des_set_key.3.gz
OLD_FILES+=usr/share/man/man3/des_set_odd_parity.3.gz
OLD_FILES+=usr/share/man/man3/des_string_to_2key.3.gz
OLD_FILES+=usr/share/man/man3/des_string_to_key.3.gz
# 200212XX
OLD_FILES+=usr/sbin/kenv
OLD_FILES+=usr/bin/kenv
OLD_FILES+=usr/sbin/elf2aout
# 200210XX
OLD_FILES+=usr/include/libusbhid.h
OLD_FILES+=usr/share/man/man3/All_FreeBSD.3.gz
OLD_FILES+=usr/share/man/man3/CheckRules.3.gz
OLD_FILES+=usr/share/man/man3/ChunkCanBeRoot.3.gz
OLD_FILES+=usr/share/man/man3/Clone_Disk.3.gz
OLD_FILES+=usr/share/man/man3/Collapse_Chunk.3.gz
OLD_FILES+=usr/share/man/man3/Collapse_Disk.3.gz
OLD_FILES+=usr/share/man/man3/Create_Chunk.3.gz
OLD_FILES+=usr/share/man/man3/Create_Chunk_DWIM.3.gz
OLD_FILES+=usr/share/man/man3/Cyl_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Debug_Disk.3.gz
OLD_FILES+=usr/share/man/man3/Delete_Chunk.3.gz
OLD_FILES+=usr/share/man/man3/Disk_Names.3.gz
OLD_FILES+=usr/share/man/man3/Free_Disk.3.gz
OLD_FILES+=usr/share/man/man3/MakeDev.3.gz
OLD_FILES+=usr/share/man/man3/MakeDevDisk.3.gz
OLD_FILES+=usr/share/man/man3/Next_Cyl_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Next_Track_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Open_Disk.3.gz
OLD_FILES+=usr/share/man/man3/Prev_Cyl_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Prev_Track_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Set_Bios_Geom.3.gz
OLD_FILES+=usr/share/man/man3/Set_Boot_Blocks.3.gz
OLD_FILES+=usr/share/man/man3/Set_Boot_Mgr.3.gz
OLD_FILES+=usr/share/man/man3/ShowChunkFlags.3.gz
OLD_FILES+=usr/share/man/man3/Track_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Write_Disk.3.gz
OLD_FILES+=usr/share/man/man3/slice_type_name.3.gz
# 200210XX: most games moved to ports
OLD_FILES+=usr/share/man/man6/adventure.6.gz
OLD_FILES+=usr/share/man/man6/arithmetic.6.gz
OLD_FILES+=usr/share/man/man6/atc.6.gz
OLD_FILES+=usr/share/man/man6/backgammon.6.gz
OLD_FILES+=usr/share/man/man6/battlestar.6.gz
OLD_FILES+=usr/share/man/man6/bs.6.gz
OLD_FILES+=usr/share/man/man6/canfield.6.gz
OLD_FILES+=usr/share/man/man6/cfscores.6.gz
OLD_FILES+=usr/share/man/man6/cribbage.6.gz
OLD_FILES+=usr/share/man/man6/fish.6.gz
OLD_FILES+=usr/share/man/man6/hack.6.gz
OLD_FILES+=usr/share/man/man6/hangman.6.gz
OLD_FILES+=usr/share/man/man6/larn.6.gz
OLD_FILES+=usr/share/man/man6/mille.6.gz
OLD_FILES+=usr/share/man/man6/phantasia.6.gz
OLD_FILES+=usr/share/man/man6/piano.6.gz
OLD_FILES+=usr/share/man/man6/pig.6.gz
OLD_FILES+=usr/share/man/man6/quiz.6.gz
OLD_FILES+=usr/share/man/man6/rain.6.gz
OLD_FILES+=usr/share/man/man6/robots.6.gz
OLD_FILES+=usr/share/man/man6/rogue.6.gz
OLD_FILES+=usr/share/man/man6/sail.6.gz
OLD_FILES+=usr/share/man/man6/snake.6.gz
OLD_FILES+=usr/share/man/man6/snscore.6.gz
OLD_FILES+=usr/share/man/man6/trek.6.gz
OLD_FILES+=usr/share/man/man6/wargames.6.gz
OLD_FILES+=usr/share/man/man6/worm.6.gz
OLD_FILES+=usr/share/man/man6/worms.6.gz
OLD_FILES+=usr/share/man/man6/wump.6.gz
# 200207XX
OLD_FILES+=usr/share/man/man1aout/ar.1aout.gz
OLD_FILES+=usr/share/man/man1aout/as.1aout.gz
OLD_FILES+=usr/share/man/man1aout/ld.1aout.gz
OLD_FILES+=usr/share/man/man1aout/nm.1aout.gz
OLD_FILES+=usr/share/man/man1aout/ranlib.1aout.gz
OLD_FILES+=usr/share/man/man1aout/size.1aout.gz
OLD_FILES+=usr/share/man/man1aout/strings.1aout.gz
OLD_FILES+=usr/share/man/man1aout/strip.1aout.gz
OLD_FILES+=bin/mountd
OLD_FILES+=bin/nfsd
# 20020707 sbin/nfsd -> usr.sbin/nfsd
OLD_FILES+=sbin/nfsd
# 200206XX
OLD_FILES+=usr/lib/libpam_ssh.a
OLD_FILES+=usr/lib/libpam_ssh_p.a
OLD_FILES+=usr/bin/help
OLD_FILES+=usr/bin/sccs
# Only mark gdbserver obsolete on platforms that do not build it.
.if ${TARGET_ARCH} != "amd64" && ${TARGET} != "arm" && ${TARGET_ARCH} != "i386" && ${TARGET} != "powerpc"
OLD_FILES+=usr/bin/gdbserver
.endif
OLD_FILES+=usr/bin/ssh-keysign
OLD_FILES+=usr/sbin/gifconfig
OLD_FILES+=usr/sbin/prefix
# 200205XX
OLD_FILES+=usr/bin/doscmd
# 200204XX
OLD_FILES+=usr/bin/a2p
OLD_FILES+=usr/bin/ptx
OLD_FILES+=usr/bin/pod2text
OLD_FILES+=usr/bin/pod2man
OLD_FILES+=usr/bin/pod2latex
OLD_FILES+=usr/bin/pod2html
OLD_FILES+=usr/bin/h2ph
OLD_FILES+=usr/bin/dprofpp
OLD_FILES+=usr/bin/c2ph
OLD_FILES+=usr/bin/h2xs
OLD_FILES+=usr/bin/pl2pm
OLD_FILES+=usr/bin/splain
OLD_FILES+=usr/bin/s2p
OLD_FILES+=usr/bin/find2perl
OLD_FILES+=usr/sbin/pkg_update
OLD_FILES+=usr/sbin/scriptdump
# 20020409 GC kget(1), userconfig is long dead.
OLD_FILES+=sbin/kget
OLD_FILES+=usr/share/man/man8/kget.8.gz
# 200203XX
OLD_FILES+=usr/lib/libss.a
OLD_FILES+=usr/lib/libss_p.a
OLD_FILES+=usr/lib/libtelnet.a
OLD_FILES+=usr/lib/libtelnet_p.a
OLD_FILES+=usr/sbin/diskpart
# 200202XX
OLD_FILES+=usr/bin/gprof4
# 200201XX
OLD_FILES+=usr/sbin/linux
# 2001XXXX
OLD_FILES+=usr/bin/joy
OLD_FILES+=usr/sbin/ibcs2
OLD_FILES+=usr/sbin/svr4
OLD_FILES+=usr/bin/chflags
OLD_FILES+=usr/sbin/uuconv
OLD_FILES+=usr/sbin/uuchk
OLD_FILES+=usr/sbin/portmap
OLD_FILES+=usr/sbin/pmap_set
OLD_FILES+=usr/sbin/pmap_dump
OLD_FILES+=usr/sbin/mcon
OLD_FILES+=usr/sbin/stlstty
OLD_FILES+=usr/sbin/ispppcontrol
OLD_FILES+=usr/sbin/rndcontrol
# 20011001: UUCP migration to ports
OLD_FILES+=usr/bin/uucp
OLD_FILES+=usr/bin/uulog
OLD_FILES+=usr/bin/uuname
OLD_FILES+=usr/bin/uupick
OLD_FILES+=usr/bin/uusched
OLD_FILES+=usr/bin/uustat
OLD_FILES+=usr/bin/uuto
OLD_FILES+=usr/bin/uux
OLD_FILES+=usr/libexec/uucp/uucico
OLD_FILES+=usr/libexec/uucp/uuxqt
OLD_FILES+=usr/libexec/uucpd
OLD_FILES+=usr/share/man/man1/uuconv.1.gz
OLD_FILES+=usr/share/man/man1/uucp.1.gz
OLD_FILES+=usr/share/man/man1/uulog.1.gz
OLD_FILES+=usr/share/man/man1/uuname.1.gz
OLD_FILES+=usr/share/man/man1/uupick.1.gz
OLD_FILES+=usr/share/man/man1/uustat.1.gz
OLD_FILES+=usr/share/man/man1/uuto.1.gz
OLD_FILES+=usr/share/man/man1/uux.1.gz
OLD_FILES+=usr/share/man/man8/uuchk.8.gz
OLD_FILES+=usr/share/man/man8/uucico.8.gz
OLD_FILES+=usr/share/man/man8/uucpd.8.gz
OLD_FILES+=usr/share/man/man8/uusched.8.gz
OLD_FILES+=usr/share/man/man8/uuxqt.8.gz
# 20010523 mount_portal -> mount_portalfs
OLD_FILES+=sbin/mount_portal
OLD_FILES+=usr/share/man/man8/mount_portal.8.gz
# 200104XX
OLD_FILES+=usr/lib/libdescrypt.a
OLD_FILES+=usr/lib/libscrypt.a
OLD_FILES+=usr/lib/libscrypt_p.a
OLD_FILES+=usr/sbin/pim6stat
OLD_FILES+=usr/sbin/pim6sd
OLD_FILES+=usr/sbin/pim6dd
# 20010217
OLD_FILES+=usr/share/doc/bind/misc/dns-setup
# 20001200
OLD_FILES+=usr/lib/libgcc_r_pic.a
# 200009XX
OLD_FILES+=usr/lib/libRSAglue.a
OLD_FILES+=usr/lib/libRSAglue.so
OLD_FILES+=usr/lib/librsaINTL.a
OLD_FILES+=usr/lib/librsaUSA.a
OLD_FILES+=usr/lib/librsaUSA.so
# 200002XX ?
OLD_FILES+=usr/lib/libf2c.a
OLD_FILES+=usr/lib/libf2c_p.a
OLD_FILES+=usr/lib/libg++.a
OLD_FILES+=usr/lib/libg++_p.a
# 20001006
OLD_FILES+=usr/bin/miniperl
# 20000810
OLD_FILES+=usr/bin/sperl
# 200001XX
OLD_FILES+=usr/sbin/apmconf
# 199911XX
OLD_FILES+=usr/sbin/ipfstat
OLD_FILES+=usr/sbin/ipmon
OLD_FILES+=usr/sbin/ipnat
OLD_FILES+=usr/sbin/bad144
OLD_FILES+=usr/sbin/wormcontrol
OLD_FILES+=usr/sbin/named-bootconf
OLD_FILES+=usr/sbin/kvm_mkdb
OLD_FILES+=usr/sbin/keyadmin
# 199909XX
OLD_FILES+=usr/lib/libdesrypt_p.a
OLD_FILES+=sbin/ft
# 199903XX
OLD_FILES+=sbin/modload
OLD_FILES+=sbin/modunload
OLD_FILES+=usr/sbin/natd
# 199812XX
OLD_FILES+=sbin/dset
# 199809XX
OLD_FILES+=sbin/scsi
OLD_FILES+=sbin/scsiformat
OLD_FILES+=usr/sbin/ncrcontrol
OLD_FILES+=usr/sbin/tickadj
# 199806XX
OLD_FILES+=usr/sbin/mkdosfs
# 199801XX
OLD_FILES+=sbin/mount_lfs
OLD_FILES+=sbin/newlfs
OLD_FILES+=sbin/dumplfs
OLD_FILES+=usr/sbin/qcamcontrol
OLD_FILES+=usr/sbin/supscan
# 1997XXXX
OLD_FILES+=usr/sbin/sysctl
OLD_FILES+=usr/sbin/ctm_scan
OLD_FILES+=usr/sbin/addgroup
OLD_FILES+=usr/sbin/rmgroup
# 1996XXXX
OLD_FILES+=sbin/rdisc
OLD_FILES+=usr/sbin/cdplay
OLD_FILES+=usr/sbin/supfilesrv
OLD_FILES+=usr/sbin/routed
OLD_FILES+=usr/sbin/lsdev
OLD_FILES+=usr/sbin/yppasswdd
## unsorted
# do we still support aout builds?
#OLD_FILES+=usr/lib/aout/c++rt0.o
#OLD_FILES+=usr/lib/aout/crt0.o
#OLD_FILES+=usr/lib/aout/gcrt0.o
#OLD_FILES+=usr/lib/aout/scrt0.o
#OLD_FILES+=usr/lib/aout/sgcrt0.o
OLD_FILES+=usr/bin/sperl5
OLD_FILES+=usr/bin/perl5.6.0
OLD_FILES+=usr/bin/sperl5.6.0
OLD_FILES+=usr/bin/perlbc
OLD_FILES+=usr/bin/perl5.00503
OLD_FILES+=usr/bin/sperl5.00503
OLD_FILES+=usr/bin/perlbug
OLD_FILES+=usr/bin/perlcc
OLD_FILES+=usr/bin/perldoc
OLD_FILES+=usr/bin/suidperl
OLD_FILES+=usr/lib/pam_ftp.so
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Apache.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Carp.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Cookie.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Fast.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Push.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Switch.pm
OLD_FILES+=usr/libdata/perl/5.00503/CPAN/FirstTime.pm
OLD_FILES+=usr/libdata/perl/5.00503/CPAN/Nox.pm
OLD_FILES+=usr/libdata/perl/5.00503/Class/Struct.pm
OLD_FILES+=usr/libdata/perl/5.00503/Devel/SelfStubber.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Command.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Embed.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Install.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Installed.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Liblist.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MM_OS2.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MM_Unix.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MM_VMS.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MM_Win32.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MakeMaker.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Manifest.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Mkbootstrap.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Mksymlists.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Packlist.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/inst
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/testlib.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/typemap
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/xsubpp
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/Mac.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/OS2.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/Unix.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/VMS.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/Win32.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Basename.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/CheckTree.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Compare.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Copy.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/DosGlob.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Find.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Path.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/stat.pm
OLD_FILES+=usr/libdata/perl/5.00503/Getopt/Long.pm
OLD_FILES+=usr/libdata/perl/5.00503/Getopt/Std.pm
OLD_FILES+=usr/libdata/perl/5.00503/I18N/Collate.pm
OLD_FILES+=usr/libdata/perl/5.00503/IPC/Open2.pm
OLD_FILES+=usr/libdata/perl/5.00503/IPC/Open3.pm
OLD_FILES+=usr/libdata/perl/5.00503/Math/BigFloat.pm
OLD_FILES+=usr/libdata/perl/5.00503/Math/BigInt.pm
OLD_FILES+=usr/libdata/perl/5.00503/Math/Complex.pm
OLD_FILES+=usr/libdata/perl/5.00503/Math/Trig.pm
OLD_FILES+=usr/libdata/perl/5.00503/Net/Ping.pm
OLD_FILES+=usr/libdata/perl/5.00503/Net/hostent.pm
OLD_FILES+=usr/libdata/perl/5.00503/Net/netent.pm
OLD_FILES+=usr/libdata/perl/5.00503/Net/protoent.pm
OLD_FILES+=usr/libdata/perl/5.00503/Net/servent.pm
OLD_FILES+=usr/libdata/perl/5.00503/Pod/Functions.pm
OLD_FILES+=usr/libdata/perl/5.00503/Pod/Html.pm
OLD_FILES+=usr/libdata/perl/5.00503/Pod/Text.pm
OLD_FILES+=usr/libdata/perl/5.00503/Search/Dict.pm
OLD_FILES+=usr/libdata/perl/5.00503/Sys/Hostname.pm
OLD_FILES+=usr/libdata/perl/5.00503/Sys/Syslog.pm
OLD_FILES+=usr/libdata/perl/5.00503/Term/Cap.pm
OLD_FILES+=usr/libdata/perl/5.00503/Term/Complete.pm
OLD_FILES+=usr/libdata/perl/5.00503/Term/ReadLine.pm
OLD_FILES+=usr/libdata/perl/5.00503/Test/Harness.pm
OLD_FILES+=usr/libdata/perl/5.00503/Text/Abbrev.pm
OLD_FILES+=usr/libdata/perl/5.00503/Text/ParseWords.pm
OLD_FILES+=usr/libdata/perl/5.00503/Text/Soundex.pm
OLD_FILES+=usr/libdata/perl/5.00503/Text/Tabs.pm
OLD_FILES+=usr/libdata/perl/5.00503/Text/Wrap.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/Array.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/Handle.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/Hash.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/RefHash.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/Scalar.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/SubstrHash.pm
OLD_FILES+=usr/libdata/perl/5.00503/Time/Local.pm
OLD_FILES+=usr/libdata/perl/5.00503/Time/gmtime.pm
OLD_FILES+=usr/libdata/perl/5.00503/Time/localtime.pm
OLD_FILES+=usr/libdata/perl/5.00503/Time/tm.pm
OLD_FILES+=usr/libdata/perl/5.00503/User/grent.pm
OLD_FILES+=usr/libdata/perl/5.00503/User/pwent.pm
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/GetOptions.al
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/FindOption.al
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/Configure.al
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/config.al
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/Croak.al
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/autosplit.ix
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Deparse.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/CC.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Debug.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Showlex.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/makeliblinks
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Bblock.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/cc_harness
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Bytecode.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Stackobj.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Xref.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Lint.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Asmdata.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Assembler.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Disassembler.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/disassemble
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/assemble
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Terse.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/C.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/EXTERN.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/INTERN.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/XSUB.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/XSlock.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/av.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/bytecode.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/byterun.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/cc_runtime.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/config.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/cop.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/cv.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/dosish.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/embed.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/embedvar.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/fakethr.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/form.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/gv.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/handy.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/hv.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/intrpvar.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/iperlsys.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/keywords.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/mg.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/nostdio.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/objXSUB.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/objpp.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/op.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/opcode.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/patchlevel.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perl.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perlio.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perlsdio.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perlsfio.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perlvars.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perly.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/pp.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/pp_proto.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/proto.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/regcomp.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/regexp.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/regnodes.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/scope.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/sv.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/thrdvar.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/thread.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/unixish.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/util.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/Data/Dumper.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/File.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Select.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Socket.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Handle.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Seekable.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Pipe.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IPC/SysV.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IPC/Msg.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IPC/Semaphore.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/B/B.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/B/B.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/B/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DB_File/autosplit.ix
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DB_File/DB_File.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DB_File/DB_File.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DB_File/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Data/Dumper/Dumper.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Data/Dumper/Dumper.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Data/Dumper/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/.exists
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/dl_findfile.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/dl_expandspec.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/dl_find_symbol_anywhere.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/autosplit.ix
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/DynaLoader.a
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/extralibs.ld
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Fcntl/Fcntl.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Fcntl/Fcntl.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Fcntl/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IO/IO.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IO/IO.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IO/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IPC/SysV/SysV.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IPC/SysV/SysV.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IPC/SysV/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/NDBM_File/NDBM_File.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/NDBM_File/NDBM_File.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/NDBM_File/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Opcode/Opcode.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Opcode/Opcode.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Opcode/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/assert.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/tolower.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/toupper.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/closedir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/opendir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/readdir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rewinddir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/errno.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/creat.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fcntl.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getgrgid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getgrnam.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atan2.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/cos.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/exp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fabs.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/log.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/pow.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sin.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sqrt.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getpwnam.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getpwuid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/longjmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setjmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/kill.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/feof.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/siglongjmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sigsetjmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/raise.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/offsetof.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/clearerr.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fclose.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fdopen.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fgetc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fgets.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fileno.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fopen.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fprintf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fputc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fputs.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fread.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/freopen.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fscanf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fseek.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/ferror.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fflush.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fgetpos.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fsetpos.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/ftell.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fwrite.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getchar.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/gets.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/perror.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/printf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/putc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/putchar.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/puts.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/remove.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rename.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rewind.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/scanf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sprintf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sscanf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/tmpfile.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/ungetc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/vfprintf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/vprintf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/vsprintf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/abs.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atexit.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atof.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atoi.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atol.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/bsearch.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/calloc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/div.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/exit.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/free.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getenv.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/labs.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/ldiv.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/malloc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/qsort.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rand.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/realloc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/srand.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/system.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memchr.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memcmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memcpy.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memmove.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memset.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strcat.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strchr.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strcmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strcpy.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strcspn.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strerror.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strlen.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strncat.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strncmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strncpy.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strpbrk.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strrchr.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strspn.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strstr.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strtok.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/chmod.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fstat.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/mkdir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/stat.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/umask.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/wait.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/waitpid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/gmtime.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/localtime.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/time.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/alarm.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/chdir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/chown.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execl.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execle.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execlp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execv.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execve.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execvp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fork.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getcwd.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getegid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/geteuid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getgid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getgroups.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getlogin.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getpgrp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getpid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getppid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getuid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/isatty.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/link.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rmdir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setbuf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setgid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setuid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setvbuf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sleep.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/unlink.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/utime.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/autosplit.ix
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/POSIX.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/POSIX.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/SDBM_File/SDBM_File.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/SDBM_File/SDBM_File.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/SDBM_File/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Socket/Socket.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Socket/Socket.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Socket/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/attrs/attrs.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/attrs/attrs.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/attrs/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/re/re.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/re/re.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/re/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/sdbm/extralibs.ld
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Errno/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/Config.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/O.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/perllocal.pod
OLD_FILES+=usr/libdata/perl/5.00503/mach/DB_File.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/Errno.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/Fcntl.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/NDBM_File.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/Safe.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/Opcode.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/ops.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/POSIX.pod
OLD_FILES+=usr/libdata/perl/5.00503/mach/POSIX.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/SDBM_File.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/Socket.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/attrs.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/re.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/_h2ph_pre.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/a.out.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_ccb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_extend.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_periph.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_queue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_sim.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_xpt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_xpt_periph.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_xpt_sim.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/aio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/alias.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/assert.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/bitstring.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/calendar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/camlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/com_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/com_right.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ctype.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/curses.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/db.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/des.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/devstat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/dialog.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/dirent.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/disktab.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/dlfcn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/elf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/errno.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/eti.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/fcntl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/fetch.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/float.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/floatingpoint.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/fnmatch.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/form.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/fstab.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ftpio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/fts.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/glob.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/gmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/gnuregex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/grp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/histedit.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ieeefp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ifaddrs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/inttypes.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/iso646.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/kvm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libatm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libdisk.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libgen.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libusb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libutil.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/limits.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/link.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/linker_set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/locale.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/login_cap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/malloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/FlexLexer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/PlotFile.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/SFile.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/_G_config.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/algo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/algobase.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/alloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/builtinbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/bvector.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/complex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/defalloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/deque.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/editbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/floatio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/fstream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/function.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/hash_map.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/hash_set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/hashtable.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/heap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/indstream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iolibio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iomanip.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/list.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iostdio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iostream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iostreamP.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/istream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iterator.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/libio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/libioP.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/map.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/multimap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/multiset.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/new.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/ostream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/pair.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/parsestream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/pfstream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/procbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/pthread_alloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/rope.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/ropeimpl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/slist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stdiostream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_algo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/tree.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_algobase.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_alloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_bvector.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_config.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_construct.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_deque.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_function.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_hash_fun.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_hash_map.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_hash_set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_hashtable.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_heap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_iterator.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_list.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_map.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_multimap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_multiset.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_numeric.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_pair.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_queue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_raw_storage_iter.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_relops.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_rope.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_slist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_stack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_tempbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_tree.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_uninitialized.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_vector.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/streambuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/strfile.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/strstream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/tempbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/type_traits.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/vector.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/math.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/md2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/md4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/md5.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/memory.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/menu.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/mp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/mpool.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/mqueue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ncurses.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ndbm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netdb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nl_types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nlist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objformat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/opie.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/osreldate.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/panel.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/paths.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pcap-int.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pcap-namedb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pcap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/poll.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pthread.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pthread_np.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pwd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/radlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ranlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/regex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/regexp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/resolv.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ripemd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rune.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/runetype.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sched.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/search.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/semaphore.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/setjmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sgtty.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sha.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/signal.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/skey.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stab.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stand.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stdarg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stddef.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stdio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stdlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/strhash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/string.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stringlist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/strings.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/struct.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sysexits.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/syslog.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/taclib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/tar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/tcpd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/term.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/termcap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/termios.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/time.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/timers.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ttyent.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ucontext.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/unctrl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/unistd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/utime.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/utmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/values.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/varargs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vgl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vis.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/zconf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/zlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/ftp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/inet.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/nameser.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/nameser_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/telnet.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/tftp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/assertions.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/ctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/dst.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/eventlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/heap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/irpmarshall.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/logging.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/memcluster.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/misc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/tree.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/list.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ansi.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/apic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/apm_bios.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/apm_segments.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/asc_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/asm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/asmacros.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/asnames.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/atomic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bootinfo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_at386.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_memio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_pc98.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_pio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_pio_ind.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cdk.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/clock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/comstats.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/console.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cpu.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cpufunc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cputypes.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cronyx.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/db_machdep.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/dvcfg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/elf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/endian.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/exec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/float.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/floatingpoint.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/frame.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/globaldata.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/globals.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/gsc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_cause.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_rbch_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_tel_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_trace.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ieeefp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/if_wavelan_ieee.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/if_wl_wavelan.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/iic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/in_cksum.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ioctl_bt848.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ioctl_ctx.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ioctl_fd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ioctl_meteor.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ipl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/joystick.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/limits.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/lock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/md_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/mouse.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/mpapic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/mtpr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_dma.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/npx.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/param.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pcaudioio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pcb_ext.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pcvt_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/perfmon.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/physio_proc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pmap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/proc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/profile.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/psl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ptrace.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/reg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/reloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/resource.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/segments.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/setjmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/sigframe.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/signal.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/smb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/smp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/smptests.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/soundcard.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/speaker.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/specialreg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/spigot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/stdarg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/sysarch.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/trap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/tss.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/uc_device.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ucontext.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ultrasound.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/varargs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/vm86.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/vmparam.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/wtio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_isppp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pci_cfgreg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/bootsect.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/bpb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/denode.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/direntry.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/fat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/msdosfsmount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/bpf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/bpf_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/bpfdesc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/bridge.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/ethernet.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/hostcache.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_arp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_atm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_dl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_gif.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_ieee80211.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_llc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_media.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_mib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_ppp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_pppvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_slvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_sppp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_stf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_tap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_tapvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_tun.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/slip.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_tunvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_vlan_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/intrq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/iso88025.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/net_osdep.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/netisr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/pfkeyv2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/ppp_comp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/ppp_defs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/radix.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/raw_cb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/route.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/slcompress.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/zlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_faith.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_arc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_gre.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/krpc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsdiskless.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsm_subs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsmount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsnode.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsproto.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsrtt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsrvcache.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsv2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nqnfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/rpcv2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/xdr_subs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/aarp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/at.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/at_extern.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/at_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/ddp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/ddp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/endian.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/phase2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_cm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_if.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_sap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_sigmgr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_stack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_sys.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_vc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/kern_include.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/port.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/queue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/netgraph.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_UI.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_async.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_bpf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_bridge.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_cisco.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_echo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_ether.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_frame_relay.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_hole.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_iface.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_ksocket.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_lmi.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_message.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_mppc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_one2many.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_parse.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_ppp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_pppoe.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_pptpgre.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_rfc1490.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_sample.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_socket.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_socketvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_tee.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_tty.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_vjc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_eiface.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_etf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_device.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_l2tp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_fec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/icmp6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/icmp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/if_atm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/if_ether.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/if_fddi.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/igmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/igmp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_gif.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_hostcache.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_systm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_auth.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_dummynet.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_ecn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_encap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_fil.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_flow.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_frag.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_fw.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_icmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_mroute.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_nat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_proxy.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_state.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ipl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ipprotosw.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_fsm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_seq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_timer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcpip.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/udp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/udp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_fw2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_gre.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ah.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ah6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/esp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/esp6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/icmp6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_gif.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_ifattach.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_prefix.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6_ecn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6_fw.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6_mroute.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6protosw.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ipcomp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ipcomp6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ipsec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ipsec6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/mld6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/nd6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/pim6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/pim6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/scope6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/tcp6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/udp6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/esp_rijndael.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/raw_ip6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx_if.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx_ip.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx_pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/spx.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/spx_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/spx_timer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/spx_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/key.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/key_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/key_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/keydb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/keysock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netnatm/natm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_cfg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_conn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_file.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_lib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_ncp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_nls.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_rcfile.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_rq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_sock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_subr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_user.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/nwerror.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/idp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/idp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/ns.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/ns_error.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/ns_if.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/ns_pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/sp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/spidp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/spp_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/spp_timer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/spp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_compr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_ihash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_inode.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_subr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_vfsops.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfsmount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nwfs/nwfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nwfs/nwfs_mount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nwfs/nwfs_node.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nwfs/nwfs_subr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/NXConstStr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/Object.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/Protocol.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/encoding.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/hash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/objc-api.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/objc-list.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/objc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/runtime.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/sarray.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/thr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/typedstream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/asn1.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/asn1_mac.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/bio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/blowfish.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/bn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/buffer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/cast.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/comp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/conf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/conf_api.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/crypto.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/des.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/dh.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/dsa.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/dso.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/e_os.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/e_os2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ebcdic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/evp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hmac.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/lhash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/md2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/md4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/md5.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/mdc2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/obj_mac.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/objects.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/opensslconf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/opensslv.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/pem.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/pem2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/pkcs12.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/pkcs7.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rand.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rc2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rc4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rc5.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ripemd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rsa.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/safestack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/sha.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl23.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl3.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl_locl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/stack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/symhacks.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/tls1.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/tmdiff.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/txt_db.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/x509.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/x509_vfy.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/x509v3.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/idea.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/aes.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/asn1t.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/cryptlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/des_old.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/engine.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/krb5_asn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/kssl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ocsp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ossl_typ.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ui.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ui_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/aes_locl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/eng_int.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_4758_cca_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_aep_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_atalla_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_cswift_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_ncipher_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_nuron_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_sureware_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_ubsec_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ui_locl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/cardinfo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/cis.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/driver.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/i82365.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/pccard_nbk.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/slot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/meciareg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/pcic_pci.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/pcicvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/aio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/mqueue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/posix4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/sched.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/semaphore.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/dumprestore.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/routed.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/rwhod.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/talkd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/timed.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/chardefs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/history.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/keymaps.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/readline.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/rlconf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/rlstdc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/tilde.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/auth.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/auth_des.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/auth_unix.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/clnt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/des.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/des_crypt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/key_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/pmap_clnt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/pmap_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/pmap_rmt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/rpc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/rpc_com.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/rpc_msg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/svc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/svc_auth.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/xdr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/bootparam_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/crypt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/key_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/klm_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/mount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nfs_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis_cache.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis_callback.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis_db.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis_tags.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nislib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nlm_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rnusers.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rquota.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rstat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rwall.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/sm_inter.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/spray.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/yp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/yp_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/ypclnt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/yppasswd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/ypupdate_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/ypxfrd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/_pam_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/_pam_macros.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/_pam_types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_appl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_malloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_misc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_mod_misc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_modules.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ss/mit-sipb-copyright.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ss/ss.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ss/ss_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/_posix.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ata.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/acct.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/acl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/agpio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/aio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/assym.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/blist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/buf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/bus.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/bus_private.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/callout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ccdvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/cdefs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/cdio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/cdrio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/chio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/clist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/endian.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/conf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/cons.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/consio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/copyright.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ctype.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dir.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dataacq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/link_elf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/device_port.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/devicestat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dirent.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/disk.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/disklabel.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/diskslice.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dkstat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dmap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/domain.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dvdio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/elf32.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/elf64.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/elf_common.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/elf_generic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/errno.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/event.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/eventhandler.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/eventvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/exec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/extattr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/fbio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/fcntl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/file.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/filedesc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/filio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/gmon.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/imgact.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/imgact_aout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/imgact_elf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/inflate.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/interrupt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/inttypes.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ioccom.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ioctl_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ipc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/jail.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/joystick.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/kbio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/kernel.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/kthread.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ktrace.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/libkern.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/linker.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/linker_set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/lock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/lockf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/malloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/md5.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/memrange.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mman.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/module.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/msg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/msgbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mtio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/namei.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/param.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/pciio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/pioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/pipe.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/poll.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/proc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/procfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/protosw.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ptio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ptrace.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/queue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/random.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/reboot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/resource.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/resourcevar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/rman.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/rtprio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/select.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sem.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/shm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/signal.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/signalvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/snoop.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/socket.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/socketvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sockio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/soundcard.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/stat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/syscall-hide.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/syscall.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sysctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sysent.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/syslimits.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/syslog.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sysproto.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/systm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/taskqueue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/termios.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/time.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/timeb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/timepps.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/timers.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/times.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/timex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/tprintf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/tty.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ttychars.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ttycom.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ttydefaults.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ttydev.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ucontext.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ucred.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/uio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/un.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/unistd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/unpcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/user.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/utsname.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/vmmeter.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/vnioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/vnode.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/wait.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/wormio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/xrpuio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/kobj.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/link_aout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/nlist_aout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mchain.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/fnv_hash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/iconv.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/md4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/pmap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/swap_pager.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_extern.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_kern.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_map.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_object.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_page.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_pageout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_pager.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_param.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_zone.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vnode_pager.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/complex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stdbool.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/langinfo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/netbios.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_conn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_dev.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_rq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_subr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_tran.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_trantcp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g2c.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/telnet.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/elf-hints.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libusbhid.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/radlib_vs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readpassphrase.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/wchar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/wctype.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/cast.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/castsb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/cryptodev.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/cryptosoft.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/deflate.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/rijndael.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/rmd160.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/skipjack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/xform.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ah.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ah_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/esp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/esp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipcomp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipcomp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipip_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipsec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipsec6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/key.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/key_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/key_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/keydb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/keysock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/xform.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/bzlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/pod/perl.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perl5004delta.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlapio.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlbook.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlbot.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlcall.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldata.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldebug.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldelta.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldiag.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldsc.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlembed.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq1.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq2.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq3.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq4.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq5.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq6.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq7.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlipc.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq8.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq9.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlform.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfunc.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlguts.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlhist.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perllocale.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perllol.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlmod.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlmodinstall.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlmodlib.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlobj.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlop.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlopentut.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlpod.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlport.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlre.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlref.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlreftut.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlrun.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlsec.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlstyle.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlsub.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlsyn.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlthrtut.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perltie.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perltoc.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perltoot.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perltrap.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlvar.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlxs.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlxstut.pod
OLD_FILES+=usr/libdata/perl/5.00503/AnyDBM_File.pm
OLD_FILES+=usr/libdata/perl/5.00503/AutoLoader.pm
OLD_FILES+=usr/libdata/perl/5.00503/AutoSplit.pm
OLD_FILES+=usr/libdata/perl/5.00503/Benchmark.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI.pm
OLD_FILES+=usr/libdata/perl/5.00503/CPAN.pm
OLD_FILES+=usr/libdata/perl/5.00503/Carp.pm
OLD_FILES+=usr/libdata/perl/5.00503/Cwd.pm
OLD_FILES+=usr/libdata/perl/5.00503/DirHandle.pm
OLD_FILES+=usr/libdata/perl/5.00503/Dumpvalue.pm
OLD_FILES+=usr/libdata/perl/5.00503/English.pm
OLD_FILES+=usr/libdata/perl/5.00503/Env.pm
OLD_FILES+=usr/libdata/perl/5.00503/Exporter.pm
OLD_FILES+=usr/libdata/perl/5.00503/Fatal.pm
OLD_FILES+=usr/libdata/perl/5.00503/FileCache.pm
OLD_FILES+=usr/libdata/perl/5.00503/FileHandle.pm
OLD_FILES+=usr/libdata/perl/5.00503/FindBin.pm
OLD_FILES+=usr/libdata/perl/5.00503/SelectSaver.pm
OLD_FILES+=usr/libdata/perl/5.00503/SelfLoader.pm
OLD_FILES+=usr/libdata/perl/5.00503/Shell.pm
OLD_FILES+=usr/libdata/perl/5.00503/Symbol.pm
OLD_FILES+=usr/libdata/perl/5.00503/Test.pm
OLD_FILES+=usr/libdata/perl/5.00503/abbrev.pl
OLD_FILES+=usr/libdata/perl/5.00503/UNIVERSAL.pm
OLD_FILES+=usr/libdata/perl/5.00503/assert.pl
OLD_FILES+=usr/libdata/perl/5.00503/autouse.pm
OLD_FILES+=usr/libdata/perl/5.00503/base.pm
OLD_FILES+=usr/libdata/perl/5.00503/bigfloat.pl
OLD_FILES+=usr/libdata/perl/5.00503/bigint.pl
OLD_FILES+=usr/libdata/perl/5.00503/bigrat.pl
OLD_FILES+=usr/libdata/perl/5.00503/blib.pm
OLD_FILES+=usr/libdata/perl/5.00503/cacheout.pl
OLD_FILES+=usr/libdata/perl/5.00503/chat2.pl
OLD_FILES+=usr/libdata/perl/5.00503/complete.pl
OLD_FILES+=usr/libdata/perl/5.00503/constant.pm
OLD_FILES+=usr/libdata/perl/5.00503/ctime.pl
OLD_FILES+=usr/libdata/perl/5.00503/diagnostics.pm
OLD_FILES+=usr/libdata/perl/5.00503/dotsh.pl
OLD_FILES+=usr/libdata/perl/5.00503/dumpvar.pl
OLD_FILES+=usr/libdata/perl/5.00503/exceptions.pl
OLD_FILES+=usr/libdata/perl/5.00503/fastcwd.pl
OLD_FILES+=usr/libdata/perl/5.00503/fields.pm
OLD_FILES+=usr/libdata/perl/5.00503/find.pl
OLD_FILES+=usr/libdata/perl/5.00503/finddepth.pl
OLD_FILES+=usr/libdata/perl/5.00503/flush.pl
OLD_FILES+=usr/libdata/perl/5.00503/ftp.pl
OLD_FILES+=usr/libdata/perl/5.00503/getcwd.pl
OLD_FILES+=usr/libdata/perl/5.00503/getopt.pl
OLD_FILES+=usr/libdata/perl/5.00503/getopts.pl
OLD_FILES+=usr/libdata/perl/5.00503/hostname.pl
OLD_FILES+=usr/libdata/perl/5.00503/importenv.pl
OLD_FILES+=usr/libdata/perl/5.00503/integer.pm
OLD_FILES+=usr/libdata/perl/5.00503/less.pm
OLD_FILES+=usr/libdata/perl/5.00503/lib.pm
OLD_FILES+=usr/libdata/perl/5.00503/locale.pm
OLD_FILES+=usr/libdata/perl/5.00503/look.pl
OLD_FILES+=usr/libdata/perl/5.00503/newgetopt.pl
OLD_FILES+=usr/libdata/perl/5.00503/open2.pl
OLD_FILES+=usr/libdata/perl/5.00503/open3.pl
OLD_FILES+=usr/libdata/perl/5.00503/overload.pm
OLD_FILES+=usr/libdata/perl/5.00503/perl5db.pl
OLD_FILES+=usr/libdata/perl/5.00503/pwd.pl
OLD_FILES+=usr/libdata/perl/5.00503/shellwords.pl
OLD_FILES+=usr/libdata/perl/5.00503/sigtrap.pm
OLD_FILES+=usr/libdata/perl/5.00503/stat.pl
OLD_FILES+=usr/libdata/perl/5.00503/strict.pm
OLD_FILES+=usr/libdata/perl/5.00503/subs.pm
OLD_FILES+=usr/libdata/perl/5.00503/syslog.pl
OLD_FILES+=usr/libdata/perl/5.00503/tainted.pl
OLD_FILES+=usr/libdata/perl/5.00503/termcap.pl
OLD_FILES+=usr/libdata/perl/5.00503/timelocal.pl
OLD_FILES+=usr/libdata/perl/5.00503/validate.pl
OLD_FILES+=usr/libdata/perl/5.00503/vars.pm
OLD_FILES+=usr/libdata/perl/5.00503/re.pm
OLD_FILES+=usr/libdata/perl/5.00503/Config.pm
OLD_FILES+=usr/libdata/perl/5.00503/.exists
OLD_FILES+=usr/libdata/perl/5.00503/DynaLoader.pm
OLD_FILES+=usr/share/perl/man/man3/AnyDBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/AutoLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/AutoSplit.3.gz
OLD_FILES+=usr/share/perl/man/man3/B.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Asmdata.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Assembler.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Bblock.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Bytecode.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::C.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::CC.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Debug.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Deparse.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Disassembler.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Lint.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Showlex.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Stackobj.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Terse.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Xref.3.gz
OLD_FILES+=usr/share/perl/man/man3/Benchmark.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Apache.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Carp.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Cookie.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Fast.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Push.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Switch.3.gz
OLD_FILES+=usr/share/perl/man/man3/CPAN.3.gz
OLD_FILES+=usr/share/perl/man/man3/CPAN::FirstTime.3.gz
OLD_FILES+=usr/share/perl/man/man3/CPAN::Nox.3.gz
OLD_FILES+=usr/share/perl/man/man3/Carp.3.gz
OLD_FILES+=usr/share/perl/man/man3/Class::Struct.3.gz
OLD_FILES+=usr/share/perl/man/man3/Config.3.gz
OLD_FILES+=usr/share/perl/man/man3/Cwd.3.gz
OLD_FILES+=usr/share/perl/man/man3/DB_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Data::Dumper.3.gz
OLD_FILES+=usr/share/perl/man/man3/Devel::SelfStubber.3.gz
OLD_FILES+=usr/share/perl/man/man3/DirHandle.3.gz
OLD_FILES+=usr/share/perl/man/man3/Dumpvalue.3.gz
OLD_FILES+=usr/share/perl/man/man3/DynaLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/English.3.gz
OLD_FILES+=usr/share/perl/man/man3/Env.3.gz
OLD_FILES+=usr/share/perl/man/man3/Exporter.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Command.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Embed.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Install.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Installed.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Liblist.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_OS2.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_Unix.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_VMS.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_Win32.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MakeMaker.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Manifest.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Mkbootstrap.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Mksymlists.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Packlist.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::testlib.3.gz
OLD_FILES+=usr/share/perl/man/man3/Fatal.3.gz
OLD_FILES+=usr/share/perl/man/man3/Fcntl.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Basename.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::CheckTree.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Compare.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Copy.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::DosGlob.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Find.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Path.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::Mac.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::OS2.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::Unix.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::VMS.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::Win32.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::stat.3.gz
OLD_FILES+=usr/share/perl/man/man3/FileCache.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO.3.gz
OLD_FILES+=usr/share/perl/man/man3/FileHandle.3.gz
OLD_FILES+=usr/share/perl/man/man3/FindBin.3.gz
OLD_FILES+=usr/share/perl/man/man3/GDBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Getopt::Long.3.gz
OLD_FILES+=usr/share/perl/man/man3/Getopt::Std.3.gz
OLD_FILES+=usr/share/perl/man/man3/I18N::Collate.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::File.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Handle.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Pipe.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Seekable.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Select.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Socket.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::Msg.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::Open2.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::Open3.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::Semaphore.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::SysV.3.gz
OLD_FILES+=usr/share/perl/man/man3/Math::BigFloat.3.gz
OLD_FILES+=usr/share/perl/man/man3/Math::BigInt.3.gz
OLD_FILES+=usr/share/perl/man/man3/Math::Complex.3.gz
OLD_FILES+=usr/share/perl/man/man3/Math::Trig.3.gz
OLD_FILES+=usr/share/perl/man/man3/NDBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::Ping.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::hostent.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::netent.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::protoent.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::servent.3.gz
OLD_FILES+=usr/share/perl/man/man3/O.3.gz
OLD_FILES+=usr/share/perl/man/man3/ODBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Opcode.3.gz
OLD_FILES+=usr/share/perl/man/man3/POSIX.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Html.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Text.3.gz
OLD_FILES+=usr/share/perl/man/man3/SDBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Safe.3.gz
OLD_FILES+=usr/share/perl/man/man3/Search::Dict.3.gz
OLD_FILES+=usr/share/perl/man/man3/SelectSaver.3.gz
OLD_FILES+=usr/share/perl/man/man3/SelfLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/Shell.3.gz
OLD_FILES+=usr/share/perl/man/man3/Socket.3.gz
OLD_FILES+=usr/share/perl/man/man3/Symbol.3.gz
OLD_FILES+=usr/share/perl/man/man3/re.3.gz
OLD_FILES+=usr/share/perl/man/man3/Sys::Hostname.3.gz
OLD_FILES+=usr/share/perl/man/man3/Sys::Syslog.3.gz
OLD_FILES+=usr/share/perl/man/man3/Term::Cap.3.gz
OLD_FILES+=usr/share/perl/man/man3/Term::Complete.3.gz
OLD_FILES+=usr/share/perl/man/man3/Term::ReadLine.3.gz
OLD_FILES+=usr/share/perl/man/man3/Test.3.gz
OLD_FILES+=usr/share/perl/man/man3/Test::Harness.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::Abbrev.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::ParseWords.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::Soundex.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::Tabs.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::Wrap.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread::Queue.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread::Semaphore.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread::Signal.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread::Specific.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::Array.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::Handle.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::Hash.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::RefHash.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::Scalar.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::SubstrHash.3.gz
OLD_FILES+=usr/share/perl/man/man3/Time::Local.3.gz
OLD_FILES+=usr/share/perl/man/man3/Time::gmtime.3.gz
OLD_FILES+=usr/share/perl/man/man3/Time::localtime.3.gz
OLD_FILES+=usr/share/perl/man/man3/Time::tm.3.gz
OLD_FILES+=usr/share/perl/man/man3/UNIVERSAL.3.gz
OLD_FILES+=usr/share/perl/man/man3/User::grent.3.gz
OLD_FILES+=usr/share/perl/man/man3/User::pwent.3.gz
OLD_FILES+=usr/share/perl/man/man3/attrs.3.gz
OLD_FILES+=usr/share/perl/man/man3/autouse.3.gz
OLD_FILES+=usr/share/perl/man/man3/base.3.gz
OLD_FILES+=usr/share/perl/man/man3/blib.3.gz
OLD_FILES+=usr/share/perl/man/man3/constant.3.gz
OLD_FILES+=usr/share/perl/man/man3/diagnostics.3.gz
OLD_FILES+=usr/share/perl/man/man3/fields.3.gz
OLD_FILES+=usr/share/perl/man/man3/integer.3.gz
OLD_FILES+=usr/share/perl/man/man3/less.3.gz
OLD_FILES+=usr/share/perl/man/man3/lib.3.gz
OLD_FILES+=usr/share/perl/man/man3/locale.3.gz
OLD_FILES+=usr/share/perl/man/man3/ops.3.gz
OLD_FILES+=usr/share/perl/man/man3/overload.3.gz
OLD_FILES+=usr/share/perl/man/man3/sigtrap.3.gz
OLD_FILES+=usr/share/perl/man/man3/strict.3.gz
OLD_FILES+=usr/share/perl/man/man3/subs.3.gz
OLD_FILES+=usr/share/perl/man/man3/vars.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Stash.3.gz
OLD_FILES+=usr/share/perl/man/man3/ByteLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Pretty.3.gz
OLD_FILES+=usr/share/perl/man/man3/Carp::Heavy.3.gz
OLD_FILES+=usr/share/perl/man/man3/DB.3.gz
OLD_FILES+=usr/share/perl/man/man3/DProf::DProf.3.gz
OLD_FILES+=usr/share/perl/man/man3/Exporter::Heavy.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_Cygwin.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Glob.3.gz
OLD_FILES+=usr/share/perl/man/man3/Glob::Glob.3.gz
OLD_FILES+=usr/share/perl/man/man3/Hostname::Hostname.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Dir.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Poll.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Socket::INET.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Socket::UNIX.3.gz
OLD_FILES+=usr/share/perl/man/man3/Peek::Peek.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Checker.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Find.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::InputObjects.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Man.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::ParseUtils.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Parser.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Plainer.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Select.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Text::Color.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Text::Termcap.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Usage.3.gz
OLD_FILES+=usr/share/perl/man/man3/Syslog::Syslog.3.gz
OLD_FILES+=usr/share/perl/man/man3/Term::ANSIColor.3.gz
OLD_FILES+=usr/share/perl/man/man3/XSLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/attributes.3.gz
OLD_FILES+=usr/share/perl/man/man3/bytes.3.gz
OLD_FILES+=usr/share/perl/man/man3/charnames.3.gz
OLD_FILES+=usr/share/perl/man/man3/filetest.3.gz
OLD_FILES+=usr/share/perl/man/man3/open.3.gz
OLD_FILES+=usr/share/perl/man/man3/utf8.3.gz
OLD_FILES+=usr/share/perl/man/man3/warnings.3.gz
OLD_FILES+=usr/share/perl/man/man3/warnings::register.3.gz
OLD_FILES+=usr/share/perl/man/whatis
OLD_FILES+=usr/share/man/man1/CA.pl.1.gz
OLD_FILES+=usr/share/man/man1/asn1parse.1.gz
OLD_FILES+=usr/share/man/man1/ca.1.gz
OLD_FILES+=usr/share/man/man1/ciphers.1.gz
OLD_FILES+=usr/share/man/man1/config.1.gz
OLD_FILES+=usr/share/man/man1/crl.1.gz
OLD_FILES+=usr/share/man/man1/crl2pkcs7.1.gz
OLD_FILES+=usr/share/man/man1/dgst.1.gz
OLD_FILES+=usr/share/man/man1/dhparam.1.gz
OLD_FILES+=usr/share/man/man1/doscmd.1.gz
OLD_FILES+=usr/share/man/man1/dsa.1.gz
OLD_FILES+=usr/share/man/man1/dsaparam.1.gz
OLD_FILES+=usr/share/man/man1/enc.1.gz
OLD_FILES+=usr/share/man/man1/gendsa.1.gz
OLD_FILES+=usr/share/man/man1/genrsa.1.gz
OLD_FILES+=usr/share/man/man1/getNAME.1.gz
OLD_FILES+=usr/share/man/man1/nseq.1.gz
OLD_FILES+=usr/share/man/man1/ocsp.1.gz
OLD_FILES+=usr/share/man/man1/openssl.1.gz
OLD_FILES+=usr/share/man/man1/perl.1.gz
OLD_FILES+=usr/share/man/man1/perl5004delta.1.gz
OLD_FILES+=usr/share/man/man1/perlapio.1.gz
OLD_FILES+=usr/share/man/man1/perlbook.1.gz
OLD_FILES+=usr/share/man/man1/perlbot.1.gz
OLD_FILES+=usr/share/man/man1/perlcall.1.gz
OLD_FILES+=usr/share/man/man1/perldata.1.gz
OLD_FILES+=usr/share/man/man1/perldebug.1.gz
OLD_FILES+=usr/share/man/man1/perldelta.1.gz
OLD_FILES+=usr/share/man/man1/perldiag.1.gz
OLD_FILES+=usr/share/man/man1/perldsc.1.gz
OLD_FILES+=usr/share/man/man1/perlembed.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq1.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq2.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq3.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq4.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq5.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq6.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq7.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq8.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq9.1.gz
OLD_FILES+=usr/share/man/man1/perlform.1.gz
OLD_FILES+=usr/share/man/man1/perlfunc.1.gz
OLD_FILES+=usr/share/man/man1/perlguts.1.gz
OLD_FILES+=usr/share/man/man1/perlhist.1.gz
OLD_FILES+=usr/share/man/man1/perlipc.1.gz
OLD_FILES+=usr/share/man/man1/perllocale.1.gz
OLD_FILES+=usr/share/man/man1/perllol.1.gz
OLD_FILES+=usr/share/man/man1/perlmod.1.gz
OLD_FILES+=usr/share/man/man1/perlmodinstall.1.gz
OLD_FILES+=usr/share/man/man1/perlmodlib.1.gz
OLD_FILES+=usr/share/man/man1/perlobj.1.gz
OLD_FILES+=usr/share/man/man1/perlop.1.gz
OLD_FILES+=usr/share/man/man1/perlopentut.1.gz
OLD_FILES+=usr/share/man/man1/perlpod.1.gz
OLD_FILES+=usr/share/man/man1/perlport.1.gz
OLD_FILES+=usr/share/man/man1/perlre.1.gz
OLD_FILES+=usr/share/man/man1/perlref.1.gz
OLD_FILES+=usr/share/man/man1/perlreftut.1.gz
OLD_FILES+=usr/share/man/man1/perlrun.1.gz
OLD_FILES+=usr/share/man/man1/perlsec.1.gz
OLD_FILES+=usr/share/man/man1/perlstyle.1.gz
OLD_FILES+=usr/share/man/man1/perlsub.1.gz
OLD_FILES+=usr/share/man/man1/perlsyn.1.gz
OLD_FILES+=usr/share/man/man1/perlthrtut.1.gz
OLD_FILES+=usr/share/man/man1/perltie.1.gz
OLD_FILES+=usr/share/man/man1/perltoc.1.gz
OLD_FILES+=usr/share/man/man1/perltoot.1.gz
OLD_FILES+=usr/share/man/man1/perltrap.1.gz
OLD_FILES+=usr/share/man/man1/perlvar.1.gz
OLD_FILES+=usr/share/man/man1/perlxs.1.gz
OLD_FILES+=usr/share/man/man1/perlxstut.1.gz
OLD_FILES+=usr/share/man/man1/perlbug.1.gz
OLD_FILES+=usr/share/man/man1/perlcc.1.gz
OLD_FILES+=usr/share/man/man1/perldoc.1.gz
OLD_FILES+=usr/share/man/man1/perl5005delta.1.gz
OLD_FILES+=usr/share/man/man1/perlfork.1.gz
OLD_FILES+=usr/share/man/man1/perlboot.1.gz
OLD_FILES+=usr/share/man/man1/perltootc.1.gz
OLD_FILES+=usr/share/man/man1/perldbmfilter.1.gz
OLD_FILES+=usr/share/man/man1/perldebguts.1.gz
OLD_FILES+=usr/share/man/man1/perlnumber.1.gz
OLD_FILES+=usr/share/man/man1/perlcompile.1.gz
OLD_FILES+=usr/share/man/man1/perltodo.1.gz
OLD_FILES+=usr/share/man/man1/perlapi.1.gz
OLD_FILES+=usr/share/man/man1/perlintern.1.gz
OLD_FILES+=usr/share/man/man1/perlhack.1.gz
OLD_FILES+=usr/share/man/man1/perlbc.1.gz
OLD_FILES+=usr/share/man/man1/pkcs12.1.gz
OLD_FILES+=usr/share/man/man1/pkcs7.1.gz
OLD_FILES+=usr/share/man/man1/pkcs8.1.gz
OLD_FILES+=usr/share/man/man1/rand.1.gz
OLD_FILES+=usr/share/man/man1/req.1.gz
OLD_FILES+=usr/share/man/man1/rsa.1.gz
OLD_FILES+=usr/share/man/man1/rsautl.1.gz
OLD_FILES+=usr/share/man/man1/s_client.1.gz
OLD_FILES+=usr/share/man/man1/s_server.1.gz
OLD_FILES+=usr/share/man/man1/sess_id.1.gz
OLD_FILES+=usr/share/man/man1/smime.1.gz
OLD_FILES+=usr/share/man/man1/speed.1.gz OLD_FILES+=usr/share/man/man1/spkac.1.gz OLD_FILES+=usr/share/man/man1/verify.1.gz OLD_FILES+=usr/share/man/man1/version.1.gz OLD_FILES+=usr/share/man/man1/x509.1.gz OLD_FILES+=usr/share/man/man3/SSL_COMP_add_compression_method.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_get_ex_new_index.3.gz OLD_FILES+=usr/share/man/man3/archive_entry_dup.3.gz OLD_FILES+=usr/share/man/man3/archive_entry_set_tartype.3.gz OLD_FILES+=usr/share/man/man3/archive_entry_tartype.3.gz OLD_FILES+=usr/share/man/man3/archive_read_data_into_file.3.gz OLD_FILES+=usr/share/man/man3/archive_read_open_tar.3.gz OLD_FILES+=usr/share/man/man3/archive_read_support_format_gnutar.3.gz OLD_FILES+=usr/share/man/man3/cipher.3.gz OLD_FILES+=usr/share/man/man3/des_cipher.3.gz OLD_FILES+=usr/share/man/man3/des_setkey.3.gz OLD_FILES+=usr/share/man/man3/encrypt.3.gz OLD_FILES+=usr/share/man/man3/endvfsent.3.gz OLD_FILES+=usr/share/man/man3/getvfsbytype.3.gz OLD_FILES+=usr/share/man/man3/getvfsent.3.gz OLD_FILES+=usr/share/man/man3/isnanf.3.gz OLD_FILES+=usr/share/man/man3/libautofs.3.gz OLD_FILES+=usr/share/man/man3/pthread_attr_setsstack.3.gz OLD_FILES+=usr/share/man/man3/pthread_getcancelstate.3.gz OLD_FILES+=usr/share/man/man3/pthread_mutexattr_getpshared.3.gz OLD_FILES+=usr/share/man/man3/pthread_mutexattr_setpshared.3.gz OLD_FILES+=usr/share/man/man3/set_assertion_failure_callback.3.gz OLD_FILES+=usr/share/man/man3/setkey.3.gz OLD_FILES+=usr/share/man/man3/setvfsent.3.gz OLD_FILES+=usr/share/man/man3/ssl.3.gz OLD_FILES+=usr/share/man/man3/vfsisloadable.3.gz OLD_FILES+=usr/share/man/man3/vfsload.3.gz OLD_FILES+=usr/share/man/man4/als4000.4.gz OLD_FILES+=usr/share/man/man4/csa.4.gz OLD_FILES+=usr/share/man/man4/emu10k1.4.gz OLD_FILES+=usr/share/man/man4/euc.4.gz OLD_FILES+=usr/share/man/man4/gusc.4.gz OLD_FILES+=usr/share/man/man4/if_fwp.4.gz OLD_FILES+=usr/share/man/man4/lomac.4.gz OLD_FILES+=usr/share/man/man4/maestro3.4.gz OLD_FILES+=usr/share/man/man4/raid.4.gz OLD_FILES+=usr/share/man/man4/sbc.4.gz OLD_FILES+=usr/share/man/man4/sd.4.gz OLD_FILES+=usr/share/man/man4/snc.4.gz OLD_FILES+=usr/share/man/man4/st.4.gz OLD_FILES+=usr/share/man/man4/uaudio.4.gz OLD_FILES+=usr/share/man/man4/utf2.4.gz OLD_FILES+=usr/share/man/man4/vinumdebug.4.gz OLD_FILES+=usr/share/man/man5/disklabel.5.gz OLD_FILES+=usr/share/man/man5/dm.conf.5.gz OLD_FILES+=usr/share/man/man5/ranlib.5.gz OLD_FILES+=usr/share/man/man5/utf2.5.gz OLD_FILES+=usr/share/man/man7/groff_mwww.7.gz OLD_FILES+=usr/share/man/man7/mmroff.7.gz OLD_FILES+=usr/share/man/man7/mwww.7.gz OLD_FILES+=usr/share/man/man7/style.perl.7.gz OLD_FILES+=usr/share/man/man8/apm.8.gz OLD_FILES+=usr/share/man/man8/apmconf.8.gz OLD_FILES+=usr/share/man/man8/apmd.8.gz OLD_FILES+=usr/share/man/man8/dm.8.gz OLD_FILES+=usr/share/man/man8/pam_ftp.8.gz OLD_FILES+=usr/share/man/man8/pam_wheel.8.gz OLD_FILES+=usr/share/man/man8/sconfig.8.gz OLD_FILES+=usr/share/man/man8/ssl.8.gz OLD_FILES+=usr/share/man/man8/wlconfig.8.gz OLD_FILES+=usr/share/man/man9/CURSIG.9.gz OLD_FILES+=usr/share/man/man9/VFS_INIT.9.gz OLD_FILES+=usr/share/man/man9/at_exit.9.gz OLD_FILES+=usr/share/man/man9/at_fork.9.gz OLD_FILES+=usr/share/man/man9/cdevsw_add.9.gz OLD_FILES+=usr/share/man/man9/cdevsw_remove.9.gz OLD_FILES+=usr/share/man/man9/cv_waitq_empty.9.gz OLD_FILES+=usr/share/man/man9/cv_waitq_remove.9.gz OLD_FILES+=usr/share/man/man9/endtsleep.9.gz OLD_FILES+=usr/share/man/man9/jumbo.9.gz OLD_FILES+=usr/share/man/man9/jumbo_freem.9.gz OLD_FILES+=usr/share/man/man9/jumbo_pg_alloc.9.gz 
OLD_FILES+=usr/share/man/man9/jumbo_pg_free.9.gz OLD_FILES+=usr/share/man/man9/jumbo_pg_steal.9.gz OLD_FILES+=usr/share/man/man9/jumbo_phys_to_kva.9.gz OLD_FILES+=usr/share/man/man9/jumbo_vm_init.9.gz OLD_FILES+=usr/share/man/man9/mac_biba.9.gz OLD_FILES+=usr/share/man/man9/mac_bsdextended.9.gz OLD_FILES+=usr/share/man/man9/mono_time.9.gz OLD_FILES+=usr/share/man/man9/p1003_1b.9.gz OLD_FILES+=usr/share/man/man9/pmap_prefault.9.gz OLD_FILES+=usr/share/man/man9/posix4.9.gz OLD_FILES+=usr/share/man/man9/resource_query_name.9.gz OLD_FILES+=usr/share/man/man9/resource_query_string.9.gz OLD_FILES+=usr/share/man/man9/resource_query_unit.9.gz OLD_FILES+=usr/share/man/man9/rm_at_exit.9.gz OLD_FILES+=usr/share/man/man9/rm_at_fork.9.gz OLD_FILES+=usr/share/man/man9/runtime.9.gz OLD_FILES+=usr/share/man/man9/sleepinit.9.gz OLD_FILES+=usr/share/man/man9/unsleep.9.gz OLD_FILES+=usr/share/man/ja/man1/perl.1.gz OLD_FILES+=usr/share/games/atc/Game_List OLD_FILES+=usr/share/games/atc/Killer OLD_FILES+=usr/share/games/atc/crossover OLD_FILES+=usr/share/games/atc/default OLD_FILES+=usr/share/games/atc/easy OLD_FILES+=usr/share/games/atc/game_2 OLD_FILES+=usr/share/games/larn/larnmaze OLD_FILES+=usr/share/games/larn/larnopts OLD_FILES+=usr/share/games/larn/larn.help OLD_FILES+=usr/share/games/quiz.db/africa OLD_FILES+=usr/share/games/quiz.db/america OLD_FILES+=usr/share/games/quiz.db/areas OLD_FILES+=usr/share/games/quiz.db/arith OLD_FILES+=usr/share/games/quiz.db/asia OLD_FILES+=usr/share/games/quiz.db/babies OLD_FILES+=usr/share/games/quiz.db/bard OLD_FILES+=usr/share/games/quiz.db/chinese OLD_FILES+=usr/share/games/quiz.db/collectives OLD_FILES+=usr/share/games/quiz.db/ed OLD_FILES+=usr/share/games/quiz.db/elements OLD_FILES+=usr/share/games/quiz.db/europe OLD_FILES+=usr/share/games/quiz.db/flowers OLD_FILES+=usr/share/games/quiz.db/greek OLD_FILES+=usr/share/games/quiz.db/inca OLD_FILES+=usr/share/games/quiz.db/index OLD_FILES+=usr/share/games/quiz.db/latin OLD_FILES+=usr/share/games/quiz.db/locomotive OLD_FILES+=usr/share/games/quiz.db/midearth OLD_FILES+=usr/share/games/quiz.db/morse OLD_FILES+=usr/share/games/quiz.db/murders OLD_FILES+=usr/share/games/quiz.db/poetry OLD_FILES+=usr/share/games/quiz.db/posneg OLD_FILES+=usr/share/games/quiz.db/pres OLD_FILES+=usr/share/games/quiz.db/province OLD_FILES+=usr/share/games/quiz.db/seq-easy OLD_FILES+=usr/share/games/quiz.db/seq-hard OLD_FILES+=usr/share/games/quiz.db/sexes OLD_FILES+=usr/share/games/quiz.db/sov OLD_FILES+=usr/share/games/quiz.db/spell OLD_FILES+=usr/share/games/quiz.db/state OLD_FILES+=usr/share/games/quiz.db/trek OLD_FILES+=usr/share/games/quiz.db/ucc OLD_FILES+=usr/share/games/cribbage.instr OLD_FILES+=usr/share/games/fish.instr OLD_FILES+=usr/share/games/wump.info OLD_FILES+=usr/games/hide/adventure OLD_FILES+=usr/games/hide/arithmetic OLD_FILES+=usr/games/hide/atc OLD_FILES+=usr/games/hide/backgammon OLD_FILES+=usr/games/hide/teachgammon OLD_FILES+=usr/games/hide/battlestar OLD_FILES+=usr/games/hide/bs OLD_FILES+=usr/games/hide/canfield OLD_FILES+=usr/games/hide/cribbage OLD_FILES+=usr/games/hide/fish OLD_FILES+=usr/games/hide/hack OLD_FILES+=usr/games/hide/hangman OLD_FILES+=usr/games/hide/larn OLD_FILES+=usr/games/hide/mille OLD_FILES+=usr/games/hide/phantasia OLD_FILES+=usr/games/hide/quiz OLD_FILES+=usr/games/hide/robots OLD_FILES+=usr/games/hide/rogue OLD_FILES+=usr/games/hide/sail OLD_FILES+=usr/games/hide/snake OLD_FILES+=usr/games/hide/trek OLD_FILES+=usr/games/hide/worm OLD_FILES+=usr/games/hide/wump OLD_FILES+=usr/games/adventure 
OLD_FILES+=usr/games/arithmetic OLD_FILES+=usr/games/atc OLD_FILES+=usr/games/backgammon OLD_FILES+=usr/games/teachgammon OLD_FILES+=usr/games/battlestar OLD_FILES+=usr/games/bs OLD_FILES+=usr/games/canfield OLD_FILES+=usr/games/cfscores OLD_FILES+=usr/games/cribbage OLD_FILES+=usr/games/dm OLD_FILES+=usr/games/fish OLD_FILES+=usr/games/hack OLD_FILES+=usr/games/hangman OLD_FILES+=usr/games/larn OLD_FILES+=usr/games/mille OLD_FILES+=usr/games/phantasia OLD_FILES+=usr/games/piano OLD_FILES+=usr/games/pig OLD_FILES+=usr/games/quiz OLD_FILES+=usr/games/rain OLD_FILES+=usr/games/robots OLD_FILES+=usr/games/rogue OLD_FILES+=usr/games/sail OLD_FILES+=usr/games/snake OLD_FILES+=usr/games/snscore OLD_FILES+=usr/games/trek OLD_FILES+=usr/games/wargames OLD_FILES+=usr/games/worm OLD_FILES+=usr/games/worms OLD_FILES+=usr/games/wump OLD_FILES+=sbin/mount_reiserfs OLD_FILES+=usr/include/cam/cam_extend.h OLD_FILES+=usr/include/dev/wi/wi_hostap.h OLD_FILES+=usr/include/disktab.h OLD_FILES+=usr/include/g++/FlexLexer.h OLD_FILES+=usr/include/g++/PlotFile.h OLD_FILES+=usr/include/g++/SFile.h OLD_FILES+=usr/include/g++/_G_config.h OLD_FILES+=usr/include/g++/algo.h OLD_FILES+=usr/include/g++/algobase.h OLD_FILES+=usr/include/g++/algorithm OLD_FILES+=usr/include/g++/alloc.h OLD_FILES+=usr/include/g++/bitset OLD_FILES+=usr/include/g++/builtinbuf.h OLD_FILES+=usr/include/g++/bvector.h OLD_FILES+=usr/include/g++/cassert OLD_FILES+=usr/include/g++/cctype OLD_FILES+=usr/include/g++/cerrno OLD_FILES+=usr/include/g++/cfloat OLD_FILES+=usr/include/g++/ciso646 OLD_FILES+=usr/include/g++/climits OLD_FILES+=usr/include/g++/clocale OLD_FILES+=usr/include/g++/cmath OLD_FILES+=usr/include/g++/complex OLD_FILES+=usr/include/g++/complex.h OLD_FILES+=usr/include/g++/csetjmp OLD_FILES+=usr/include/g++/csignal OLD_FILES+=usr/include/g++/cstdarg OLD_FILES+=usr/include/g++/cstddef OLD_FILES+=usr/include/g++/cstdio OLD_FILES+=usr/include/g++/cstdlib OLD_FILES+=usr/include/g++/cstring OLD_FILES+=usr/include/g++/ctime OLD_FILES+=usr/include/g++/cwchar OLD_FILES+=usr/include/g++/cwctype OLD_FILES+=usr/include/g++/defalloc.h OLD_FILES+=usr/include/g++/deque OLD_FILES+=usr/include/g++/deque.h OLD_FILES+=usr/include/g++/editbuf.h OLD_FILES+=usr/include/g++/exception OLD_FILES+=usr/include/g++/floatio.h OLD_FILES+=usr/include/g++/fstream OLD_FILES+=usr/include/g++/fstream.h OLD_FILES+=usr/include/g++/function.h OLD_FILES+=usr/include/g++/functional OLD_FILES+=usr/include/g++/hash_map OLD_FILES+=usr/include/g++/hash_map.h OLD_FILES+=usr/include/g++/hash_set OLD_FILES+=usr/include/g++/hash_set.h OLD_FILES+=usr/include/g++/hashtable.h OLD_FILES+=usr/include/g++/heap.h OLD_FILES+=usr/include/g++/indstream.h OLD_FILES+=usr/include/g++/iolibio.h OLD_FILES+=usr/include/g++/iomanip OLD_FILES+=usr/include/g++/iomanip.h OLD_FILES+=usr/include/g++/iosfwd OLD_FILES+=usr/include/g++/iostdio.h OLD_FILES+=usr/include/g++/iostream OLD_FILES+=usr/include/g++/iostream.h OLD_FILES+=usr/include/g++/iostreamP.h OLD_FILES+=usr/include/g++/istream.h OLD_FILES+=usr/include/g++/iterator OLD_FILES+=usr/include/g++/iterator.h OLD_FILES+=usr/include/g++/libio.h OLD_FILES+=usr/include/g++/libioP.h OLD_FILES+=usr/include/g++/list OLD_FILES+=usr/include/g++/list.h OLD_FILES+=usr/include/g++/map OLD_FILES+=usr/include/g++/map.h OLD_FILES+=usr/include/g++/memory OLD_FILES+=usr/include/g++/multimap.h OLD_FILES+=usr/include/g++/multiset.h OLD_FILES+=usr/include/g++/new OLD_FILES+=usr/include/g++/new.h OLD_FILES+=usr/include/g++/numeric OLD_FILES+=usr/include/g++/ostream.h 
OLD_FILES+=usr/include/g++/pair.h OLD_FILES+=usr/include/g++/parsestream.h OLD_FILES+=usr/include/g++/pfstream.h OLD_FILES+=usr/include/g++/procbuf.h OLD_FILES+=usr/include/g++/pthread_alloc OLD_FILES+=usr/include/g++/pthread_alloc.h OLD_FILES+=usr/include/g++/queue OLD_FILES+=usr/include/g++/rope OLD_FILES+=usr/include/g++/rope.h OLD_FILES+=usr/include/g++/ropeimpl.h OLD_FILES+=usr/include/g++/set OLD_FILES+=usr/include/g++/set.h OLD_FILES+=usr/include/g++/slist OLD_FILES+=usr/include/g++/slist.h OLD_FILES+=usr/include/g++/sstream OLD_FILES+=usr/include/g++/stack OLD_FILES+=usr/include/g++/stack.h OLD_FILES+=usr/include/g++/std/bastring.cc OLD_FILES+=usr/include/g++/std/bastring.h OLD_FILES+=usr/include/g++/std/complext.cc OLD_FILES+=usr/include/g++/std/complext.h OLD_FILES+=usr/include/g++/std/dcomplex.h OLD_FILES+=usr/include/g++/std/fcomplex.h OLD_FILES+=usr/include/g++/std/gslice.h OLD_FILES+=usr/include/g++/std/gslice_array.h OLD_FILES+=usr/include/g++/std/indirect_array.h OLD_FILES+=usr/include/g++/std/ldcomplex.h OLD_FILES+=usr/include/g++/std/mask_array.h OLD_FILES+=usr/include/g++/std/slice.h OLD_FILES+=usr/include/g++/std/slice_array.h OLD_FILES+=usr/include/g++/std/std_valarray.h OLD_FILES+=usr/include/g++/std/straits.h OLD_FILES+=usr/include/g++/std/valarray_array.h OLD_FILES+=usr/include/g++/std/valarray_array.tcc OLD_FILES+=usr/include/g++/std/valarray_meta.h OLD_FILES+=usr/include/g++/stdexcept OLD_FILES+=usr/include/g++/stdiostream.h OLD_FILES+=usr/include/g++/stl.h OLD_FILES+=usr/include/g++/stl_algo.h OLD_FILES+=usr/include/g++/stl_algobase.h OLD_FILES+=usr/include/g++/stl_alloc.h OLD_FILES+=usr/include/g++/stl_bvector.h OLD_FILES+=usr/include/g++/stl_config.h OLD_FILES+=usr/include/g++/stl_construct.h OLD_FILES+=usr/include/g++/stl_deque.h OLD_FILES+=usr/include/g++/stl_function.h OLD_FILES+=usr/include/g++/stl_hash_fun.h OLD_FILES+=usr/include/g++/stl_hash_map.h OLD_FILES+=usr/include/g++/stl_hash_set.h OLD_FILES+=usr/include/g++/stl_hashtable.h OLD_FILES+=usr/include/g++/stl_heap.h OLD_FILES+=usr/include/g++/stl_iterator.h OLD_FILES+=usr/include/g++/stl_list.h OLD_FILES+=usr/include/g++/stl_map.h OLD_FILES+=usr/include/g++/stl_multimap.h OLD_FILES+=usr/include/g++/stl_multiset.h OLD_FILES+=usr/include/g++/stl_numeric.h OLD_FILES+=usr/include/g++/stl_pair.h OLD_FILES+=usr/include/g++/stl_queue.h OLD_FILES+=usr/include/g++/stl_raw_storage_iter.h OLD_FILES+=usr/include/g++/stl_relops.h OLD_FILES+=usr/include/g++/stl_rope.h OLD_FILES+=usr/include/g++/stl_set.h OLD_FILES+=usr/include/g++/stl_slist.h OLD_FILES+=usr/include/g++/stl_stack.h OLD_FILES+=usr/include/g++/stl_tempbuf.h OLD_FILES+=usr/include/g++/stl_tree.h OLD_FILES+=usr/include/g++/stl_uninitialized.h OLD_FILES+=usr/include/g++/stl_vector.h OLD_FILES+=usr/include/g++/stream.h OLD_FILES+=usr/include/g++/streambuf.h OLD_FILES+=usr/include/g++/strfile.h OLD_FILES+=usr/include/g++/string OLD_FILES+=usr/include/g++/strstream OLD_FILES+=usr/include/g++/strstream.h OLD_FILES+=usr/include/g++/tempbuf.h OLD_FILES+=usr/include/g++/tree.h OLD_FILES+=usr/include/g++/type_traits.h OLD_FILES+=usr/include/g++/typeinfo OLD_FILES+=usr/include/g++/utility OLD_FILES+=usr/include/g++/valarray OLD_FILES+=usr/include/g++/vector OLD_FILES+=usr/include/g++/vector.h OLD_FILES+=usr/include/gmp.h OLD_FILES+=usr/include/isc/assertions.h OLD_FILES+=usr/include/isc/ctl.h OLD_FILES+=usr/include/isc/dst.h OLD_FILES+=usr/include/isc/eventlib.h OLD_FILES+=usr/include/isc/heap.h OLD_FILES+=usr/include/isc/irpmarshall.h 
OLD_FILES+=usr/include/isc/list.h OLD_FILES+=usr/include/isc/logging.h OLD_FILES+=usr/include/isc/memcluster.h OLD_FILES+=usr/include/isc/misc.h OLD_FILES+=usr/include/isc/tree.h OLD_FILES+=usr/include/machine/ansi.h OLD_FILES+=usr/include/machine/apic.h OLD_FILES+=usr/include/machine/asc_ioctl.h OLD_FILES+=usr/include/machine/asnames.h OLD_FILES+=usr/include/machine/bus_at386.h OLD_FILES+=usr/include/machine/bus_memio.h OLD_FILES+=usr/include/machine/bus_pc98.h OLD_FILES+=usr/include/machine/bus_pio.h OLD_FILES+=usr/include/machine/cdk.h OLD_FILES+=usr/include/machine/comstats.h OLD_FILES+=usr/include/machine/console.h OLD_FILES+=usr/include/machine/critical.h OLD_FILES+=usr/include/machine/cronyx.h OLD_FILES+=usr/include/machine/dvcfg.h OLD_FILES+=usr/include/machine/globaldata.h OLD_FILES+=usr/include/machine/globals.h OLD_FILES+=usr/include/machine/gsc.h OLD_FILES+=usr/include/machine/i4b_isppp.h OLD_FILES+=usr/include/machine/if_wavelan_ieee.h OLD_FILES+=usr/include/machine/iic.h OLD_FILES+=usr/include/machine/ioctl_ctx.h OLD_FILES+=usr/include/machine/ioctl_fd.h OLD_FILES+=usr/include/machine/ipl.h OLD_FILES+=usr/include/machine/lock.h OLD_FILES+=usr/include/machine/mouse.h OLD_FILES+=usr/include/machine/mpapic.h OLD_FILES+=usr/include/machine/mtpr.h OLD_FILES+=usr/include/machine/pc/msdos.h OLD_FILES+=usr/include/machine/physio_proc.h OLD_FILES+=usr/include/machine/smb.h OLD_FILES+=usr/include/machine/spigot.h OLD_FILES+=usr/include/machine/types.h OLD_FILES+=usr/include/machine/uc_device.h OLD_FILES+=usr/include/machine/ultrasound.h OLD_FILES+=usr/include/machine/wtio.h OLD_FILES+=usr/include/msdosfs/bootsect.h OLD_FILES+=usr/include/msdosfs/bpb.h OLD_FILES+=usr/include/msdosfs/denode.h OLD_FILES+=usr/include/msdosfs/direntry.h OLD_FILES+=usr/include/msdosfs/fat.h OLD_FILES+=usr/include/msdosfs/msdosfsmount.h OLD_FILES+=usr/include/net/hostcache.h OLD_FILES+=usr/include/net/if_faith.h OLD_FILES+=usr/include/net/if_ieee80211.h OLD_FILES+=usr/include/net/if_tunvar.h OLD_FILES+=usr/include/net/intrq.h OLD_FILES+=usr/include/netatm/kern_include.h OLD_FILES+=usr/include/netinet/if_fddi.h OLD_FILES+=usr/include/netinet/in_hostcache.h OLD_FILES+=usr/include/netinet/ip_flow.h OLD_FILES+=usr/include/netinet/ip_fw2.h OLD_FILES+=usr/include/netinet6/in6_prefix.h OLD_FILES+=usr/include/netns/idp.h OLD_FILES+=usr/include/netns/idp_var.h OLD_FILES+=usr/include/netns/ns.h OLD_FILES+=usr/include/netns/ns_error.h OLD_FILES+=usr/include/netns/ns_if.h OLD_FILES+=usr/include/netns/ns_pcb.h OLD_FILES+=usr/include/netns/sp.h OLD_FILES+=usr/include/netns/spidp.h OLD_FILES+=usr/include/netns/spp_debug.h OLD_FILES+=usr/include/netns/spp_timer.h OLD_FILES+=usr/include/netns/spp_var.h OLD_FILES+=usr/include/nfs/nfs.h OLD_FILES+=usr/include/nfs/nfsm_subs.h OLD_FILES+=usr/include/nfs/nfsmount.h OLD_FILES+=usr/include/nfs/nfsnode.h OLD_FILES+=usr/include/nfs/nfsrtt.h OLD_FILES+=usr/include/nfs/nfsrvcache.h OLD_FILES+=usr/include/nfs/nfsv2.h OLD_FILES+=usr/include/nfs/nqnfs.h OLD_FILES+=usr/include/ntfs/ntfs.h OLD_FILES+=usr/include/ntfs/ntfs_compr.h OLD_FILES+=usr/include/ntfs/ntfs_ihash.h OLD_FILES+=usr/include/ntfs/ntfs_inode.h OLD_FILES+=usr/include/ntfs/ntfs_subr.h OLD_FILES+=usr/include/ntfs/ntfs_vfsops.h OLD_FILES+=usr/include/ntfs/ntfsmount.h OLD_FILES+=usr/include/nwfs/nwfs.h OLD_FILES+=usr/include/nwfs/nwfs_mount.h OLD_FILES+=usr/include/nwfs/nwfs_node.h OLD_FILES+=usr/include/nwfs/nwfs_subr.h OLD_FILES+=usr/include/posix4/_semaphore.h OLD_FILES+=usr/include/posix4/aio.h 
OLD_FILES+=usr/include/posix4/ksem.h OLD_FILES+=usr/include/posix4/mqueue.h OLD_FILES+=usr/include/posix4/posix4.h OLD_FILES+=usr/include/posix4/sched.h OLD_FILES+=usr/include/posix4/semaphore.h OLD_DIRS+=usr/include/posix4 OLD_FILES+=usr/include/security/_pam_compat.h OLD_FILES+=usr/include/security/_pam_macros.h OLD_FILES+=usr/include/security/_pam_types.h OLD_FILES+=usr/include/security/pam_malloc.h OLD_FILES+=usr/include/security/pam_misc.h OLD_FILES+=usr/include/skey.h OLD_FILES+=usr/include/strhash.h OLD_FILES+=usr/include/struct.h OLD_FILES+=usr/include/sys/_label.h OLD_FILES+=usr/include/sys/_posix.h OLD_FILES+=usr/include/sys/bus_private.h OLD_FILES+=usr/include/sys/ccdvar.h OLD_FILES+=usr/include/sys/diskslice.h OLD_FILES+=usr/include/sys/dmap.h OLD_FILES+=usr/include/sys/inttypes.h OLD_FILES+=usr/include/sys/jumbo.h OLD_FILES+=usr/include/sys/mac_policy.h OLD_FILES+=usr/include/sys/pbioio.h OLD_FILES+=usr/include/sys/syscall-hide.h OLD_FILES+=usr/include/sys/tprintf.h OLD_FILES+=usr/include/sys/vnioctl.h OLD_FILES+=usr/include/sys/wormio.h OLD_FILES+=usr/include/telnet.h OLD_FILES+=usr/include/ufs/mfs/mfs_extern.h OLD_FILES+=usr/include/ufs/mfs/mfsnode.h OLD_FILES+=usr/include/values.h OLD_FILES+=usr/include/vm/vm_zone.h OLD_FILES+=usr/share/examples/etc/usbd.conf OLD_FILES+=usr/share/examples/meteor/README OLD_FILES+=usr/share/examples/meteor/rgb16.c OLD_FILES+=usr/share/examples/meteor/rgb24.c OLD_FILES+=usr/share/examples/meteor/test-n.c OLD_FILES+=usr/share/examples/meteor/yuvpk.c OLD_FILES+=usr/share/examples/meteor/yuvpl.c OLD_FILES+=usr/share/examples/worm/README OLD_FILES+=usr/share/examples/worm/makecdfs.sh OLD_FILES+=usr/share/groff_font/devlj4/Makefile OLD_FILES+=usr/share/groff_font/devlj4/text.map OLD_FILES+=usr/share/groff_font/devlj4/special.map OLD_FILES+=usr/share/misc/nslookup.help OLD_FILES+=usr/share/sendmail/cf/feature/nodns.m4 OLD_FILES+=usr/share/syscons/keymaps/lat-amer.kbd OLD_FILES+=usr/share/vi/catalog/ru_SU.KOI8-R OLD_FILES+=usr/share/zoneinfo/Africa/Timbuktu OLD_FILES+=usr/share/zoneinfo/Africa/Asmera OLD_FILES+=usr/share/zoneinfo/America/Buenos_Aires OLD_FILES+=usr/share/zoneinfo/America/Cordoba OLD_FILES+=usr/share/zoneinfo/America/Jujuy OLD_FILES+=usr/share/zoneinfo/America/Catamarca OLD_FILES+=usr/share/zoneinfo/America/Mendoza OLD_FILES+=usr/share/zoneinfo/America/Indianapolis OLD_FILES+=usr/share/zoneinfo/America/Louisville OLD_FILES+=usr/share/zoneinfo/America/Argentina/ComodRivadavia OLD_FILES+=usr/share/zoneinfo/Atlantic/Faeroe OLD_FILES+=usr/share/zoneinfo/Europe/Belfast OLD_FILES+=usr/share/zoneinfo/Pacific/Yap OLD_FILES+=usr/share/zoneinfo/SystemV/YST9 OLD_FILES+=usr/share/zoneinfo/SystemV/PST8 OLD_FILES+=usr/share/zoneinfo/SystemV/EST5EDT OLD_FILES+=usr/share/zoneinfo/SystemV/CST6CDT OLD_FILES+=usr/share/zoneinfo/SystemV/MST7MDT OLD_FILES+=usr/share/zoneinfo/SystemV/PST8PDT OLD_FILES+=usr/share/zoneinfo/SystemV/YST9YDT OLD_FILES+=usr/share/zoneinfo/SystemV/HST10 OLD_FILES+=usr/share/zoneinfo/SystemV/MST7 OLD_FILES+=usr/share/zoneinfo/SystemV/EST5 OLD_FILES+=usr/share/zoneinfo/SystemV/AST4ADT OLD_FILES+=usr/share/zoneinfo/SystemV/CST6 OLD_FILES+=usr/share/zoneinfo/SystemV/AST4 OLD_FILES+=usr/share/doc/ntp/accopt.htm OLD_FILES+=usr/share/doc/ntp/assoc.htm OLD_FILES+=usr/share/doc/ntp/audio.htm OLD_FILES+=usr/share/doc/ntp/authopt.htm OLD_FILES+=usr/share/doc/ntp/biblio.htm OLD_FILES+=usr/share/doc/ntp/build.htm OLD_FILES+=usr/share/doc/ntp/clockopt.htm OLD_FILES+=usr/share/doc/ntp/config.htm OLD_FILES+=usr/share/doc/ntp/confopt.htm 
OLD_FILES+=usr/share/doc/ntp/copyright.htm OLD_FILES+=usr/share/doc/ntp/debug.htm OLD_FILES+=usr/share/doc/ntp/driver1.htm OLD_FILES+=usr/share/doc/ntp/driver10.htm OLD_FILES+=usr/share/doc/ntp/driver11.htm OLD_FILES+=usr/share/doc/ntp/driver12.htm OLD_FILES+=usr/share/doc/ntp/driver16.htm OLD_FILES+=usr/share/doc/ntp/driver18.htm OLD_FILES+=usr/share/doc/ntp/driver19.htm OLD_FILES+=usr/share/doc/ntp/driver2.htm OLD_FILES+=usr/share/doc/ntp/driver20.htm OLD_FILES+=usr/share/doc/ntp/driver22.htm OLD_FILES+=usr/share/doc/ntp/driver23.htm OLD_FILES+=usr/share/doc/ntp/driver24.htm OLD_FILES+=usr/share/doc/ntp/driver26.htm OLD_FILES+=usr/share/doc/ntp/driver27.htm OLD_FILES+=usr/share/doc/ntp/driver28.htm OLD_FILES+=usr/share/doc/ntp/driver29.htm OLD_FILES+=usr/share/doc/ntp/driver3.htm OLD_FILES+=usr/share/doc/ntp/driver30.htm OLD_FILES+=usr/share/doc/ntp/driver32.htm OLD_FILES+=usr/share/doc/ntp/driver33.htm OLD_FILES+=usr/share/doc/ntp/driver34.htm OLD_FILES+=usr/share/doc/ntp/driver35.htm OLD_FILES+=usr/share/doc/ntp/driver36.htm OLD_FILES+=usr/share/doc/ntp/driver37.htm OLD_FILES+=usr/share/doc/ntp/driver4.htm OLD_FILES+=usr/share/doc/ntp/driver5.htm OLD_FILES+=usr/share/doc/ntp/driver6.htm OLD_FILES+=usr/share/doc/ntp/driver7.htm OLD_FILES+=usr/share/doc/ntp/driver8.htm OLD_FILES+=usr/share/doc/ntp/driver9.htm OLD_FILES+=usr/share/doc/ntp/exec.htm OLD_FILES+=usr/share/doc/ntp/extern.htm OLD_FILES+=usr/share/doc/ntp/gadget.htm OLD_FILES+=usr/share/doc/ntp/hints.htm OLD_FILES+=usr/share/doc/ntp/howto.htm OLD_FILES+=usr/share/doc/ntp/htmlprimer.htm OLD_FILES+=usr/share/doc/ntp/index.htm OLD_FILES+=usr/share/doc/ntp/kern.htm OLD_FILES+=usr/share/doc/ntp/kernpps.htm OLD_FILES+=usr/share/doc/ntp/ldisc.htm OLD_FILES+=usr/share/doc/ntp/measure.htm OLD_FILES+=usr/share/doc/ntp/miscopt.htm OLD_FILES+=usr/share/doc/ntp/monopt.htm OLD_FILES+=usr/share/doc/ntp/mx4200data.htm OLD_FILES+=usr/share/doc/ntp/notes.htm OLD_FILES+=usr/share/doc/ntp/ntpd.htm OLD_FILES+=usr/share/doc/ntp/ntpdate.htm OLD_FILES+=usr/share/doc/ntp/ntpdc.htm OLD_FILES+=usr/share/doc/ntp/ntpq.htm OLD_FILES+=usr/share/doc/ntp/ntptime.htm OLD_FILES+=usr/share/doc/ntp/ntptrace.htm OLD_FILES+=usr/share/doc/ntp/parsedata.htm OLD_FILES+=usr/share/doc/ntp/parsenew.htm OLD_FILES+=usr/share/doc/ntp/patches.htm OLD_FILES+=usr/share/doc/ntp/porting.htm OLD_FILES+=usr/share/doc/ntp/pps.htm OLD_FILES+=usr/share/doc/ntp/prefer.htm OLD_FILES+=usr/share/doc/ntp/qth.htm OLD_FILES+=usr/share/doc/ntp/quick.htm OLD_FILES+=usr/share/doc/ntp/rdebug.htm OLD_FILES+=usr/share/doc/ntp/refclock.htm OLD_FILES+=usr/share/doc/ntp/release.htm OLD_FILES+=usr/share/doc/ntp/tickadj.htm OLD_FILES+=usr/share/doc/papers/nqnfs.ascii.gz OLD_FILES+=usr/share/doc/papers/px.ascii.gz OLD_FILES+=usr/share/man/man3/exp10.3.gz OLD_FILES+=usr/share/man/man3/exp10f.3.gz OLD_FILES+=usr/share/man/man3/fpsetsticky.3.gz OLD_FILES+=usr/share/man/man3/gss_krb5_compat_des3_mic.3.gz OLD_FILES+=usr/share/man/man3/gss_krb5_copy_ccache.3.gz OLD_FILES+=usr/share/man/man3/mac_is_present_np.3.gz OLD_FILES+=usr/share/man/man3/mbmb.3.gz OLD_FILES+=usr/share/man/man3/setrunelocale.3.gz OLD_FILES+=usr/share/man/man5/usbd.conf.5.gz .if ${TARGET_ARCH} != "i386" && ${TARGET_ARCH} != "amd64" OLD_FILES+=usr/share/man/man8/boot_i386.8.gz .endif .if ${TARGET_ARCH} != "aarch64" && ${TARGET} != "arm" && \ ${TARGET_ARCH} != "powerpc" && ${TARGET_ARCH} != "powerpc64" && \ ${TARGET_ARCH} != "sparc64" && ${TARGET} != "mips" OLD_FILES+=usr/share/man/man8/ofwdump.8.gz .endif 
OLD_FILES+=usr/share/man/man8/mount_reiserfs.8.gz OLD_FILES+=usr/share/man/man9/VFS_START.9.gz OLD_FILES+=usr/share/man/man9/cpu_critical_exit.9.gz OLD_FILES+=usr/share/man/man9/cpu_critical_enter.9.gz OLD_FILES+=usr/share/info/annotate.info.gz OLD_FILES+=usr/share/info/tar.info.gz OLD_FILES+=usr/share/bsnmp/defs/tree.def OLD_FILES+=usr/share/bsnmp/defs/mibII_tree.def OLD_FILES+=usr/share/bsnmp/defs/netgraph_tree.def OLD_FILES+=usr/share/bsnmp/mibs/FOKUS-MIB.txt OLD_FILES+=usr/share/bsnmp/mibs/BEGEMOT-MIB.txt OLD_FILES+=usr/share/bsnmp/mibs/BEGEMOT-SNMPD.txt OLD_FILES+=usr/share/bsnmp/mibs/BEGEMOT-NETGRAPH.txt OLD_FILES+=usr/libdata/msdosfs/iso22dos OLD_FILES+=usr/libdata/msdosfs/iso72dos OLD_FILES+=usr/libdata/msdosfs/koi2dos OLD_FILES+=usr/libdata/msdosfs/koi8u2dos # The following files are *not* obsolete, they just don't get touched at # install, so don't add them: # - boot/loader.rc # - usr/share/tmac/man.local # - usr/share/tmac/mm/locale # - usr/share/tmac/mm/se_locale # - var/yp/Makefile # 20071120: shared library version bump OLD_LIBS+=usr/lib/libasn1.so.8 OLD_LIBS+=usr/lib/libgssapi.so.8 OLD_LIBS+=usr/lib/libgssapi_krb5.so.8 OLD_LIBS+=usr/lib/libhdb.so.8 OLD_LIBS+=usr/lib/libkadm5clnt.so.8 OLD_LIBS+=usr/lib/libkadm5srv.so.8 OLD_LIBS+=usr/lib/libkafs5.so.8 OLD_LIBS+=usr/lib/libkrb5.so.8 OLD_LIBS+=usr/lib/libobjc.so.2 OLD_LIBS+=usr/lib32/libgssapi.so.8 OLD_LIBS+=usr/lib32/libobjc.so.2 # 20070519: GCC 4.2 OLD_LIBS+=usr/lib/libg2c.a OLD_LIBS+=usr/lib/libg2c.so OLD_LIBS+=usr/lib/libg2c.so.2 OLD_LIBS+=usr/lib/libg2c_p.a OLD_LIBS+=usr/lib/libgcc_pic.a OLD_LIBS+=usr/lib32/libg2c.a OLD_LIBS+=usr/lib32/libg2c.so OLD_LIBS+=usr/lib32/libg2c.so.2 OLD_LIBS+=usr/lib32/libg2c_p.a OLD_LIBS+=usr/lib32/libgcc_pic.a # 20060729: OpenSSL 0.9.7e -> 0.9.8b upgrade OLD_LIBS+=lib/libcrypto.so.4 OLD_LIBS+=usr/lib/libssl.so.4 OLD_LIBS+=usr/lib32/libcrypto.so.4 OLD_LIBS+=usr/lib32/libssl.so.4 # 20060521: gethostbyaddr(3) ABI change OLD_LIBS+=usr/lib/libroken.so.8 OLD_LIBS+=lib/libatm.so.3 OLD_LIBS+=lib/libc.so.6 OLD_LIBS+=lib/libutil.so.5 OLD_LIBS+=usr/lib32/libatm.so.3 OLD_LIBS+=usr/lib32/libc.so.6 OLD_LIBS+=usr/lib32/libutil.so.5 # 20060413: shared library moved to /usr/lib OLD_LIBS+=lib/libgpib.so.1 # 20060413: libpcap.so.4 moved to /lib/ OLD_LIBS+=usr/lib/libpcap.so.4 # 20060412: libpthread.so.2 moved to /lib/ OLD_LIBS+=usr/lib/libpthread.so.2 # 20060127: revert libdisk to static-only OLD_LIBS+=usr/lib/libdisk.so.3 # 20051027: libc_r discontinued (removed 20101113) OLD_LIBS+=usr/lib/libc_r.a OLD_LIBS+=usr/lib/libc_r.so OLD_LIBS+=usr/lib/libc_r.so.7 OLD_LIBS+=usr/lib/libc_r_p.a OLD_LIBS+=usr/lib32/libc_r.a OLD_LIBS+=usr/lib32/libc_r.so OLD_LIBS+=usr/lib32/libc_r.so.7 OLD_LIBS+=usr/lib32/libc_r_p.a # 20050722: bump for 6.0-RELEASE OLD_LIBS+=lib/libalias.so.4 OLD_LIBS+=lib/libatm.so.2 OLD_LIBS+=lib/libbegemot.so.1 OLD_LIBS+=lib/libbsdxml.so.1 OLD_LIBS+=lib/libbsnmp.so.2 OLD_LIBS+=lib/libc.so.5 OLD_LIBS+=lib/libcam.so.2 OLD_LIBS+=lib/libcrypt.so.2 OLD_LIBS+=lib/libcrypto.so.3 OLD_LIBS+=lib/libdevstat.so.4 OLD_LIBS+=lib/libedit.so.4 OLD_LIBS+=lib/libgeom.so.2 OLD_LIBS+=lib/libgpib.so.0 OLD_LIBS+=lib/libipsec.so.1 OLD_LIBS+=lib/libipx.so.2 OLD_LIBS+=lib/libkiconv.so.1 OLD_LIBS+=lib/libkvm.so.2 OLD_LIBS+=lib/libm.so.3 OLD_LIBS+=lib/libmd.so.2 OLD_LIBS+=lib/libncurses.so.5 OLD_LIBS+=lib/libreadline.so.5 OLD_LIBS+=lib/libsbuf.so.2 OLD_LIBS+=lib/libufs.so.2 OLD_LIBS+=lib/libutil.so.4 OLD_LIBS+=lib/libz.so.2 OLD_LIBS+=usr/lib/libarchive.so.1 OLD_LIBS+=usr/lib/libasn1.so.7 OLD_LIBS+=usr/lib/libbluetooth.so.1 
OLD_LIBS+=usr/lib/libbz2.so.1 OLD_LIBS+=usr/lib/libc_r.so.5 OLD_LIBS+=usr/lib/libcalendar.so.2 OLD_LIBS+=usr/lib/libcom_err.so.2 OLD_LIBS+=usr/lib/libdevinfo.so.2 OLD_LIBS+=usr/lib/libdialog.so.4 OLD_LIBS+=usr/lib/libfetch.so.3 OLD_LIBS+=usr/lib/libform.so.2 OLD_LIBS+=usr/lib/libftpio.so.5 OLD_LIBS+=usr/lib/libg2c.so.1 OLD_LIBS+=usr/lib/libgnuregex.so.2 OLD_LIBS+=usr/lib/libgssapi.so.7 OLD_LIBS+=usr/lib/libhdb.so.7 OLD_LIBS+=usr/lib/libhistory.so.5 OLD_LIBS+=usr/lib/libkadm5clnt.so.7 OLD_LIBS+=usr/lib/libkadm5srv.so.7 OLD_LIBS+=usr/lib/libkafs5.so.7 OLD_LIBS+=usr/lib/libkrb5.so.7 OLD_LIBS+=usr/lib/libmagic.so.1 OLD_LIBS+=usr/lib/libmenu.so.2 OLD_LIBS+=usr/lib/libmilter.so.2 OLD_LIBS+=usr/lib/libmp.so.4 OLD_LIBS+=usr/lib/libncp.so.1 OLD_LIBS+=usr/lib/libnetgraph.so.1 OLD_LIBS+=usr/lib/libngatm.so.1 OLD_LIBS+=usr/lib/libobjc.so.1 OLD_LIBS+=usr/lib/libopie.so.3 OLD_LIBS+=usr/lib/libpam.so.2 OLD_LIBS+=usr/lib/libpanel.so.2 OLD_LIBS+=usr/lib/libpcap.so.3 OLD_LIBS+=usr/lib/libpmc.so.2 OLD_LIBS+=usr/lib/libpthread.so.1 OLD_LIBS+=usr/lib/libradius.so.1 OLD_LIBS+=usr/lib/libroken.so.7 OLD_LIBS+=usr/lib/librpcsvc.so.2 OLD_LIBS+=usr/lib/libsdp.so.1 OLD_LIBS+=usr/lib/libsmb.so.1 OLD_LIBS+=usr/lib/libssh.so.2 OLD_LIBS+=usr/lib/libssl.so.3 OLD_LIBS+=usr/lib/libstdc++.so.4 OLD_LIBS+=usr/lib/libtacplus.so.1 OLD_LIBS+=usr/lib/libthr.so.1 OLD_LIBS+=usr/lib/libthread_db.so.1 OLD_LIBS+=usr/lib/libugidfw.so.1 OLD_LIBS+=usr/lib/libusbhid.so.1 OLD_LIBS+=usr/lib/libvgl.so.3 OLD_LIBS+=usr/lib/libwrap.so.3 OLD_LIBS+=usr/lib/libypclnt.so.1 OLD_LIBS+=usr/lib/pam_chroot.so.2 OLD_LIBS+=usr/lib/pam_deny.so.2 OLD_LIBS+=usr/lib/pam_echo.so.2 OLD_LIBS+=usr/lib/pam_exec.so.2 OLD_LIBS+=usr/lib/pam_ftpusers.so.2 OLD_LIBS+=usr/lib/pam_group.so.2 OLD_LIBS+=usr/lib/pam_guest.so.2 OLD_LIBS+=usr/lib/pam_krb5.so.2 OLD_LIBS+=usr/lib/pam_ksu.so.2 OLD_LIBS+=usr/lib/pam_lastlog.so.2 OLD_LIBS+=usr/lib/pam_login_access.so.2 OLD_LIBS+=usr/lib/pam_nologin.so.2 OLD_LIBS+=usr/lib/pam_opie.so.2 OLD_LIBS+=usr/lib/pam_opieaccess.so.2 OLD_LIBS+=usr/lib/pam_passwdqc.so.2 OLD_LIBS+=usr/lib/pam_permit.so.2 OLD_LIBS+=usr/lib/pam_radius.so.2 OLD_LIBS+=usr/lib/pam_rhosts.so.2 OLD_LIBS+=usr/lib/pam_rootok.so.2 OLD_LIBS+=usr/lib/pam_securetty.so.2 OLD_LIBS+=usr/lib/pam_self.so.2 OLD_LIBS+=usr/lib/pam_ssh.so.2 OLD_LIBS+=usr/lib/pam_tacplus.so.2 OLD_LIBS+=usr/lib/pam_unix.so.2 OLD_LIBS+=usr/lib/snmp_atm.so.3 OLD_LIBS+=usr/lib/snmp_mibII.so.3 OLD_LIBS+=usr/lib/snmp_netgraph.so.3 OLD_LIBS+=usr/lib/snmp_pf.so.3 # 200505XX: ? 
OLD_LIBS+=usr/lib/snmp_atm.so.2 OLD_LIBS+=usr/lib/snmp_mibII.so.2 OLD_LIBS+=usr/lib/snmp_netgraph.so.2 OLD_LIBS+=usr/lib/snmp_pf.so.2 # 2005XXXX: not ready for primetime yet OLD_LIBS+=usr/lib/libautofs.so.1 # 200411XX: libxpg4 removal OLD_LIBS+=usr/lib/libxpg4.so.3 # 200410XX: libm compatibility fix OLD_LIBS+=lib/libm.so.2 # 20041001: version bump OLD_LIBS+=lib/libreadline.so.4 OLD_LIBS+=usr/lib/libhistory.so.4 OLD_LIBS+=usr/lib/libopie.so.2 OLD_LIBS+=usr/lib/libpcap.so.2 # 20040925: bind9 import OLD_LIBS+=usr/lib/libisc.so.1 # 200408XX OLD_LIBS+=usr/lib/snmp_netgraph.so.1 # 200404XX OLD_LIBS+=usr/lib/libsnmp.so.1 OLD_LIBS+=usr/lib/snmp_mibII.so.1 # 200309XX OLD_LIBS+=usr/lib/libasn1.so.6 OLD_LIBS+=usr/lib/libhdb.so.6 OLD_LIBS+=usr/lib/libkadm5clnt.so.6 OLD_LIBS+=usr/lib/libkadm5srv.so.6 OLD_LIBS+=usr/lib/libkrb5.so.6 OLD_LIBS+=usr/lib/libroken.so.6 # 200304XX OLD_LIBS+=usr/lib/libc.so.4 OLD_LIBS+=usr/lib/libc_r.so.4 OLD_LIBS+=usr/lib/libdevstat.so.2 OLD_LIBS+=usr/lib/libedit.so.3 OLD_LIBS+=usr/lib/libgmp.so.3 OLD_LIBS+=usr/lib/libmp.so.3 OLD_LIBS+=usr/lib/libpam.so.1 OLD_LIBS+=usr/lib/libposix1e.so.2 OLD_LIBS+=usr/lib/libskey.so.2 OLD_LIBS+=usr/lib/libusbhid.so.0 OLD_LIBS+=usr/lib/libvgl.so.2 # 20030218: OpenSSL 0.9.7 import OLD_FILES+=usr/include/des.h OLD_FILES+=usr/lib/libdes.a OLD_FILES+=usr/lib/libdes.so OLD_LIBS+=usr/lib/libdes.so.3 OLD_FILES+=usr/lib/libdes_p.a # 200302XX OLD_LIBS+=usr/lib/libacl.so.3 OLD_LIBS+=usr/lib/libasn1.so.5 OLD_LIBS+=usr/lib/libcrypto.so.2 OLD_LIBS+=usr/lib/libgssapi.so.5 OLD_LIBS+=usr/lib/libhdb.so.5 OLD_LIBS+=usr/lib/libkadm.so.3 OLD_LIBS+=usr/lib/libkadm5clnt.so.5 OLD_LIBS+=usr/lib/libkadm5srv.so.5 OLD_LIBS+=usr/lib/libkafs.so.3 OLD_LIBS+=usr/lib/libkafs5.so.5 OLD_LIBS+=usr/lib/libkdb.so.3 OLD_LIBS+=usr/lib/libkrb.so.3 OLD_LIBS+=usr/lib/libroken.so. OLD_LIBS+=usr/lib/libssl.so.2 OLD_LIBS+=usr/lib/pam_kerberosIV.so # 200208XX OLD_LIBS+=usr/lib/libgssapi.so.4 # 200203XX OLD_LIBS+=usr/lib/libss.so.3 OLD_LIBS+=usr/lib/libusb.so.0 # 200112XX OLD_LIBS+=usr/lib/libfetch.so.2 # 200110XX OLD_LIBS+=usr/lib/libgssapi.so.3 # 200104XX OLD_LIBS+=usr/lib/libdescrypt.so.2 OLD_LIBS+=usr/lib/libscrypt.so.2 # 200102XX OLD_LIBS+=usr/lib/libcrypto.so.1 OLD_LIBS+=usr/lib/libssl.so.1 # 200009XX OLD_LIBS+=usr/lib/libRSAglue.so.1 OLD_LIBS+=usr/lib/librsaINTL.so.1 OLD_LIBS+=usr/lib/librsaUSA.so.1 # 200006XX OLD_LIBS+=usr/lib/libalias.so.3 OLD_LIBS+=usr/lib/libfetch.so.1 OLD_LIBS+=usr/lib/libipsec.so.0 # 200005XX OLD_LIBS+=usr/lib/libxpg4.so.2 # 200002XX OLD_LIBS+=usr/lib/libc.so.3 OLD_LIBS+=usr/lib/libcurses.so.2 OLD_LIBS+=usr/lib/libdialog.so.3 OLD_LIBS+=usr/lib/libedit.so.2 OLD_LIBS+=usr/lib/libf2c.so.2 OLD_LIBS+=usr/lib/libftpio.so.4 OLD_LIBS+=usr/lib/libg++.so.4 OLD_LIBS+=usr/lib/libhistory.so.3 OLD_LIBS+=usr/lib/libmytinfo.so.2 OLD_LIBS+=usr/lib/libncurses.so.3 OLD_LIBS+=usr/lib/libreadline.so.3 OLD_LIBS+=usr/lib/libss.so.2 OLD_LIBS+=usr/lib/libtermcap.so.2 OLD_LIBS+=usr/lib/libutil.so.2 OLD_LIBS+=usr/lib/libvgl.so.1 OLD_LIBS+=usr/lib/libwrap.so.2 # 19991216 OLD_FILES+=usr/sbin/xntpdc # 199909XX OLD_LIBS+=usr/lib/libc_r.so.3 # ??? 
OLD_LIBS+=usr/lib/libarchive.so.2 OLD_LIBS+=usr/lib/libbsnmp.so.1 OLD_LIBS+=usr/lib/libc_r.so.6 OLD_LIBS+=usr/lib32/libarchive.so.2 OLD_LIBS+=usr/lib32/libc_r.so.6 OLD_LIBS+=usr/lib/libcipher.so.2 OLD_LIBS+=usr/lib/libgssapi.so.6 OLD_LIBS+=usr/lib/libkse.so.1 OLD_LIBS+=usr/lib/liblwres.so.3 OLD_LIBS+=usr/lib/pam_ftp.so.2 # 20131013: Removal of the ATF tools OLD_DIRS+=etc/atf OLD_DIRS+=usr/share/examples/atf OLD_DIRS+=usr/share/xml/atf OLD_DIRS+=usr/share/xml OLD_DIRS+=usr/share/xsl/atf OLD_DIRS+=usr/share/xsl # 20040925: bind9 import OLD_DIRS+=usr/share/doc/bind/html OLD_DIRS+=usr/share/doc/bind/misc OLD_DIRS+=usr/share/doc/bind/ # ??? OLD_DIRS+=usr/include/g++/std OLD_DIRS+=usr/include/msdosfs OLD_DIRS+=usr/include/ntfs OLD_DIRS+=usr/include/nwfs OLD_DIRS+=usr/include/ufs/mfs # 20011001: UUCP migration to ports OLD_DIRS+=usr/libexec/uucp .include "tools/build/mk/OptionalObsoleteFiles.inc" Index: projects/clang500-import/bin/dd/tests/dd2_test.sh =================================================================== --- projects/clang500-import/bin/dd/tests/dd2_test.sh (revision 319548) +++ projects/clang500-import/bin/dd/tests/dd2_test.sh (revision 319549) @@ -1,50 +1,54 @@ # # Copyright (c) 2017 Spectra Logic Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS # ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
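The seek_overflow test body below walks dd(1) up to the edge of a 64-bit byte offset: 2^63 / 4096 - 1 blocks of 4096 bytes is the largest seek whose byte offset still fits in off_t, while 2^63 / 4096 blocks is the first one that does not. A minimal C sketch of the kind of range check the test exercises (names are hypothetical, not dd's actual source):

#include <assert.h>
#include <stdint.h>

/*
 * Sketch: a seek of "seek" blocks of "bs" bytes is only valid when the
 * byte offset seek * bs fits in a signed 64-bit off_t.  Returns 0 on
 * success and -1 when the offset is unrepresentable.
 */
static int
seek_in_range(int64_t seek, int64_t bs, int64_t *offset)
{
	if (seek < 0 || bs <= 0 || seek > INT64_MAX / bs)
		return (-1);		/* seek * bs would overflow */
	*offset = seek * bs;
	return (0);
}

int
main(void)
{
	int64_t off;

	/*
	 * INT64_MAX / 4096 == 2^63 / 4096 - 1, because 4096 divides 2^63
	 * evenly.  That is the last representable offset, and is accepted.
	 */
	assert(seek_in_range(INT64_MAX / 4096, 4096, &off) == 0);
	/* 2^63 / 4096 blocks: first unrepresentable offset, rejected. */
	assert(seek_in_range(INT64_MAX / 4096 + 1, 4096, &off) == -1);
	/* Negative seeks are rejected as well, as in the seek=-1 case. */
	assert(seek_in_range(-1, 4096, &off) == -1);
	return (0);
}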
# # $FreeBSD$ atf_test_case seek_overflow -seek_overflow_head() { +seek_overflow_head() +{ atf_set "descr" "dd(1) should reject too-large seek values" } -seek_overflow_body() { +seek_overflow_body() +{ + atf_expect_fail "fails with 'dd: truncating f.out: File too large' - bug 219757" + touch f.in # Positive tests seek=`echo "2^63 / 4096 - 1" | bc` atf_check -s exit:0 -e ignore dd if=f.in of=f.out bs=4096 seek=$seek # Negative tests seek=`echo "2^63 / 4096" | bc` atf_check -s not-exit:0 -e match:"seek offsets cannot be larger than" \ dd if=f.in of=f.out bs=4096 seek=$seek atf_check -s not-exit:0 -e match:"seek offsets cannot be larger than" \ dd if=f.in of=f.out bs=4096 seek=-1 } atf_init_test_cases() { atf_add_test_case seek_overflow } Index: projects/clang500-import/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_aarch64.S =================================================================== --- projects/clang500-import/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_aarch64.S (revision 319548) +++ projects/clang500-import/contrib/compiler-rt/lib/tsan/rtl/tsan_rtl_aarch64.S (revision 319549) @@ -1,258 +1,258 @@ #include "sanitizer_common/sanitizer_asm.h" .section .bss .type __tsan_pointer_chk_guard, %object .size __tsan_pointer_chk_guard, 8 __tsan_pointer_chk_guard: .zero 8 .section .text // GLIBC mangles the function pointers in jmp_buf (used in {set,long}*jmp // functions) by XORing them with a random guard pointer. For AArch64 it is a // global variable rather than a TCB one (as for x86_64/powerpc) and although // its value is exported by the loader, it lies within a private GLIBC // namespace (meaning it should only be used by GLIBC itself and the ABI is // not stable). So InitializeGuardPtr obtains the pointer guard value by // issuing a setjmp and checking the resulting pointer values against the // original ones. .hidden _Z18InitializeGuardPtrv .global _Z18InitializeGuardPtrv .type _Z18InitializeGuardPtrv, @function _Z18InitializeGuardPtrv: CFI_STARTPROC // Allocates a jmp_buf for the setjmp call. stp x29, x30, [sp, -336]! CFI_DEF_CFA_OFFSET (336) CFI_OFFSET (29, -336) CFI_OFFSET (30, -328) add x29, sp, 0 CFI_DEF_CFA_REGISTER (29) add x0, x29, 24 // Call the libc setjmp that mangles the stack pointer value adrp x1, :got:_ZN14__interception12real__setjmpE ldr x1, [x1, #:got_lo12:_ZN14__interception12real__setjmpE] ldr x1, [x1] blr x1 // glibc setjmp mangles both the frame pointer (FP, pc+4 on blr) and the // stack pointer (SP). FP will be placed on ((uintptr*)jmp_buf)[11] and // SP at ((uintptr*)jmp_buf)[13]. // The mangle operation is just 'value' xor 'pointer guard value' and // if we know the original value (SP) and the expected one, we can derive // the guard pointer value. mov x0, sp // Loads the mangled SP pointer. ldr x1, [x29, 128] eor x0, x0, x1 adrp x2, __tsan_pointer_chk_guard str x0, [x2, #:lo12:__tsan_pointer_chk_guard] ldp x29, x30, [sp], 336 CFI_RESTORE (30) CFI_RESTORE (19) CFI_DEF_CFA (31, 0) ret CFI_ENDPROC .size _Z18InitializeGuardPtrv, .-_Z18InitializeGuardPtrv .hidden __tsan_setjmp .comm _ZN14__interception11real_setjmpE,8,8 .type setjmp, @function setjmp: CFI_STARTPROC // save env parameters for function call stp x29, x30, [sp, -32]!
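The recovery performed by _Z18InitializeGuardPtrv above works because XOR mangling is its own inverse: if mangled = value ^ guard, then guard = mangled ^ value, so a single known pair (the caller's SP and the jmp_buf slot glibc stored it into) reveals the guard. A stand-alone C illustration of that derivation (mangle() and the guard constant are hypothetical stand-ins for glibc internals):

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-in for glibc's secret pointer guard. */
static const uint64_t pointer_guard = 0xdeadbeefcafef00dULL;

/* Hypothetical stand-in for the mangling setjmp applies to SP/FP. */
static uint64_t
mangle(uint64_t value)
{
	return (value ^ pointer_guard);
}

int
main(void)
{
	uint64_t sp = 0x00007fffffffe000ULL;	/* a value we know: our SP */
	uint64_t stored = mangle(sp);		/* what lands in jmp_buf */

	/* XOR is self-inverse: known value ^ mangled value == guard. */
	assert((stored ^ sp) == pointer_guard);
	return (0);
}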
CFI_DEF_CFA_OFFSET (32) CFI_OFFSET (29, -32) CFI_OFFSET (30, -24) // Adjust the SP for previous frame add x29, sp, 0 CFI_DEF_CFA_REGISTER (29) // Save jmp_buf str x19, [sp, 16] CFI_OFFSET (19, -16) mov x19, x0 // SP pointer mangling (see glibc setjmp) adrp x2, __tsan_pointer_chk_guard ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard] add x0, x29, 32 eor x1, x2, x0 // call tsan interceptor bl __tsan_setjmp // restore env parameter mov x0, x19 ldr x19, [sp, 16] ldp x29, x30, [sp], 32 CFI_RESTORE (30) CFI_RESTORE (19) CFI_DEF_CFA (31, 0) // tail jump to libc setjmp adrp x1, :got:_ZN14__interception11real_setjmpE ldr x1, [x1, #:got_lo12:_ZN14__interception11real_setjmpE] ldr x1, [x1] br x1 CFI_ENDPROC .size setjmp, .-setjmp .comm _ZN14__interception12real__setjmpE,8,8 .globl _setjmp .type _setjmp, @function _setjmp: CFI_STARTPROC // save env parameters for function call stp x29, x30, [sp, -32]! CFI_DEF_CFA_OFFSET (32) CFI_OFFSET (29, -32) CFI_OFFSET (30, -24) // Adjust the SP for previous frame add x29, sp, 0 CFI_DEF_CFA_REGISTER (29) // Save jmp_buf str x19, [sp, 16] CFI_OFFSET (19, -16) mov x19, x0 // SP pointer mangling (see glibc setjmp) adrp x2, __tsan_pointer_chk_guard ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard] add x0, x29, 32 eor x1, x2, x0 // call tsan interceptor bl __tsan_setjmp // Restore jmp_buf parameter mov x0, x19 ldr x19, [sp, 16] ldp x29, x30, [sp], 32 CFI_RESTORE (30) CFI_RESTORE (19) CFI_DEF_CFA (31, 0) // tail jump to libc setjmp adrp x1, :got:_ZN14__interception12real__setjmpE ldr x1, [x1, #:got_lo12:_ZN14__interception12real__setjmpE] ldr x1, [x1] br x1 CFI_ENDPROC .size _setjmp, .-_setjmp .comm _ZN14__interception14real_sigsetjmpE,8,8 .globl sigsetjmp .type sigsetjmp, @function sigsetjmp: CFI_STARTPROC // save env parameters for function call stp x29, x30, [sp, -32]! CFI_DEF_CFA_OFFSET (32) CFI_OFFSET (29, -32) CFI_OFFSET (30, -24) // Adjust the SP for previous frame add x29, sp, 0 CFI_DEF_CFA_REGISTER (29) // Save jmp_buf and savesigs stp x19, x20, [sp, 16] CFI_OFFSET (19, -16) CFI_OFFSET (20, -8) mov w20, w1 mov x19, x0 // SP pointer mangling (see glibc setjmp) adrp x2, __tsan_pointer_chk_guard ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard] add x0, x29, 32 eor x1, x2, x0 // call tsan interceptor bl __tsan_setjmp // restore env parameter mov w1, w20 mov x0, x19 ldp x19, x20, [sp, 16] ldp x29, x30, [sp], 32 CFI_RESTORE (30) CFI_RESTORE (29) CFI_RESTORE (19) CFI_RESTORE (20) CFI_DEF_CFA (31, 0) // tail jump to libc sigsetjmp adrp x2, :got:_ZN14__interception14real_sigsetjmpE ldr x2, [x2, #:got_lo12:_ZN14__interception14real_sigsetjmpE] ldr x2, [x2] br x2 CFI_ENDPROC .size sigsetjmp, .-sigsetjmp .comm _ZN14__interception16real___sigsetjmpE,8,8 .globl __sigsetjmp .type __sigsetjmp, @function __sigsetjmp: CFI_STARTPROC // save env parameters for function call stp x29, x30, [sp, -32]! 
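All four wrappers here (setjmp, _setjmp, sigsetjmp, and the __sigsetjmp being set up above) share one shape: spill the argument registers, call the __tsan_setjmp hook, restore the arguments, and tail-jump to the real libc entry point resolved through the _ZN14__interception...E pointers. The control flow reads roughly like the C sketch below; the real wrappers must stay in assembly because a C function would push its own frame, and setjmp records the caller's stack state. All names in the sketch are hypothetical:

#include <setjmp.h>

/* Hypothetical: filled in at startup with the real libc setjmp. */
extern int (*real_setjmp_ptr)(jmp_buf);
/* Hypothetical stand-in for the __tsan_setjmp instrumentation hook. */
extern void tsan_setjmp_hook(void *env, void *caller_frame);

int
wrapped_setjmp(jmp_buf env)
{
	/* Tell the race detector which jump target is being armed... */
	tsan_setjmp_hook(env, __builtin_frame_address(0));
	/* ...then defer to the real implementation (a tail jump in asm). */
	return (real_setjmp_ptr(env));
}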
CFI_DEF_CFA_OFFSET (32) CFI_OFFSET (29, -32) CFI_OFFSET (30, -24) // Adjust the SP for previous frame add x29, sp, 0 CFI_DEF_CFA_REGISTER (29) // Save jmp_buf and savesigs stp x19, x20, [sp, 16] CFI_OFFSET (19, -16) CFI_OFFSET (20, -8) mov w20, w1 mov x19, x0 // SP pointer mangling (see glibc setjmp) adrp x2, __tsan_pointer_chk_guard ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard] add x0, x29, 32 eor x1, x2, x0 // call tsan interceptor bl __tsan_setjmp mov w1, w20 mov x0, x19 ldp x19, x20, [sp, 16] ldp x29, x30, [sp], 32 CFI_RESTORE (30) CFI_RESTORE (29) CFI_RESTORE (19) CFI_RESTORE (20) CFI_DEF_CFA (31, 0) // tail jump to libc __sigsetjmp adrp x2, :got:_ZN14__interception16real___sigsetjmpE ldr x2, [x2, #:got_lo12:_ZN14__interception16real___sigsetjmpE] ldr x2, [x2] br x2 CFI_ENDPROC .size __sigsetjmp, .-__sigsetjmp -#if defined(__linux__) +#if defined(__FreeBSD__) || defined(__linux__) /* We do not need executable stack. */ .section .note.GNU-stack,"",@progbits #endif Index: projects/clang500-import/contrib/compiler-rt =================================================================== --- projects/clang500-import/contrib/compiler-rt (revision 319548) +++ projects/clang500-import/contrib/compiler-rt (revision 319549) Property changes on: projects/clang500-import/contrib/compiler-rt ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/compiler-rt:r318964-319547 Index: projects/clang500-import/contrib/xz/src/liblzma/check/crc32_x86.S =================================================================== --- projects/clang500-import/contrib/xz/src/liblzma/check/crc32_x86.S (revision 319548) +++ projects/clang500-import/contrib/xz/src/liblzma/check/crc32_x86.S (revision 319549) @@ -1,304 +1,304 @@ /* * Speed-optimized CRC32 using slicing-by-eight algorithm * * This uses only i386 instructions, but it is optimized for i686 and later * (including e.g. Pentium II/III/IV, Athlon XP, and Core 2). For i586 * (e.g. Pentium), slicing-by-four would be better, and even the C version * of slicing-by-eight built with gcc -march=i586 tends to be a little bit * better than this. Very few probably run this code on i586 or older x86 * so this shouldn't be a problem in practice. * * Authors: Igor Pavlov (original version) * Lasse Collin (AT&T syntax, PIC support, better portability) * * This file has been put into the public domain. * You can do whatever you want with this file. * * This code needs lzma_crc32_table, which can be created using the * following C code: uint32_t lzma_crc32_table[8][256]; void init_table(void) { // IEEE-802.3 static const uint32_t poly32 = UINT32_C(0xEDB88320); // Castagnoli // static const uint32_t poly32 = UINT32_C(0x82F63B78); // Koopman // static const uint32_t poly32 = UINT32_C(0xEB31D82E); for (size_t s = 0; s < 8; ++s) { for (size_t b = 0; b < 256; ++b) { uint32_t r = s == 0 ? b : lzma_crc32_table[s - 1][b]; for (size_t i = 0; i < 8; ++i) { if (r & 1) r = (r >> 1) ^ poly32; else r >>= 1; } lzma_crc32_table[s][b] = r; } } } * The prototype of the CRC32 function: * extern uint32_t lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc); */ /* * On some systems, the functions need to be prefixed. The prefix is * usually an underscore. 
*/ #ifndef __USER_LABEL_PREFIX__ # define __USER_LABEL_PREFIX__ #endif #define MAKE_SYM_CAT(prefix, sym) prefix ## sym #define MAKE_SYM(prefix, sym) MAKE_SYM_CAT(prefix, sym) #define LZMA_CRC32 MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc32) #define LZMA_CRC32_TABLE MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc32_table) /* * Solaris assembler doesn't have .p2align, and Darwin uses .align * differently than GNU/Linux and Solaris. */ #if defined(__APPLE__) || defined(__MSDOS__) # define ALIGN(pow2, abs) .align pow2 #else # define ALIGN(pow2, abs) .align abs #endif .text .globl LZMA_CRC32 #if !defined(__APPLE__) && !defined(_WIN32) && !defined(__CYGWIN__) \ && !defined(__MSDOS__) .type LZMA_CRC32, @function #endif ALIGN(4, 16) LZMA_CRC32: /* * Register usage: * %eax crc * %esi buf * %edi size or buf + size * %ebx lzma_crc32_table * %ebp Table index * %ecx Temporary * %edx Temporary */ pushl %ebx pushl %esi pushl %edi pushl %ebp movl 0x14(%esp), %esi /* buf */ movl 0x18(%esp), %edi /* size */ movl 0x1C(%esp), %eax /* crc */ /* * Store the address of lzma_crc32_table to %ebx. This is needed to * get position-independent code (PIC). * * The PIC macro is defined by libtool, while __PIC__ is defined * by GCC but only on some systems. Testing for both makes it simpler * to test this code without libtool, and keeps the code working also * when built with libtool but using something other than GCC. * * I understood that libtool may define PIC on Windows even though * the code in Windows DLLs is not PIC in the sense that it is in ELF * binaries, so we need a separate check to always use the non-PIC * code on Windows. */ #if (!defined(PIC) && !defined(__PIC__)) \ || (defined(_WIN32) || defined(__CYGWIN__)) /* Not PIC */ movl $ LZMA_CRC32_TABLE, %ebx #elif defined(__APPLE__) /* Mach-O */ call .L_get_pc .L_pic: leal .L_lzma_crc32_table$non_lazy_ptr-.L_pic(%ebx), %ebx movl (%ebx), %ebx #else /* ELF */ call .L_get_pc addl $_GLOBAL_OFFSET_TABLE_, %ebx movl LZMA_CRC32_TABLE@GOT(%ebx), %ebx #endif /* Complement the initial value. */ notl %eax ALIGN(4, 16) .L_align: /* * Check if there is enough input to use slicing-by-eight. * We need 16 bytes, because the loop pre-reads eight bytes. */ cmpl $16, %edi jb .L_rest /* Check if we have reached alignment of eight bytes. */ testl $7, %esi jz .L_slice /* Calculate CRC of the next input byte. */ movzbl (%esi), %ebp incl %esi movzbl %al, %ecx xorl %ecx, %ebp shrl $8, %eax xorl (%ebx, %ebp, 4), %eax decl %edi jmp .L_align ALIGN(2, 4) .L_slice: /* * If we get here, there's at least 16 bytes of aligned input * available. Make %edi a multiple of eight bytes. Store the possible * remainder over the "size" variable in the argument stack. */ movl %edi, 0x18(%esp) andl $-8, %edi subl %edi, 0x18(%esp) /* * Let %edi be buf + size - 8 while running the main loop. This way * we can compare for equality to determine when to exit the loop. */ addl %esi, %edi subl $8, %edi /* Read in the first eight aligned bytes. */ xorl (%esi), %eax movl 4(%esi), %ecx movzbl %cl, %ebp .L_loop: movl 0x0C00(%ebx, %ebp, 4), %edx movzbl %ch, %ebp xorl 0x0800(%ebx, %ebp, 4), %edx shrl $16, %ecx xorl 8(%esi), %edx movzbl %cl, %ebp xorl 0x0400(%ebx, %ebp, 4), %edx movzbl %ch, %ebp xorl (%ebx, %ebp, 4), %edx movzbl %al, %ebp /* * Read the next four bytes, for which the CRC is calculated * on the next iteration of the loop.
*/ movl 12(%esi), %ecx xorl 0x1C00(%ebx, %ebp, 4), %edx movzbl %ah, %ebp shrl $16, %eax xorl 0x1800(%ebx, %ebp, 4), %edx movzbl %ah, %ebp movzbl %al, %eax movl 0x1400(%ebx, %eax, 4), %eax addl $8, %esi xorl %edx, %eax xorl 0x1000(%ebx, %ebp, 4), %eax /* Check for end of aligned input. */ cmpl %edi, %esi movzbl %cl, %ebp jne .L_loop /* * Process the remaining eight bytes, which we have already * copied to %ecx and %edx. */ movl 0x0C00(%ebx, %ebp, 4), %edx movzbl %ch, %ebp xorl 0x0800(%ebx, %ebp, 4), %edx shrl $16, %ecx movzbl %cl, %ebp xorl 0x0400(%ebx, %ebp, 4), %edx movzbl %ch, %ebp xorl (%ebx, %ebp, 4), %edx movzbl %al, %ebp xorl 0x1C00(%ebx, %ebp, 4), %edx movzbl %ah, %ebp shrl $16, %eax xorl 0x1800(%ebx, %ebp, 4), %edx movzbl %ah, %ebp movzbl %al, %eax movl 0x1400(%ebx, %eax, 4), %eax addl $8, %esi xorl %edx, %eax xorl 0x1000(%ebx, %ebp, 4), %eax /* Copy the number of remaining bytes to %edi. */ movl 0x18(%esp), %edi .L_rest: /* Check for end of input. */ testl %edi, %edi jz .L_return /* Calculate CRC of the next input byte. */ movzbl (%esi), %ebp incl %esi movzbl %al, %ecx xorl %ecx, %ebp shrl $8, %eax xorl (%ebx, %ebp, 4), %eax decl %edi jmp .L_rest .L_return: /* Complement the final value. */ notl %eax popl %ebp popl %edi popl %esi popl %ebx ret #if defined(PIC) || defined(__PIC__) ALIGN(4, 16) .L_get_pc: movl (%esp), %ebx ret #endif #if defined(__APPLE__) && (defined(PIC) || defined(__PIC__)) /* Mach-O PIC */ .section __IMPORT,__pointers,non_lazy_symbol_pointers .L_lzma_crc32_table$non_lazy_ptr: .indirect_symbol LZMA_CRC32_TABLE .long 0 #elif defined(_WIN32) || defined(__CYGWIN__) # ifdef DLL_EXPORT /* This is equivalent of __declspec(dllexport). */ .section .drectve .ascii " -export:lzma_crc32" # endif #elif !defined(__MSDOS__) /* ELF */ .size LZMA_CRC32, .-LZMA_CRC32 #endif /* * This is needed to support non-executable stack. It's ugly to * use __linux__ here, but I don't know a way to detect when * we are using GNU assembler. */ -#if defined(__ELF__) && defined(__linux__) +#if defined(__ELF__) && (defined(__FreeBSD__) || defined(__linux__)) .section .note.GNU-stack,"",@progbits #endif Index: projects/clang500-import/contrib/xz/src/liblzma/check/crc64_x86.S =================================================================== --- projects/clang500-import/contrib/xz/src/liblzma/check/crc64_x86.S (revision 319548) +++ projects/clang500-import/contrib/xz/src/liblzma/check/crc64_x86.S (revision 319549) @@ -1,287 +1,287 @@ /* * Speed-optimized CRC64 using slicing-by-four algorithm * * This uses only i386 instructions, but it is optimized for i686 and later * (including e.g. Pentium II/III/IV, Athlon XP, and Core 2). * * Authors: Igor Pavlov (original CRC32 assembly code) * Lasse Collin (CRC64 adaptation of the modified CRC32 code) * * This file has been put into the public domain. * You can do whatever you want with this file. * * This code needs lzma_crc64_table, which can be created using the * following C code: uint64_t lzma_crc64_table[4][256]; void init_table(void) { // ECMA-182 static const uint64_t poly64 = UINT64_C(0xC96C5795D7870F42); for (size_t s = 0; s < 4; ++s) { for (size_t b = 0; b < 256; ++b) { uint64_t r = s == 0 ? b : lzma_crc64_table[s - 1][b]; for (size_t i = 0; i < 8; ++i) { if (r & 1) r = (r >> 1) ^ poly64; else r >>= 1; } lzma_crc64_table[s][b] = r; } } } * The prototype of the CRC64 function: * extern uint64_t lzma_crc64(const uint8_t *buf, size_t size, uint64_t crc); */ /* * On some systems, the functions need to be prefixed. 
The prefix is * usually an underscore. */ #ifndef __USER_LABEL_PREFIX__ # define __USER_LABEL_PREFIX__ #endif #define MAKE_SYM_CAT(prefix, sym) prefix ## sym #define MAKE_SYM(prefix, sym) MAKE_SYM_CAT(prefix, sym) #define LZMA_CRC64 MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc64) #define LZMA_CRC64_TABLE MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc64_table) /* * Solaris assembler doesn't have .p2align, and Darwin uses .align * differently than GNU/Linux and Solaris. */ #if defined(__APPLE__) || defined(__MSDOS__) # define ALIGN(pow2, abs) .align pow2 #else # define ALIGN(pow2, abs) .align abs #endif .text .globl LZMA_CRC64 #if !defined(__APPLE__) && !defined(_WIN32) && !defined(__CYGWIN__) \ && !defined(__MSDOS__) .type LZMA_CRC64, @function #endif ALIGN(4, 16) LZMA_CRC64: /* * Register usage: * %eax crc LSB * %edx crc MSB * %esi buf * %edi size or buf + size * %ebx lzma_crc64_table * %ebp Table index * %ecx Temporary */ pushl %ebx pushl %esi pushl %edi pushl %ebp movl 0x14(%esp), %esi /* buf */ movl 0x18(%esp), %edi /* size */ movl 0x1C(%esp), %eax /* crc LSB */ movl 0x20(%esp), %edx /* crc MSB */ /* * Store the address of lzma_crc64_table to %ebx. This is needed to * get position-independent code (PIC). * * The PIC macro is defined by libtool, while __PIC__ is defined * by GCC but only on some systems. Testing for both makes it simpler * to test this code without libtool, and keeps the code working also * when built with libtool but using something other than GCC. * * I understood that libtool may define PIC on Windows even though * the code in Windows DLLs is not PIC in the sense that it is in ELF * binaries, so we need a separate check to always use the non-PIC * code on Windows. */ #if (!defined(PIC) && !defined(__PIC__)) \ || (defined(_WIN32) || defined(__CYGWIN__)) /* Not PIC */ movl $ LZMA_CRC64_TABLE, %ebx #elif defined(__APPLE__) /* Mach-O */ call .L_get_pc .L_pic: leal .L_lzma_crc64_table$non_lazy_ptr-.L_pic(%ebx), %ebx movl (%ebx), %ebx #else /* ELF */ call .L_get_pc addl $_GLOBAL_OFFSET_TABLE_, %ebx movl LZMA_CRC64_TABLE@GOT(%ebx), %ebx #endif /* Complement the initial value. */ notl %eax notl %edx .L_align: /* * Check if there is enough input to use slicing-by-four. * We need eight bytes, because the loop pre-reads four bytes. */ cmpl $8, %edi jb .L_rest /* Check if we have reached alignment of four bytes. */ testl $3, %esi jz .L_slice /* Calculate CRC of the next input byte. */ movzbl (%esi), %ebp incl %esi movzbl %al, %ecx xorl %ecx, %ebp shrdl $8, %edx, %eax xorl (%ebx, %ebp, 8), %eax shrl $8, %edx xorl 4(%ebx, %ebp, 8), %edx decl %edi jmp .L_align .L_slice: /* * If we get here, there's at least eight bytes of aligned input * available. Make %edi a multiple of four bytes. Store the possible * remainder over the "size" variable in the argument stack. */ movl %edi, 0x18(%esp) andl $-4, %edi subl %edi, 0x18(%esp) /* * Let %edi be buf + size - 4 while running the main loop. This way * we can compare for equality to determine when to exit the loop. */ addl %esi, %edi subl $4, %edi /* Read in the first four aligned bytes. */ movl (%esi), %ecx .L_loop: xorl %eax, %ecx movzbl %cl, %ebp movl 0x1800(%ebx, %ebp, 8), %eax xorl %edx, %eax movl 0x1804(%ebx, %ebp, 8), %edx movzbl %ch, %ebp xorl 0x1000(%ebx, %ebp, 8), %eax xorl 0x1004(%ebx, %ebp, 8), %edx shrl $16, %ecx movzbl %cl, %ebp xorl 0x0800(%ebx, %ebp, 8), %eax xorl 0x0804(%ebx, %ebp, 8), %edx movzbl %ch, %ebp addl $4, %esi xorl (%ebx, %ebp, 8), %eax xorl 4(%ebx, %ebp, 8), %edx /* Check for end of aligned input. 
*/ cmpl %edi, %esi /* * Copy the next input byte to %ecx. It is slightly faster to * read it here than at the top of the loop. */ movl (%esi), %ecx jb .L_loop /* * Process the remaining four bytes, which we have already * copied to %ecx. */ xorl %eax, %ecx movzbl %cl, %ebp movl 0x1800(%ebx, %ebp, 8), %eax xorl %edx, %eax movl 0x1804(%ebx, %ebp, 8), %edx movzbl %ch, %ebp xorl 0x1000(%ebx, %ebp, 8), %eax xorl 0x1004(%ebx, %ebp, 8), %edx shrl $16, %ecx movzbl %cl, %ebp xorl 0x0800(%ebx, %ebp, 8), %eax xorl 0x0804(%ebx, %ebp, 8), %edx movzbl %ch, %ebp addl $4, %esi xorl (%ebx, %ebp, 8), %eax xorl 4(%ebx, %ebp, 8), %edx /* Copy the number of remaining bytes to %edi. */ movl 0x18(%esp), %edi .L_rest: /* Check for end of input. */ testl %edi, %edi jz .L_return /* Calculate CRC of the next input byte. */ movzbl (%esi), %ebp incl %esi movzbl %al, %ecx xorl %ecx, %ebp shrdl $8, %edx, %eax xorl (%ebx, %ebp, 8), %eax shrl $8, %edx xorl 4(%ebx, %ebp, 8), %edx decl %edi jmp .L_rest .L_return: /* Complement the final value. */ notl %eax notl %edx popl %ebp popl %edi popl %esi popl %ebx ret #if defined(PIC) || defined(__PIC__) ALIGN(4, 16) .L_get_pc: movl (%esp), %ebx ret #endif #if defined(__APPLE__) && (defined(PIC) || defined(__PIC__)) /* Mach-O PIC */ .section __IMPORT,__pointers,non_lazy_symbol_pointers .L_lzma_crc64_table$non_lazy_ptr: .indirect_symbol LZMA_CRC64_TABLE .long 0 #elif defined(_WIN32) || defined(__CYGWIN__) # ifdef DLL_EXPORT /* This is equivalent of __declspec(dllexport). */ .section .drectve .ascii " -export:lzma_crc64" # endif #elif !defined(__MSDOS__) /* ELF */ .size LZMA_CRC64, .-LZMA_CRC64 #endif /* * This is needed to support non-executable stack. It's ugly to * use __linux__ here, but I don't know a way to detect when * we are using GNU assembler. */ -#if defined(__ELF__) && defined(__linux__) +#if defined(__ELF__) && (defined(__FreeBSD__) || defined(__linux__)) .section .note.GNU-stack,"",@progbits #endif Index: projects/clang500-import/contrib/xz =================================================================== --- projects/clang500-import/contrib/xz (revision 319548) +++ projects/clang500-import/contrib/xz (revision 319549) Property changes on: projects/clang500-import/contrib/xz ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/xz:r316992-319547 Index: projects/clang500-import/lib/libc/sys/Makefile.inc =================================================================== --- projects/clang500-import/lib/libc/sys/Makefile.inc (revision 319548) +++ projects/clang500-import/lib/libc/sys/Makefile.inc (revision 319549) @@ -1,490 +1,489 @@ # @(#)Makefile.inc 8.3 (Berkeley) 10/24/94 # $FreeBSD$ # sys sources .PATH: ${LIBC_SRCTOP}/${LIBC_ARCH}/sys ${LIBC_SRCTOP}/sys # Include the generated makefile containing the *complete* list # of syscall names in MIASM. .include "${SRCTOP}/sys/sys/syscall.mk" # Include machine dependent definitions. # # MDASM names override the default syscall names in MIASM. # NOASM will prevent the default syscall code from being generated. # PSEUDO generates _() and __sys_() symbols, but not (). 
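Stepping back to the two CRC files above: for short or unaligned input, both routines fall back to the classic one-byte-at-a-time table lookup (their .L_align and .L_rest loops); slicing-by-eight and slicing-by-four only change how many table lookups are folded per word read. The movzbl/shrl/xorl sequence of the CRC32 byte loop corresponds to this C loop, a sketch written against the lzma_crc32_table layout quoted in the crc32_x86.S header:

#include <stddef.h>
#include <stdint.h>

/* Table as generated by the init_table() code quoted in crc32_x86.S. */
extern uint32_t lzma_crc32_table[8][256];

uint32_t
crc32_bytewise(const uint8_t *buf, size_t size, uint32_t crc)
{
	crc = ~crc;		/* "Complement the initial value." (notl) */
	while (size-- != 0)
		/*
		 * Index the table with (low crc byte XOR input byte),
		 * then shift the remaining crc bytes down by one.
		 */
		crc = lzma_crc32_table[0][(crc ^ *buf++) & 0xFF] ^
		    (crc >> 8);
	return (~crc);		/* "Complement the final value." */
}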
# # While historically machine dependent, all architectures have the following # declarations in common: # NOASM= break.o \ exit.o \ getlogin.o \ sstk.o \ yield.o PSEUDO= _exit.o \ _getlogin.o .sinclude "${LIBC_SRCTOP}/${LIBC_ARCH}/sys/Makefile.inc" SRCS+= clock_gettime.c gettimeofday.c __vdso_gettimeofday.c NOASM+= clock_gettime.o gettimeofday.o PSEUDO+= _clock_gettime.o _gettimeofday.o # Sources common to both syscall interfaces: SRCS+= \ __error.c \ interposing_table.c SRCS+= getdents.c lstat.c mknod.c stat.c SRCS+= futimens.c utimensat.c NOASM+= futimens.o utimensat.o PSEUDO+= _futimens.o _utimensat.o SRCS+= pipe.c INTERPOSED = \ accept \ accept4 \ aio_suspend \ clock_nanosleep \ close \ connect \ fcntl \ fdatasync \ fsync \ fork \ kevent \ msync \ nanosleep \ open \ openat \ poll \ ppoll \ pselect \ ptrace \ read \ readv \ recvfrom \ recvmsg \ select \ sendmsg \ sendto \ setcontext \ sigprocmask \ sigsuspend \ sigtimedwait \ sigwait \ sigwaitinfo \ swapcontext \ wait4 \ wait6 \ write \ writev .if ${MACHINE_CPUARCH} == "sparc64" SRCS+= sigaction.c NOASM+= sigaction.o .else INTERPOSED+= sigaction .endif SRCS+= ${INTERPOSED:S/$/.c/} NOASM+= ${INTERPOSED:S/$/.o/} PSEUDO+= ${INTERPOSED:C/^.*$/_&.o/} # Add machine dependent asm sources: SRCS+=${MDASM} # Look though the complete list of syscalls (MIASM) for names that are # not defined with machine dependent implementations (MDASM) and are # not declared for no generation of default code (NOASM). Add each # syscall that satisfies these conditions to the ASM list. .for _asm in ${MIASM} .if (${MDASM:R:M${_asm:R}} == "") .if (${NOASM:R:M${_asm:R}} == "") ASM+=$(_asm) .endif .endif .endfor SASM= ${ASM:S/.o/.S/} SPSEUDO= ${PSEUDO:S/.o/.S/} SRCS+= ${SASM} ${SPSEUDO} SYM_MAPS+= ${LIBC_SRCTOP}/sys/Symbol.map # Generated files CLEANFILES+= ${SASM} ${SPSEUDO} .if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "i386" || \ ${MACHINE_CPUARCH} == "powerpc" || ${MACHINE_ARCH:Marmv6*} NOTE_GNU_STACK='\t.section .note.GNU-stack,"",%%progbits\n' .else NOTE_GNU_STACK='' .endif ${SASM}: printf '#include "compat.h"\n' > ${.TARGET} printf '#include "SYS.h"\nRSYSCALL(${.PREFIX})\n' >> ${.TARGET} printf ${NOTE_GNU_STACK} >>${.TARGET} ${SPSEUDO}: printf '#include "compat.h"\n' > ${.TARGET} printf '#include "SYS.h"\nPSEUDO(${.PREFIX:S/_//})\n' \ >> ${.TARGET} printf ${NOTE_GNU_STACK} >>${.TARGET} MAN+= abort2.2 \ accept.2 \ access.2 \ acct.2 \ adjtime.2 \ aio_cancel.2 \ aio_error.2 \ aio_fsync.2 \ aio_mlock.2 \ aio_read.2 \ aio_return.2 \ aio_suspend.2 \ aio_waitcomplete.2 \ aio_write.2 \ bind.2 \ bindat.2 \ brk.2 \ cap_enter.2 \ cap_fcntls_limit.2 \ cap_ioctls_limit.2 \ cap_rights_limit.2 \ chdir.2 \ chflags.2 \ chmod.2 \ chown.2 \ chroot.2 \ clock_gettime.2 \ close.2 \ closefrom.2 \ connect.2 \ connectat.2 \ cpuset.2 \ cpuset_getaffinity.2 \ dup.2 \ execve.2 \ _exit.2 \ extattr_get_file.2 \ fcntl.2 \ ffclock.2 \ fhopen.2 \ flock.2 \ fork.2 \ fsync.2 \ getdirentries.2 \ getdtablesize.2 \ getfh.2 \ getfsstat.2 \ getgid.2 \ getgroups.2 \ getitimer.2 \ getlogin.2 \ getloginclass.2 \ getpeername.2 \ getpgrp.2 \ getpid.2 \ getpriority.2 \ getrlimit.2 \ getrusage.2 \ getsid.2 \ getsockname.2 \ getsockopt.2 \ gettimeofday.2 \ getuid.2 \ intro.2 \ ioctl.2 \ issetugid.2 \ jail.2 \ kenv.2 \ kill.2 \ kldfind.2 \ kldfirstmod.2 \ kldload.2 \ kldnext.2 \ kldstat.2 \ kldsym.2 \ kldunload.2 \ kqueue.2 \ ktrace.2 \ link.2 \ lio_listio.2 \ listen.2 \ lseek.2 \ madvise.2 \ mincore.2 \ minherit.2 \ mkdir.2 \ mkfifo.2 \ mknod.2 \ mlock.2 \ mlockall.2 \ mmap.2 \ modfind.2 \ 
modnext.2 \ modstat.2 \ mount.2 \ mprotect.2 \ mq_close.2 \ mq_getattr.2 \ mq_notify.2 \ mq_open.2 \ mq_receive.2 \ mq_send.2 \ mq_setattr.2 \ msgctl.2 \ msgget.2 \ msgrcv.2 \ msgsnd.2 \ msync.2 \ munmap.2 \ nanosleep.2 \ nfssvc.2 \ ntp_adjtime.2 \ numa_getaffinity.2 \ open.2 \ pathconf.2 \ pdfork.2 \ pipe.2 \ poll.2 \ posix_fadvise.2 \ posix_fallocate.2 \ posix_openpt.2 \ procctl.2 \ profil.2 \ pselect.2 \ ptrace.2 \ quotactl.2 \ rctl_add_rule.2 \ read.2 \ readlink.2 \ reboot.2 \ recv.2 \ rename.2 \ revoke.2 \ rfork.2 \ rmdir.2 \ rtprio.2 .if !defined(NO_P1003_1B) MAN+= sched_get_priority_max.2 \ sched_setparam.2 \ sched_setscheduler.2 \ sched_yield.2 .endif MAN+= sctp_generic_recvmsg.2 \ sctp_generic_sendmsg.2 \ sctp_peeloff.2 \ select.2 \ semctl.2 \ semget.2 \ semop.2 \ send.2 \ setfib.2 \ sendfile.2 \ setgroups.2 \ setpgid.2 \ setregid.2 \ setresuid.2 \ setreuid.2 \ setsid.2 \ setuid.2 \ shmat.2 \ shmctl.2 \ shmget.2 \ shm_open.2 \ shutdown.2 \ sigaction.2 \ sigaltstack.2 \ sigpending.2 \ sigprocmask.2 \ sigqueue.2 \ sigreturn.2 \ sigstack.2 \ sigsuspend.2 \ sigwait.2 \ sigwaitinfo.2 \ socket.2 \ socketpair.2 \ stat.2 \ statfs.2 \ swapon.2 \ symlink.2 \ sync.2 \ sysarch.2 \ syscall.2 \ thr_exit.2 \ thr_kill.2 \ thr_new.2 \ thr_self.2 \ thr_set_name.2 \ thr_suspend.2 \ thr_wake.2 \ timer_create.2 \ timer_delete.2 \ timer_settime.2 \ truncate.2 \ umask.2 \ undelete.2 \ unlink.2 \ utimensat.2 \ utimes.2 \ utrace.2 \ uuidgen.2 \ vfork.2 \ wait.2 \ write.2 \ _umtx_op.2 MLINKS+=accept.2 accept4.2 MLINKS+=access.2 eaccess.2 \ access.2 faccessat.2 MLINKS+=brk.2 sbrk.2 MLINKS+=cap_enter.2 cap_getmode.2 MLINKS+=cap_fcntls_limit.2 cap_fcntls_get.2 MLINKS+=cap_ioctls_limit.2 cap_ioctls_get.2 -MLINKS+=cap_rights_limit.2 cap_rights_get.2 MLINKS+=chdir.2 fchdir.2 MLINKS+=chflags.2 chflagsat.2 \ chflags.2 fchflags.2 \ chflags.2 lchflags.2 MLINKS+=chmod.2 fchmod.2 \ chmod.2 fchmodat.2 \ chmod.2 lchmod.2 MLINKS+=chown.2 fchown.2 \ chown.2 fchownat.2 \ chown.2 lchown.2 MLINKS+=clock_gettime.2 clock_getres.2 \ clock_gettime.2 clock_settime.2 MLINKS+=nanosleep.2 clock_nanosleep.2 MLINKS+=cpuset.2 cpuset_getid.2 \ cpuset.2 cpuset_setid.2 MLINKS+=cpuset_getaffinity.2 cpuset_setaffinity.2 MLINKS+=dup.2 dup2.2 MLINKS+=execve.2 fexecve.2 MLINKS+=extattr_get_file.2 extattr.2 \ extattr_get_file.2 extattr_delete_fd.2 \ extattr_get_file.2 extattr_delete_file.2 \ extattr_get_file.2 extattr_delete_link.2 \ extattr_get_file.2 extattr_get_fd.2 \ extattr_get_file.2 extattr_get_link.2 \ extattr_get_file.2 extattr_list_fd.2 \ extattr_get_file.2 extattr_list_file.2 \ extattr_get_file.2 extattr_list_link.2 \ extattr_get_file.2 extattr_set_fd.2 \ extattr_get_file.2 extattr_set_file.2 \ extattr_get_file.2 extattr_set_link.2 MLINKS+=ffclock.2 ffclock_getcounter.2 \ ffclock.2 ffclock_getestimate.2 \ ffclock.2 ffclock_setestimate.2 MLINKS+=fhopen.2 fhstat.2 fhopen.2 fhstatfs.2 MLINKS+=fsync.2 fdatasync.2 MLINKS+=getdirentries.2 getdents.2 MLINKS+=getfh.2 lgetfh.2 MLINKS+=getgid.2 getegid.2 MLINKS+=getitimer.2 setitimer.2 MLINKS+=getlogin.2 getlogin_r.3 MLINKS+=getlogin.2 setlogin.2 MLINKS+=getloginclass.2 setloginclass.2 MLINKS+=getpgrp.2 getpgid.2 MLINKS+=getpid.2 getppid.2 MLINKS+=getpriority.2 setpriority.2 MLINKS+=getrlimit.2 setrlimit.2 MLINKS+=getsockopt.2 setsockopt.2 MLINKS+=gettimeofday.2 settimeofday.2 MLINKS+=getuid.2 geteuid.2 MLINKS+=intro.2 errno.2 MLINKS+=jail.2 jail_attach.2 \ jail.2 jail_get.2 \ jail.2 jail_remove.2 \ jail.2 jail_set.2 MLINKS+=kldunload.2 kldunloadf.2 MLINKS+=kqueue.2 kevent.2 \ kqueue.2 
EV_SET.3 MLINKS+=link.2 linkat.2 MLINKS+=madvise.2 posix_madvise.2 MLINKS+=mkdir.2 mkdirat.2 MLINKS+=mkfifo.2 mkfifoat.2 MLINKS+=mknod.2 mknodat.2 MLINKS+=mlock.2 munlock.2 MLINKS+=mlockall.2 munlockall.2 MLINKS+=modnext.2 modfnext.2 MLINKS+=mount.2 nmount.2 \ mount.2 unmount.2 MLINKS+=mq_receive.2 mq_timedreceive.2 MLINKS+=mq_send.2 mq_timedsend.2 MLINKS+=ntp_adjtime.2 ntp_gettime.2 MLINKS+=numa_getaffinity.2 numa_setaffinity.2 MLINKS+=open.2 openat.2 MLINKS+=pathconf.2 fpathconf.2 MLINKS+=pathconf.2 lpathconf.2 MLINKS+=pdfork.2 pdgetpid.2\ pdfork.2 pdkill.2 \ pdfork.2 pdwait4.2 MLINKS+=pipe.2 pipe2.2 MLINKS+=poll.2 ppoll.2 MLINKS+=rctl_add_rule.2 rctl_get_limits.2 \ rctl_add_rule.2 rctl_get_racct.2 \ rctl_add_rule.2 rctl_get_rules.2 \ rctl_add_rule.2 rctl_remove_rule.2 MLINKS+=read.2 pread.2 \ read.2 preadv.2 \ read.2 readv.2 MLINKS+=readlink.2 readlinkat.2 MLINKS+=recv.2 recvfrom.2 \ recv.2 recvmsg.2 MLINKS+=rename.2 renameat.2 MLINKS+=rtprio.2 rtprio_thread.2 .if !defined(NO_P1003_1B) MLINKS+=sched_get_priority_max.2 sched_get_priority_min.2 \ sched_get_priority_max.2 sched_rr_get_interval.2 MLINKS+=sched_setparam.2 sched_getparam.2 MLINKS+=sched_setscheduler.2 sched_getscheduler.2 .endif MLINKS+=select.2 FD_CLR.3 \ select.2 FD_ISSET.3 \ select.2 FD_SET.3 \ select.2 FD_ZERO.3 MLINKS+=send.2 sendmsg.2 \ send.2 sendto.2 MLINKS+=setpgid.2 setpgrp.2 MLINKS+=setresuid.2 getresgid.2 \ setresuid.2 getresuid.2 \ setresuid.2 setresgid.2 MLINKS+=setuid.2 setegid.2 \ setuid.2 seteuid.2 \ setuid.2 setgid.2 MLINKS+=shmat.2 shmdt.2 MLINKS+=shm_open.2 shm_unlink.2 MLINKS+=sigwaitinfo.2 sigtimedwait.2 MLINKS+=stat.2 fstat.2 \ stat.2 fstatat.2 \ stat.2 lstat.2 MLINKS+=statfs.2 fstatfs.2 MLINKS+=swapon.2 swapoff.2 MLINKS+=symlink.2 symlinkat.2 MLINKS+=syscall.2 __syscall.2 MLINKS+=timer_settime.2 timer_getoverrun.2 \ timer_settime.2 timer_gettime.2 MLINKS+=thr_kill.2 thr_kill2.2 MLINKS+=truncate.2 ftruncate.2 MLINKS+=unlink.2 unlinkat.2 MLINKS+=utimensat.2 futimens.2 MLINKS+=utimes.2 futimes.2 \ utimes.2 futimesat.2 \ utimes.2 lutimes.2 MLINKS+=wait.2 wait3.2 \ wait.2 wait4.2 \ wait.2 waitpid.2 \ wait.2 waitid.2 \ wait.2 wait6.2 MLINKS+=write.2 pwrite.2 \ write.2 pwritev.2 \ write.2 writev.2 Index: projects/clang500-import/lib/libsysdecode/Makefile =================================================================== --- projects/clang500-import/lib/libsysdecode/Makefile (revision 319548) +++ projects/clang500-import/lib/libsysdecode/Makefile (revision 319549) @@ -1,129 +1,130 @@ # $FreeBSD$ .include PACKAGE=lib${LIB} LIB= sysdecode SRCS= errno.c flags.c ioctl.c signal.c syscallnames.c utrace.c INCS= sysdecode.h CFLAGS+= -I${.OBJDIR} CFLAGS+= -I${SRCTOP}/sys CFLAGS+= -I${SRCTOP}/libexec/rtld-elf MAN= sysdecode.3 \ sysdecode_abi_to_freebsd_errno.3 \ sysdecode_cap_rights.3 \ sysdecode_enum.3 \ sysdecode_fcntl_arg.3 \ sysdecode_ioctlname.3 \ sysdecode_mask.3 \ sysdecode_quotactl_cmd.3 \ sysdecode_sigcode.3 \ sysdecode_sockopt_name.3 \ sysdecode_socket_protocol.3 \ sysdecode_syscallnames.3 \ sysdecode_utrace.3 MLINKS= sysdecode_abi_to_freebsd_errno.3 sysdecode_freebsd_to_abi_errno.3 MLINKS+=sysdecode_enum.3 sysdecode_acltype.3 \ sysdecode_enum.3 sysdecode_atfd.3 \ sysdecode_enum.3 sysdecode_extattrnamespace.3 \ sysdecode_enum.3 sysdecode_fadvice.3 \ sysdecode_enum.3 sysdecode_fcntl_cmd.3 \ sysdecode_enum.3 sysdecode_getfsstat_mode.3 \ + sysdecode_enum.3 sysdecode_getrusage_who.3 \ sysdecode_enum.3 sysdecode_idtype.3 \ sysdecode_enum.3 sysdecode_ipproto.3 \ sysdecode_enum.3 sysdecode_kldsym_cmd.3 \ 
sysdecode_enum.3 sysdecode_kldunload_flags.3 \ sysdecode_enum.3 sysdecode_lio_listio_mode.3 \ sysdecode_enum.3 sysdecode_madvice.3 \ sysdecode_enum.3 sysdecode_minherit_flags.3 \ sysdecode_enum.3 sysdecode_msgctl_cmd.3 \ sysdecode_enum.3 sysdecode_nfssvc_flags.3 \ sysdecode_enum.3 sysdecode_prio_which.3 \ sysdecode_enum.3 sysdecode_procctl_cmd.3 \ sysdecode_enum.3 sysdecode_ptrace_request.3 \ sysdecode_enum.3 sysdecode_rlimit.3 \ sysdecode_enum.3 sysdecode_rtprio_function.3 \ sysdecode_enum.3 sysdecode_scheduler_policy.3 \ sysdecode_enum.3 sysdecode_semctl_cmd.3 \ sysdecode_enum.3 sysdecode_shmctl_cmd.3 \ sysdecode_enum.3 sysdecode_shutdown_how.3 \ sysdecode_enum.3 sysdecode_sigbus_code.3 \ sysdecode_enum.3 sysdecode_sigchld_code.3 \ sysdecode_enum.3 sysdecode_sigfpe_code.3 \ sysdecode_enum.3 sysdecode_sigill_code.3 \ sysdecode_enum.3 sysdecode_signal.3 \ sysdecode_enum.3 sysdecode_sigprocmask_how.3 \ sysdecode_enum.3 sysdecode_sigsegv_code.3 \ sysdecode_enum.3 sysdecode_sigtrap_code.3 \ sysdecode_enum.3 sysdecode_sockaddr_family.3 \ sysdecode_enum.3 sysdecode_socketdomain.3 \ sysdecode_enum.3 sysdecode_sockettype.3 \ sysdecode_enum.3 sysdecode_sockopt_level.3 \ sysdecode_enum.3 sysdecode_umtx_op.3 \ sysdecode_enum.3 sysdecode_vmresult.3 \ sysdecode_enum.3 sysdecode_whence.3 MLINKS+=sysdecode_fcntl_arg.3 sysdecode_fcntl_arg_p.3 MLINKS+=sysdecode_mask.3 sysdecode_accessmode.3 \ sysdecode_mask.3 sysdecode_capfcntlrights.3 \ sysdecode_mask.3 sysdecode_fcntl_fileflags.3 \ sysdecode_mask.3 sysdecode_fileflags.3 \ sysdecode_mask.3 sysdecode_filemode.3 \ sysdecode_mask.3 sysdecode_flock_operation.3 \ sysdecode_mask.3 sysdecode_mlockall_flags.3 \ sysdecode_mask.3 sysdecode_mmap_flags.3 \ sysdecode_mask.3 sysdecode_mmap_prot.3 \ sysdecode_mask.3 sysdecode_mount_flags.3 \ sysdecode_mask.3 sysdecode_msg_flags.3 \ sysdecode_mask.3 sysdecode_msync_flags.3 \ sysdecode_mask.3 sysdecode_open_flags.3 \ sysdecode_mask.3 sysdecode_pipe2_flags.3 \ sysdecode_mask.3 sysdecode_reboot_howto.3 \ sysdecode_mask.3 sysdecode_rfork_flags.3 \ sysdecode_mask.3 sysdecode_semget_flags.3 \ sysdecode_mask.3 sysdecode_sendfile_flags.3 \ sysdecode_mask.3 sysdecode_shmat_flags.3 \ sysdecode_mask.3 sysdecode_socket_type.3 \ sysdecode_mask.3 sysdecode_thr_create_flags.3 \ sysdecode_mask.3 sysdecode_umtx_cvwait_flags.3 \ sysdecode_mask.3 sysdecode_umtx_rwlock_flags.3 \ sysdecode_mask.3 sysdecode_vmprot.3 \ sysdecode_mask.3 sysdecode_wait4_options.3 \ sysdecode_mask.3 sysdecode_wait6_options.3 CLEANFILES= ioctl.c tables.h .if defined(COMPAT_32BIT) CPP+= -m32 .endif .if ${MK_PF} != "no" CFLAGS+=-DPF .endif # Workaround duplicate declarations in CFLAGS.gcc.ioctl.c+= -Wno-redundant-decls # Workaround warning for unused ssi_cables[] in CFLAGS.gcc.ioctl.c+= -Wno-unused CFLAGS.gcc+= ${CFLAGS.gcc.${.IMPSRC}} DEPENDOBJS+= tables.h tables.h: mktables sh ${.CURDIR}/mktables ${DESTDIR}${INCLUDEDIR} ${.TARGET} # mkioctls runs find(1) for headers so needs to rebuild every time. This used # to be a hack only done in buildworld. 
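#
# (For reference, the tables.h produced above is just a sequence of the
# form TABLE_START(name) TABLE_ENTRY(SOME_FLAG) ... TABLE_END, where
# "SOME_FLAG" stands for whatever macro names mktables extracted; flags.c
# defines those three TABLE_* macros before including it to build its
# name_table arrays.)
#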
.if !defined(_SKIP_BUILD) ioctl.c: .PHONY .endif ioctl.c: mkioctls .META env CPP="${CPP}" \ /bin/sh ${.CURDIR}/mkioctls ${DESTDIR}${INCLUDEDIR} > ${.TARGET} beforedepend: ioctl.c tables.h .include Index: projects/clang500-import/lib/libsysdecode/flags.c =================================================================== --- projects/clang500-import/lib/libsysdecode/flags.c (revision 319548) +++ projects/clang500-import/lib/libsysdecode/flags.c (revision 319549) @@ -1,1000 +1,1007 @@ /* * Copyright (c) 2006 "David Kirchner" . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #define L2CAP_SOCKET_CHECKED #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * This is taken from the xlat tables originally in truss which were * in turn taken from strace. */ struct name_table { uintmax_t val; const char *str; }; #define X(a) { a, #a }, #define XEND { 0, NULL } #define TABLE_START(n) static struct name_table n[] = { #define TABLE_ENTRY X #define TABLE_END XEND }; #include "tables.h" #undef TABLE_START #undef TABLE_ENTRY #undef TABLE_END /* * These are simple support macros. print_or utilizes a variable * defined in the calling function to track whether or not it should * print a logical-OR character ('|') before a string. if_print_or * simply handles the necessary "if" statement used in many lines * of this file. */ #define print_or(fp,str,orflag) do { \ if (orflag) fputc(fp, '|'); else orflag = true; \ fprintf(fp, str); } \ while (0) #define if_print_or(fp,i,flag,orflag) do { \ if ((i & flag) == flag) \ print_or(fp,#flag,orflag); } \ while (0) static const char * lookup_value(struct name_table *table, uintmax_t val) { for (; table->str != NULL; table++) if (table->val == val) return (table->str); return (NULL); } /* * Used when the value maps to a bitmask of #definition values in the * table. This is a helper routine which outputs a symbolic mask of * matched masks. Multiple masks are separated by a pipe ('|'). 
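 * For example, with table entries for MNT_WAIT (0x1) and MNT_NOWAIT (0x2),
 * a value of 0x3 is printed as "MNT_WAIT|MNT_NOWAIT".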
* The value is modified on return to only hold unmatched bits. */ static void print_mask_part(FILE *fp, struct name_table *table, uintmax_t *valp, bool *printed) { uintmax_t rem; rem = *valp; for (; table->str != NULL; table++) { if ((table->val & rem) == table->val) { /* * Only print a zero mask if the raw value is * zero. */ if (table->val == 0 && *valp != 0) continue; fprintf(fp, "%s%s", *printed ? "|" : "", table->str); *printed = true; rem &= ~table->val; } } *valp = rem; } /* * Used when the value maps to a bitmask of #definition values in the * table. The return value is true if something was printed. If * rem is not NULL, *rem holds any bits not decoded if something was * printed. If nothing was printed and rem is not NULL, *rem holds * the original value. */ static bool print_mask_int(FILE *fp, struct name_table *table, int ival, int *rem) { uintmax_t val; bool printed; printed = false; val = (unsigned)ival; print_mask_part(fp, table, &val, &printed); if (rem != NULL) *rem = val; return (printed); } /* * Used for a mask of optional flags where a value of 0 is valid. */ static bool print_mask_0(FILE *fp, struct name_table *table, int val, int *rem) { if (val == 0) { fputs("0", fp); if (rem != NULL) *rem = 0; return (true); } return (print_mask_int(fp, table, val, rem)); } /* * Like print_mask_0 but for an unsigned long instead of an int. */ static bool print_mask_0ul(FILE *fp, struct name_table *table, u_long lval, u_long *rem) { uintmax_t val; bool printed; if (lval == 0) { fputs("0", fp); if (rem != NULL) *rem = 0; return (true); } printed = false; val = lval; print_mask_part(fp, table, &val, &printed); if (rem != NULL) *rem = val; return (printed); } static void print_integer(FILE *fp, int val, int base) { switch (base) { case 8: fprintf(fp, "0%o", val); break; case 10: fprintf(fp, "%d", val); break; case 16: fprintf(fp, "0x%x", val); break; default: abort2("bad base", 0, NULL); break; } } static bool print_value(FILE *fp, struct name_table *table, uintmax_t val) { const char *str; str = lookup_value(table, val); if (str != NULL) { fputs(str, fp); return (true); } return (false); } const char * sysdecode_atfd(int fd) { if (fd == AT_FDCWD) return ("AT_FDCWD"); return (NULL); } static struct name_table semctlops[] = { X(GETNCNT) X(GETPID) X(GETVAL) X(GETALL) X(GETZCNT) X(SETVAL) X(SETALL) X(IPC_RMID) X(IPC_SET) X(IPC_STAT) XEND }; const char * sysdecode_semctl_cmd(int cmd) { return (lookup_value(semctlops, cmd)); } static struct name_table shmctlops[] = { X(IPC_RMID) X(IPC_SET) X(IPC_STAT) XEND }; const char * sysdecode_shmctl_cmd(int cmd) { return (lookup_value(shmctlops, cmd)); } const char * sysdecode_msgctl_cmd(int cmd) { return (sysdecode_shmctl_cmd(cmd)); } static struct name_table semgetflags[] = { X(IPC_CREAT) X(IPC_EXCL) X(SEM_R) X(SEM_A) X((SEM_R>>3)) X((SEM_A>>3)) X((SEM_R>>6)) X((SEM_A>>6)) XEND }; bool sysdecode_semget_flags(FILE *fp, int flag, int *rem) { return (print_mask_int(fp, semgetflags, flag, rem)); } static struct name_table idtypes[] = { X(P_PID) X(P_PPID) X(P_PGID) X(P_SID) X(P_CID) X(P_UID) X(P_GID) X(P_ALL) X(P_LWPID) X(P_TASKID) X(P_PROJID) X(P_POOLID) X(P_JAILID) X(P_CTID) X(P_CPUID) X(P_PSETID) XEND }; /* XXX: idtype is really an idtype_t */ const char * sysdecode_idtype(int idtype) { return (lookup_value(idtypes, idtype)); } /* * [g|s]etsockopt's level argument can either be SOL_SOCKET or a * protocol-specific value.
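 * For example, a level of IPPROTO_TCP selects the TCP_* option names
 * that sysdecode_sockopt_name() decodes below.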
*/ const char * sysdecode_sockopt_level(int level) { const char *str; if (level == SOL_SOCKET) return ("SOL_SOCKET"); /* SOL_* constants for Bluetooth sockets. */ str = lookup_value(ngbtsolevel, level); if (str != NULL) return (str); /* * IP and Infiniband sockets use IP protocols as levels. Not all * protocols are valid but it is simpler to just allow all of them. * * XXX: IPPROTO_IP == 0, but UNIX domain sockets use a level of 0 * for private options. */ str = sysdecode_ipproto(level); if (str != NULL) return (str); return (NULL); } bool sysdecode_vmprot(FILE *fp, int type, int *rem) { return (print_mask_int(fp, vmprot, type, rem)); } static struct name_table sockflags[] = { X(SOCK_CLOEXEC) X(SOCK_NONBLOCK) XEND }; bool sysdecode_socket_type(FILE *fp, int type, int *rem) { const char *str; uintmax_t val; bool printed; str = lookup_value(socktype, type & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)); if (str != NULL) { fputs(str, fp); *rem = 0; printed = true; } else { *rem = type & ~(SOCK_CLOEXEC | SOCK_NONBLOCK); printed = false; } val = type & (SOCK_CLOEXEC | SOCK_NONBLOCK); print_mask_part(fp, sockflags, &val, &printed); return (printed); } bool sysdecode_access_mode(FILE *fp, int mode, int *rem) { return (print_mask_int(fp, accessmode, mode, rem)); } /* XXX: 'type' is really an acl_type_t. */ const char * sysdecode_acltype(int type) { return (lookup_value(acltype, type)); } bool sysdecode_cap_fcntlrights(FILE *fp, uint32_t rights, uint32_t *rem) { return (print_mask_int(fp, capfcntl, rights, rem)); } const char * sysdecode_extattrnamespace(int namespace) { return (lookup_value(extattrns, namespace)); } const char * sysdecode_fadvice(int advice) { return (lookup_value(fadvisebehav, advice)); } bool sysdecode_open_flags(FILE *fp, int flags, int *rem) { bool printed; int mode; uintmax_t val; mode = flags & O_ACCMODE; flags &= ~O_ACCMODE; switch (mode) { case O_RDONLY: if (flags & O_EXEC) { flags &= ~O_EXEC; fputs("O_EXEC", fp); } else fputs("O_RDONLY", fp); printed = true; mode = 0; break; case O_WRONLY: fputs("O_WRONLY", fp); printed = true; mode = 0; break; case O_RDWR: fputs("O_RDWR", fp); printed = true; mode = 0; break; default: printed = false; } val = (unsigned)flags; print_mask_part(fp, openflags, &val, &printed); if (rem != NULL) *rem = val | mode; return (printed); } bool sysdecode_fcntl_fileflags(FILE *fp, int flags, int *rem) { bool printed; int oflags; /* * The file flags used with F_GETFL/F_SETFL mostly match the * flags passed to open(2). However, a few open-only flag * bits have been repurposed for fcntl-only flags. */ oflags = flags & ~(O_NOFOLLOW | FRDAHEAD); printed = sysdecode_open_flags(fp, oflags, rem); if (flags & O_NOFOLLOW) { fprintf(fp, "%sFPOSIXSHM", printed ? "|" : ""); printed = true; } if (flags & FRDAHEAD) { fprintf(fp, "%sFRDAHEAD", printed ?
"|" : ""); printed = true; } return (printed); } bool sysdecode_flock_operation(FILE *fp, int operation, int *rem) { return (print_mask_int(fp, flockops, operation, rem)); } static struct name_table getfsstatmode[] = { X(MNT_WAIT) X(MNT_NOWAIT) XEND }; const char * sysdecode_getfsstat_mode(int mode) { return (lookup_value(getfsstatmode, mode)); } const char * +sysdecode_getrusage_who(int who) +{ + + return (lookup_value(rusage, who)); +} + +const char * sysdecode_kldsym_cmd(int cmd) { return (lookup_value(kldsymcmd, cmd)); } const char * sysdecode_kldunload_flags(int flags) { return (lookup_value(kldunloadfflags, flags)); } const char * sysdecode_lio_listio_mode(int mode) { return (lookup_value(lio_listiomodes, mode)); } const char * sysdecode_madvice(int advice) { return (lookup_value(madvisebehav, advice)); } const char * sysdecode_minherit_inherit(int inherit) { return (lookup_value(minheritflags, inherit)); } bool sysdecode_mlockall_flags(FILE *fp, int flags, int *rem) { return (print_mask_int(fp, mlockallflags, flags, rem)); } bool sysdecode_mmap_prot(FILE *fp, int prot, int *rem) { return (print_mask_int(fp, mmapprot, prot, rem)); } bool sysdecode_fileflags(FILE *fp, fflags_t flags, fflags_t *rem) { return (print_mask_0(fp, fileflags, flags, rem)); } bool sysdecode_filemode(FILE *fp, int mode, int *rem) { return (print_mask_0(fp, filemode, mode, rem)); } bool sysdecode_mount_flags(FILE *fp, int flags, int *rem) { return (print_mask_int(fp, mountflags, flags, rem)); } bool sysdecode_msync_flags(FILE *fp, int flags, int *rem) { return (print_mask_int(fp, msyncflags, flags, rem)); } const char * sysdecode_nfssvc_flags(int flags) { return (lookup_value(nfssvcflags, flags)); } static struct name_table pipe2flags[] = { X(O_CLOEXEC) X(O_NONBLOCK) XEND }; bool sysdecode_pipe2_flags(FILE *fp, int flags, int *rem) { return (print_mask_0(fp, pipe2flags, flags, rem)); } const char * sysdecode_prio_which(int which) { return (lookup_value(prio, which)); } const char * sysdecode_procctl_cmd(int cmd) { return (lookup_value(procctlcmd, cmd)); } const char * sysdecode_ptrace_request(int request) { return (lookup_value(ptraceop, request)); } static struct name_table quotatypes[] = { X(GRPQUOTA) X(USRQUOTA) XEND }; bool sysdecode_quotactl_cmd(FILE *fp, int cmd) { const char *primary, *type; primary = lookup_value(quotactlcmds, cmd >> SUBCMDSHIFT); if (primary == NULL) return (false); fprintf(fp, "QCMD(%s,", primary); type = lookup_value(quotatypes, cmd & SUBCMDMASK); if (type != NULL) fprintf(fp, "%s", type); else fprintf(fp, "%#x", cmd & SUBCMDMASK); fprintf(fp, ")"); return (true); } bool sysdecode_reboot_howto(FILE *fp, int howto, int *rem) { return (print_mask_int(fp, rebootopt, howto, rem)); } bool sysdecode_rfork_flags(FILE *fp, int flags, int *rem) { return (print_mask_int(fp, rforkflags, flags, rem)); } const char * sysdecode_rlimit(int resource) { return (lookup_value(rlimit, resource)); } const char * sysdecode_scheduler_policy(int policy) { return (lookup_value(schedpolicy, policy)); } bool sysdecode_sendfile_flags(FILE *fp, int flags, int *rem) { return (print_mask_int(fp, sendfileflags, flags, rem)); } bool sysdecode_shmat_flags(FILE *fp, int flags, int *rem) { return (print_mask_int(fp, shmatflags, flags, rem)); } const char * sysdecode_shutdown_how(int how) { return (lookup_value(shutdownhow, how)); } const char * sysdecode_sigbus_code(int si_code) { return (lookup_value(sigbuscode, si_code)); } const char * sysdecode_sigchld_code(int si_code) { return (lookup_value(sigchldcode, 
si_code)); } const char * sysdecode_sigfpe_code(int si_code) { return (lookup_value(sigfpecode, si_code)); } const char * sysdecode_sigill_code(int si_code) { return (lookup_value(sigillcode, si_code)); } const char * sysdecode_sigsegv_code(int si_code) { return (lookup_value(sigsegvcode, si_code)); } const char * sysdecode_sigtrap_code(int si_code) { return (lookup_value(sigtrapcode, si_code)); } const char * sysdecode_sigprocmask_how(int how) { return (lookup_value(sigprocmaskhow, how)); } const char * sysdecode_socketdomain(int domain) { return (lookup_value(sockdomain, domain)); } const char * sysdecode_socket_protocol(int domain, int protocol) { switch (domain) { case PF_INET: case PF_INET6: return (lookup_value(sockipproto, protocol)); default: return (NULL); } } const char * sysdecode_sockaddr_family(int sa_family) { return (lookup_value(sockfamily, sa_family)); } const char * sysdecode_ipproto(int protocol) { return (lookup_value(sockipproto, protocol)); } const char * sysdecode_sockopt_name(int level, int optname) { if (level == SOL_SOCKET) return (lookup_value(sockopt, optname)); if (level == IPPROTO_IP) /* XXX: UNIX domain socket options use a level of 0 also. */ return (lookup_value(sockoptip, optname)); if (level == IPPROTO_IPV6) return (lookup_value(sockoptipv6, optname)); if (level == IPPROTO_SCTP) return (lookup_value(sockoptsctp, optname)); if (level == IPPROTO_TCP) return (lookup_value(sockopttcp, optname)); if (level == IPPROTO_UDP) return (lookup_value(sockoptudp, optname)); if (level == IPPROTO_UDPLITE) return (lookup_value(sockoptudplite, optname)); return (NULL); } bool sysdecode_thr_create_flags(FILE *fp, int flags, int *rem) { return (print_mask_int(fp, thrcreateflags, flags, rem)); } const char * sysdecode_umtx_op(int op) { return (lookup_value(umtxop, op)); } const char * sysdecode_vmresult(int result) { return (lookup_value(vmresult, result)); } bool sysdecode_wait4_options(FILE *fp, int options, int *rem) { bool printed; int opt6; /* A flags value of 0 is normal. */ if (options == 0) { fputs("0", fp); if (rem != NULL) *rem = 0; return (true); } /* * These flags are implicit and aren't valid flags for wait4() * directly (though they don't fail with EINVAL). */ opt6 = options & (WEXITED | WTRAPPED); options &= ~opt6; printed = print_mask_int(fp, wait6opt, options, rem); if (rem != NULL) *rem |= opt6; return (printed); } bool sysdecode_wait6_options(FILE *fp, int options, int *rem) { return (print_mask_int(fp, wait6opt, options, rem)); } const char * sysdecode_whence(int whence) { return (lookup_value(seekwhence, whence)); } const char * sysdecode_fcntl_cmd(int cmd) { return (lookup_value(fcntlcmd, cmd)); } static struct name_table fcntl_fd_arg[] = { X(FD_CLOEXEC) X(0) XEND }; bool sysdecode_fcntl_arg_p(int cmd) { switch (cmd) { case F_GETFD: case F_GETFL: case F_GETOWN: return (false); default: return (true); } } void sysdecode_fcntl_arg(FILE *fp, int cmd, uintptr_t arg, int base) { int rem; switch (cmd) { case F_SETFD: if (!print_value(fp, fcntl_fd_arg, arg)) print_integer(fp, arg, base); break; case F_SETFL: if (!sysdecode_fcntl_fileflags(fp, arg, &rem)) fprintf(fp, "%#x", rem); else if (rem != 0) fprintf(fp, "|%#x", rem); break; case F_GETLK: case F_SETLK: case F_SETLKW: fprintf(fp, "%p", (void *)arg); break; default: print_integer(fp, arg, base); break; } } bool sysdecode_mmap_flags(FILE *fp, int flags, int *rem) { uintmax_t val; bool printed; int align; /* * MAP_ALIGNED can't be handled directly by print_mask_int(). 
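 * (The requested alignment is a shift count encoded in the bits covered
 * by MAP_ALIGNMENT_MASK rather than an independent flag bit.)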
* MAP_32BIT is also problematic since it isn't defined for * all platforms. */ printed = false; align = flags & MAP_ALIGNMENT_MASK; val = (unsigned)flags & ~MAP_ALIGNMENT_MASK; print_mask_part(fp, mmapflags, &val, &printed); #ifdef MAP_32BIT if (val & MAP_32BIT) { fprintf(fp, "%sMAP_32BIT", printed ? "|" : ""); printed = true; val &= ~MAP_32BIT; } #endif if (align != 0) { if (printed) fputc('|', fp); if (align == MAP_ALIGNED_SUPER) fputs("MAP_ALIGNED_SUPER", fp); else fprintf(fp, "MAP_ALIGNED(%d)", align >> MAP_ALIGNMENT_SHIFT); printed = true; } if (rem != NULL) *rem = val; return (printed); } const char * sysdecode_rtprio_function(int function) { return (lookup_value(rtpriofuncs, function)); } bool sysdecode_msg_flags(FILE *fp, int flags, int *rem) { return (print_mask_0(fp, msgflags, flags, rem)); } const char * sysdecode_sigcode(int sig, int si_code) { const char *str; str = lookup_value(sigcode, si_code); if (str != NULL) return (str); switch (sig) { case SIGILL: return (sysdecode_sigill_code(si_code)); case SIGBUS: return (sysdecode_sigbus_code(si_code)); case SIGSEGV: return (sysdecode_sigsegv_code(si_code)); case SIGFPE: return (sysdecode_sigfpe_code(si_code)); case SIGTRAP: return (sysdecode_sigtrap_code(si_code)); case SIGCHLD: return (sysdecode_sigchld_code(si_code)); default: return (NULL); } } bool sysdecode_umtx_cvwait_flags(FILE *fp, u_long flags, u_long *rem) { return (print_mask_0ul(fp, umtxcvwaitflags, flags, rem)); } bool sysdecode_umtx_rwlock_flags(FILE *fp, u_long flags, u_long *rem) { return (print_mask_0ul(fp, umtxrwlockflags, flags, rem)); } void sysdecode_cap_rights(FILE *fp, cap_rights_t *rightsp) { struct name_table *t; bool comma; comma = false; for (t = caprights; t->str != NULL; t++) { if (cap_rights_is_set(rightsp, t->val)) { fprintf(fp, "%s%s", comma ? "," : "", t->str); comma = true; } } } Index: projects/clang500-import/lib/libsysdecode/mktables =================================================================== --- projects/clang500-import/lib/libsysdecode/mktables (revision 319548) +++ projects/clang500-import/lib/libsysdecode/mktables (revision 319549) @@ -1,161 +1,162 @@ #!/bin/sh # # Copyright (c) 2006 "David Kirchner" . All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
# # $FreeBSD$ # # Generates tables.h # # Originally this script was 'mksubr' for kdump which generated a complete # C file along with function definitions. Now this script generates tables # of constants and names extracted from header files. set -e LC_ALL=C; export LC_ALL if [ -z "$1" ] then echo "usage: sh $0 include-dir [output-file]" exit 1 fi include_dir=$1 if [ -n "$2" ]; then output_file="$2" exec > "$output_file" fi all_headers= # # Generate a table C #definitions. The including file can define the # TABLE_NAME(n), TABLE_ENTRY(x), and TABLE_END macros to define what # the tables map to. # gen_table() { local name grep file excl filter name=$1 grep=$2 file=$3 excl=$4 if [ -z "$excl" ]; then filter="cat" else filter="egrep -v" fi all_headers="${all_headers:+${all_headers} }${file}" cat <<_EOF_ TABLE_START(${name}) _EOF_ egrep "^#[[:space:]]*define[[:space:]]+"${grep}"[[:space:]]*" \ $include_dir/$file | ${filter} ${excl} | \ awk '{ for (i = 1; i <= NF; i++) \ if ($i ~ /define/) \ break; \ ++i; \ printf "TABLE_ENTRY(%s)\n", $i }' cat <<_EOF_ TABLE_END _EOF_ } cat <<_EOF_ /* This file is auto-generated. */ _EOF_ gen_table "accessmode" "[A-Z]_OK[[:space:]]+0?x?[0-9A-Fa-f]+" "sys/unistd.h" gen_table "acltype" "ACL_TYPE_[A-Z4_]+[[:space:]]+0x[0-9]+" "sys/acl.h" gen_table "capfcntl" "CAP_FCNTL_[A-Z]+[[:space:]]+\(1" "sys/capsicum.h" gen_table "extattrns" "EXTATTR_NAMESPACE_[A-Z]+[[:space:]]+0x[0-9]+" "sys/extattr.h" gen_table "fadvisebehav" "POSIX_FADV_[A-Z]+[[:space:]]+[0-9]+" "sys/fcntl.h" gen_table "openflags" "O_[A-Z]+[[:space:]]+0x[0-9A-Fa-f]+" "sys/fcntl.h" "O_RDONLY|O_RDWR|O_WRONLY" gen_table "flockops" "LOCK_[A-Z]+[[:space:]]+0x[0-9]+" "sys/fcntl.h" gen_table "kldsymcmd" "KLDSYM_[A-Z]+[[:space:]]+[0-9]+" "sys/linker.h" gen_table "kldunloadfflags" "LINKER_UNLOAD_[A-Z]+[[:space:]]+[0-9]+" "sys/linker.h" gen_table "lio_listiomodes" "LIO_(NO)?WAIT[[:space:]]+[0-9]+" "aio.h" gen_table "madvisebehav" "_?MADV_[A-Z]+[[:space:]]+[0-9]+" "sys/mman.h" gen_table "minheritflags" "INHERIT_[A-Z]+[[:space:]]+[0-9]+" "sys/mman.h" gen_table "mlockallflags" "MCL_[A-Z]+[[:space:]]+0x[0-9]+" "sys/mman.h" gen_table "mmapprot" "PROT_[A-Z]+[[:space:]]+0x[0-9A-Fa-f]+" "sys/mman.h" gen_table "ngbtsolevel" "SOL_[A-Z0-9]+[[:space:]]+0x[0-9A-Fa-f]+" "netgraph/bluetooth/include/ng_btsocket.h" gen_table "fileflags" "[SU]F_[A-Z]+[[:space:]]+0x[0-9A-Fa-f]+" "sys/stat.h" "UF_COMPRESSED|UF_TRACKED|UF_SETTABLE|SF_SETTABLE" gen_table "filemode" "S_[A-Z]+[[:space:]]+[0-6]{7}" "sys/stat.h" gen_table "mountflags" "MNT_[A-Z]+[[:space:]]+0x[0-9]+" "sys/mount.h" gen_table "msyncflags" "MS_[A-Z]+[[:space:]]+0x[0-9]+" "sys/mman.h" gen_table "nfssvcflags" "NFSSVC_[A-Z0-9]+[[:space:]]+0x[0-9]+" "nfs/nfssvc.h" gen_table "prio" "PRIO_[A-Z]+[[:space:]]+[0-9]" "sys/resource.h" gen_table "procctlcmd" "PROC_[A-Z_]+[[:space:]]+[0-9]" "sys/procctl.h" "PROC_TRACE_CTL_" gen_table "ptraceop" "PT_[[:alnum:]_]+[[:space:]]+[0-9]+" "sys/ptrace.h" gen_table "quotactlcmds" "Q_[A-Z]+[[:space:]]+0x[0-9]+" "ufs/ufs/quota.h" gen_table "rebootopt" "RB_[A-Z]+[[:space:]]+0x[0-9]+" "sys/reboot.h" gen_table "rforkflags" "RF[A-Z]+[[:space:]]+\([0-9]+<<[0-9]+\)" "sys/unistd.h" gen_table "rlimit" "RLIMIT_[A-Z]+[[:space:]]+[0-9]+" "sys/resource.h" +gen_table "rusage" "RUSAGE_[A-Z]+[[:space:]]+[-0-9]+" "sys/resource.h" gen_table "schedpolicy" "SCHED_[A-Z]+[[:space:]]+[0-9]+" "sched.h" gen_table "sendfileflags" "SF_[A-Z]+[[:space:]]+[0-9]+" "sys/socket.h" gen_table "shmatflags" "SHM_[A-Z]+[[:space:]]+[0-9]{6}+" "sys/shm.h" gen_table "shutdownhow" 
"SHUT_[A-Z]+[[:space:]]+[0-9]+" "sys/socket.h" gen_table "sigbuscode" "BUS_[A-Z]+[[:space:]]+[0-9]+" "sys/signal.h" gen_table "sigchldcode" "CLD_[A-Z]+[[:space:]]+[0-9]+" "sys/signal.h" gen_table "sigfpecode" "FPE_[A-Z]+[[:space:]]+[0-9]+" "sys/signal.h" gen_table "sigprocmaskhow" "SIG_[A-Z]+[[:space:]]+[0-9]+" "sys/signal.h" gen_table "sigillcode" "ILL_[A-Z]+[[:space:]]+[0-9]+" "sys/signal.h" gen_table "sigsegvcode" "SEGV_[A-Z]+[[:space:]]+[0-9]+" "sys/signal.h" gen_table "sigtrapcode" "TRAP_[A-Z]+[[:space:]]+[0-9]+" "sys/signal.h" gen_table "sockdomain" "PF_[[:alnum:]]+[[:space:]]+" "sys/socket.h" gen_table "sockfamily" "AF_[[:alnum:]]+[[:space:]]+" "sys/socket.h" gen_table "sockipproto" "IPPROTO_[[:alnum:]]+[[:space:]]+" "netinet/in.h" gen_table "sockopt" "SO_[A-Z]+[[:space:]]+0x[0-9]+" "sys/socket.h" gen_table "sockoptip" "(IP_[[:alnum:]_]+|MCAST_[[:alnum:]_]+_GROUP)[[:space:]]+" "netinet/in.h" "IP_DEFAULT|IP_MIN|IP_MAX|IP_PORTRANGE" gen_table "sockoptipv6" "IPV6_[[:alnum:]_]+[[:space:]]+[0-9]+" "netinet6/in6.h" "IPV6_ADDR_|IPV6_TAG_DIRECT|IPV6_OPTIONS|IPV6_RECVOPTS|IPV6_RECVRETOPTS|IPV6_RECVDSTADDR|IPV6_RETOPTS|IPV6_2292|IPV6_RECVRTHDRDSTOPTS|IPV6_REACHCONF|IPV6_PKTOPTIONS" gen_table "sockoptsctp" "SCTP_[[:alnum:]_]+[[:space:]]+[0-9]+" "netinet/sctp.h" gen_table "sockopttcp" "TCP_[[:alnum:]_]+[[:space:]]+[0-9]+" "netinet/tcp.h" "TCP_MIN|TCP_MAX[^S]|TCP_MSS|TCP_[[:alnum:]_]+_MAX" gen_table "sockoptudp" "UDP_[[:alnum:]]+[[:space:]]+[0-9]+" "netinet/udp.h" "UDP_ENCAP_" gen_table "sockoptudplite" "UDPLITE_[[:alnum:]_]+[[:space:]]+[0-9]+" "netinet/udplite.h" gen_table "socktype" "SOCK_[A-Z]+[[:space:]]+[1-9]+[0-9]*" "sys/socket.h" gen_table "thrcreateflags" "THR_[A-Z]+[[:space:]]+0x[0-9]+" "sys/thr.h" gen_table "umtxop" "UMTX_OP_[[:alnum:]_]+[[:space:]]+[0-9]+" "sys/umtx.h" gen_table "vmprot" "VM_PROT_[A-Z]+[[:space:]]+\(\(vm_prot_t\)[[:space:]]+0x[0-9]+\)" "vm/vm.h" gen_table "vmresult" "KERN_[A-Z]+[[:space:]]+[0-9]+" "vm/vm_param.h" gen_table "wait6opt" "W[A-Z]+[[:space:]]+[0-9]+" "sys/wait.h" gen_table "seekwhence" "SEEK_[A-Z]+[[:space:]]+[0-9]+" "sys/unistd.h" gen_table "fcntlcmd" "F_[A-Z0-9_]+[[:space:]]+[0-9]+[[:space:]]+" "sys/fcntl.h" "F_CANCEL|F_..LCK" gen_table "mmapflags" "MAP_[A-Z_]+[[:space:]]+0x[0-9A-Fa-f]+" "sys/mman.h" gen_table "rtpriofuncs" "RTP_[A-Z]+[[:space:]]+[0-9]+" "sys/rtprio.h" gen_table "msgflags" "MSG_[A-Z]+[[:space:]]+0x[0-9]+" "sys/socket.h" "MSG_SOCALLBCK|MSG_MORETOCOME" gen_table "sigcode" "SI_[A-Z]+[[:space:]]+0(x[0-9abcdef]+)?" "sys/signal.h" gen_table "umtxcvwaitflags" "CVWAIT_[A-Z_]+[[:space:]]+0x[0-9]+" "sys/umtx.h" gen_table "umtxrwlockflags" "URWLOCK_PREFER_READER[[:space:]]+0x[0-9]+" "sys/umtx.h" gen_table "caprights" "CAP_[A-Z_]+[[:space:]]+CAPRIGHT\([0-9],[[:space:]]+0x[0-9]{16}ULL\)" "sys/capsicum.h" # Generate a .depend file for our output file if [ -n "$output_file" ]; then echo "$output_file: \\" > ".depend.$output_file" echo "$all_headers" | tr ' ' '\n' | sort -u | sed -e "s,^, $include_dir/," -e 's,$, \\,' >> \ ".depend.$output_file" echo >> ".depend.$output_file" fi Index: projects/clang500-import/lib/libsysdecode/sysdecode.3 =================================================================== --- projects/clang500-import/lib/libsysdecode/sysdecode.3 (revision 319548) +++ projects/clang500-import/lib/libsysdecode/sysdecode.3 (revision 319549) @@ -1,86 +1,87 @@ .\" .\" Copyright (c) 2015 John Baldwin .\" All rights reserved. 
.\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" $FreeBSD$ .\" -.Dd October 17, 2016 +.Dd June 3, 2017 .Dt SYSDECODE 3 .Os .Sh NAME .Nm sysdecode .Nd system argument decoding library .Sh LIBRARY .Lb libsysdecode .Sh SYNOPSIS .In sys/types.h .In stdbool.h .In sysdecode.h .Sh DESCRIPTION The .Nm library includes several functions that provide descriptive names of values associated with system calls. .Ss Supported ABIs Some functions in this library provide ABI-specific descriptions. The supported ABIs are named by the .Vt enum sysdecode_abi enumeration. .Pp .Bl -tag -width "Li SYSDECODE_ABI_CLOUDABI64" -compact .It Li SYSDECODE_ABI_FREEBSD Native FreeBSD binaries. Supported on all platforms. .It Li SYSDECODE_ABI_FREEBSD32 32-bit FreeBSD binaries. Supported on amd64 and powerpc64. .It Li SYSDECODE_ABI_LINUX Linux binaries of the same platform. Supported on amd64 and i386. .It Li SYSDECODE_ABI_LINUX32 32-bit Linux binaries. Supported on amd64. .It Li SYSDECODE_ABI_CLOUDABI64 64-bit CloudABI binaries. Supported on aarch64 and amd64. .It Li SYSDECODE_ABI_UNKNOWN A placeholder for use when the ABI is not known. .El .Sh SEE ALSO .Xr sysdecode_abi_to_freebsd_errno 3 , .Xr sysdecode_cap_rights 3 , .Xr sysdecode_enum 3 , .Xr sysdecode_fcntl_arg 3 , .Xr sysdecode_ioctlname 3 , .Xr sysdecode_mask 3 , .Xr sysdecode_quotactl_cmd 3 , .Xr sysdecode_sigcode 3 , +.Xr sysdecode_socket_protocol 3 , .Xr sysdecode_sockopt_name 3 , .Xr sysdecode_syscallnames 3 , .Xr sysdecode_utrace 3 .Sh HISTORY The .Nm library first appeared in .Fx 11.0 . Index: projects/clang500-import/lib/libsysdecode/sysdecode.h =================================================================== --- projects/clang500-import/lib/libsysdecode/sysdecode.h (revision 319548) +++ projects/clang500-import/lib/libsysdecode/sysdecode.h (revision 319549) @@ -1,118 +1,119 @@ /*- * Copyright (c) 2015 John H. Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef __SYSDECODE_H__ #define __SYSDECODE_H__ enum sysdecode_abi { SYSDECODE_ABI_UNKNOWN = 0, SYSDECODE_ABI_FREEBSD, SYSDECODE_ABI_FREEBSD32, SYSDECODE_ABI_LINUX, SYSDECODE_ABI_LINUX32, SYSDECODE_ABI_CLOUDABI64 }; int sysdecode_abi_to_freebsd_errno(enum sysdecode_abi _abi, int _error); bool sysdecode_access_mode(FILE *_fp, int _mode, int *_rem); const char *sysdecode_acltype(int _type); const char *sysdecode_atfd(int _fd); bool sysdecode_cap_fcntlrights(FILE *_fp, uint32_t _rights, uint32_t *_rem); void sysdecode_cap_rights(FILE *_fp, cap_rights_t *_rightsp); const char *sysdecode_extattrnamespace(int _namespace); const char *sysdecode_fadvice(int _advice); void sysdecode_fcntl_arg(FILE *_fp, int _cmd, uintptr_t _arg, int _base); bool sysdecode_fcntl_arg_p(int _cmd); const char *sysdecode_fcntl_cmd(int _cmd); bool sysdecode_fcntl_fileflags(FILE *_fp, int _flags, int *_rem); bool sysdecode_fileflags(FILE *_fp, fflags_t _flags, fflags_t *_rem); bool sysdecode_filemode(FILE *_fp, int _mode, int *_rem); bool sysdecode_flock_operation(FILE *_fp, int _operation, int *_rem); int sysdecode_freebsd_to_abi_errno(enum sysdecode_abi _abi, int _error); const char *sysdecode_getfsstat_mode(int _mode); +const char *sysdecode_getrusage_who(int _who); const char *sysdecode_idtype(int _idtype); const char *sysdecode_ioctlname(unsigned long _val); const char *sysdecode_ipproto(int _protocol); const char *sysdecode_kldsym_cmd(int _cmd); const char *sysdecode_kldunload_flags(int _flags); const char *sysdecode_lio_listio_mode(int _mode); const char *sysdecode_madvice(int _advice); const char *sysdecode_minherit_inherit(int _inherit); const char *sysdecode_msgctl_cmd(int _cmd); bool sysdecode_mlockall_flags(FILE *_fp, int _flags, int *_rem); bool sysdecode_mmap_flags(FILE *_fp, int _flags, int *_rem); bool sysdecode_mmap_prot(FILE *_fp, int _prot, int *_rem); bool sysdecode_mount_flags(FILE *_fp, int _flags, int *_rem); bool sysdecode_msg_flags(FILE *_fp, int _flags, int *_rem); bool sysdecode_msync_flags(FILE *_fp, int _flags, int *_rem); const char *sysdecode_nfssvc_flags(int _flags); bool sysdecode_open_flags(FILE *_fp, int _flags, int *_rem); bool sysdecode_pipe2_flags(FILE *_fp, int _flags, int *_rem); const char *sysdecode_prio_which(int _which); const char *sysdecode_procctl_cmd(int _cmd); const char *sysdecode_ptrace_request(int _request); bool sysdecode_quotactl_cmd(FILE *_fp, int _cmd); bool sysdecode_reboot_howto(FILE *_fp, int _howto, int *_rem); bool sysdecode_rfork_flags(FILE *_fp, int _flags, int *_rem); const char 
*sysdecode_rlimit(int _resource); const char *sysdecode_rtprio_function(int _function); const char *sysdecode_scheduler_policy(int _policy); const char *sysdecode_semctl_cmd(int _cmd); bool sysdecode_semget_flags(FILE *_fp, int _flag, int *_rem); bool sysdecode_sendfile_flags(FILE *_fp, int _flags, int *_rem); bool sysdecode_shmat_flags(FILE *_fp, int _flags, int *_rem); const char *sysdecode_shmctl_cmd(int _cmd); const char *sysdecode_shutdown_how(int _how); const char *sysdecode_sigbus_code(int _si_code); const char *sysdecode_sigchld_code(int _si_code); const char *sysdecode_sigcode(int _sig, int _si_code); const char *sysdecode_sigfpe_code(int _si_code); const char *sysdecode_sigill_code(int _si_code); const char *sysdecode_signal(int _sig); const char *sysdecode_sigprocmask_how(int _how); const char *sysdecode_sigsegv_code(int _si_code); const char *sysdecode_sigtrap_code(int _si_code); const char *sysdecode_sockaddr_family(int _sa_family); const char *sysdecode_socketdomain(int _domain); const char *sysdecode_socket_protocol(int _domain, int _protocol); bool sysdecode_socket_type(FILE *_fp, int _type, int *_rem); const char *sysdecode_sockopt_level(int _level); const char *sysdecode_sockopt_name(int _level, int _optname); const char *sysdecode_syscallname(enum sysdecode_abi _abi, unsigned int _code); bool sysdecode_thr_create_flags(FILE *_fp, int _flags, int *_rem); bool sysdecode_umtx_cvwait_flags(FILE *_fp, u_long _flags, u_long *_rem); const char *sysdecode_umtx_op(int _op); bool sysdecode_umtx_rwlock_flags(FILE *_fp, u_long _flags, u_long *_rem); int sysdecode_utrace(FILE *_fp, void *_buf, size_t _len); bool sysdecode_vmprot(FILE *_fp, int _type, int *_rem); const char *sysdecode_vmresult(int _result); bool sysdecode_wait4_options(FILE *_fp, int _options, int *_rem); bool sysdecode_wait6_options(FILE *_fp, int _options, int *_rem); const char *sysdecode_whence(int _whence); #endif /* !__SYSDECODE_H__ */ Index: projects/clang500-import/lib/libsysdecode/sysdecode_enum.3 =================================================================== --- projects/clang500-import/lib/libsysdecode/sysdecode_enum.3 (revision 319548) +++ projects/clang500-import/lib/libsysdecode/sysdecode_enum.3 (revision 319549) @@ -1,239 +1,243 @@ .\" .\" Copyright (c) 2016 John Baldwin .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" $FreeBSD$ .\" -.Dd January 2, 2017 +.Dd June 3, 2017 .Dt sysdecode_enum 3 .Os .Sh NAME .Nm sysdecode_enum , .Nm sysdecode_acltype , .Nm sysdecode_atfd , .Nm sysdecode_extattrnamespace , .Nm sysdecode_fadvice , .Nm sysdecode_fcntl_cmd , .Nm sysdecode_getfsstat_mode , +.Nm sysdecode_getrusage_who , .Nm sysdecode_idtype , .Nm sysdecode_ipproto , .Nm sysdecode_kldsym_cmd , .Nm sysdecode_kldunload_flags , .Nm sysdecode_lio_listio_mode , .Nm sysdecode_madvice , .Nm sysdecode_minherit_flags , .Nm sysdecode_msgctl_cmd , .Nm sysdecode_nfssvc_flags , .Nm sysdecode_prio_which , .Nm sysdecode_procctl_cmd , .Nm sysdecode_ptrace_request , .Nm sysdecode_rlimit , .Nm sysdecode_rtprio_function , .Nm sysdecode_scheduler_policy , .Nm sysdecode_semctl_cmd , .Nm sysdecode_shmctl_cmd , .Nm sysdecode_shutdown_how , .Nm sysdecode_sigbus_code , .Nm sysdecode_sigchld_code , .Nm sysdecode_sigfpe_code , .Nm sysdecode_sigill_code , .Nm sysdecode_signal , .Nm sysdecode_sigprocmask_how , .Nm sysdecode_sigsegv_code , .Nm sysdecode_sigtrap_code , .Nm sysdecode_sockaddr_family , .Nm sysdecode_socketdomain , .Nm sysdecode_sockettype , .Nm sysdecode_sockopt_level , .Nm sysdecode_umtx_op , .Nm sysdecode_vmresult , .Nm sysdecode_whence .Nd lookup name of various enumerated values .Sh LIBRARY .Lb libsysdecode .Sh SYNOPSIS .In sys/types.h .In stdbool.h .In sysdecode.h .Ft const char * .Fn sysdecode_acltype "int type" .Ft const char * .Fn sysdecode_atfd "int fd" .Ft const char * .Fn sysdecode_extattrnamespace "int namespace" .Ft const char * .Fn sysdecode_fadvice "int advice" .Ft const char * .Fn sysdecode_fcntl_cmd "int cmd" .Ft const char * .Fn sysdecode_getfsstat_mode "int mode" .Ft const char * +.Fn sysdecode_getrusage_who "int who" +.Ft const char * .Fn sysdecode_idtype "int idtype" .Ft const char * .Fn sysdecode_ipproto "int protocol" .Ft const char * .Fn sysdecode_kldsym_cmd "int cmd" .Ft const char * .Fn sysdecode_kldunload_flags "int flags" .Ft const char * .Fn sysdecode_lio_listio_mode "int mode" .Ft const char * .Fn sysdecode_madvice "int advice" .Ft const char * .Fn sysdecode_minherit_flags "int inherit" .Ft const char * .Fn sysdecode_msgctl_cmd "int cmd" .Ft const char * .Fn sysdecode_nfssvc_flags "int flags" .Ft const char * .Fn sysdecode_prio_which "int which" .Ft const char * .Fn sysdecode_procctl_cmd "int cmd" .Ft const char * .Fn sysdecode_ptrace_request "int request" .Ft const char * .Fn sysdecode_rlimit "int resource" .Ft const char * .Fn sysdecode_rtprio_function "int function" .Ft const char * .Fn sysdecode_scheduler_policy "int policy" .Ft const char * .Fn sysdecode_semctl_cmd "int cmd" .Ft const char * .Fn sysdecode_shmctl_cmd "int cmd" .Ft const char * .Fn sysdecode_shutdown_how "int how" .Ft const char * .Fn sysdecode_sigbus_code "int si_code" .Ft const char * .Fn sysdecode_sigchld_code "int si_code" .Ft const char * .Fn sysdecode_sigfpe_code "int si_code" .Ft const char * .Fn sysdecode_sigill_code "int si_code" .Ft const char * .Fn sysdecode_signal "int sig" .Ft const char 
* .Fn sysdecode_sigprocmask_how "int how" .Ft const char * .Fn sysdecode_sigsegv_code "int si_code" .Ft const char * .Fn sysdecode_sigtrap_code "int si_code" .Ft const char * .Fn sysdecode_sockaddr_family "int sa_family" .Ft const char * .Fn sysdecode_socketdomain "int domain" .Ft const char * .Fn sysdecode_sockettype "int type" .Ft const char * .Fn sysdecode_sockopt_level "int level" .Ft const char * .Fn sysdecode_umtx_op "int op" .Ft const char * .Fn sysdecode_vmresult "int result" .Ft const char * .Fn sysdecode_whence "int whence" .Sh DESCRIPTION The .Nm functions return a text description of an integer value. The text description matches the name of a C macro with the same value as the sole function argument. .Dv NULL is returned if there is no matching C macro name. .Pp Most of these functions decode an argument passed to a system call: .Bl -column "Fn sysdecode_extattrnamespace" "Xr sched_setscheduler 2" .It Sy Function Ta Sy System Call Ta Sy Argument .It Fn sysdecode_acltype Ta Xr acl_get_file 3 Ta Fa type .It Fn sysdecode_atfd Ta Xr openat 2 Ta Fa fd .It Fn sysdecode_extattrnamespace Ta Xr extattr_get_fd 2 Ta Fa attrnamespace .It Fn sysdecode_fadvice Ta Xr posix_fadvise 2 Ta Fa advice .It Fn sysdecode_fcntl_cmd Ta Xr fcntl 2 Ta Fa cmd .It Fn sysdecode_getfsstat_mode Ta Xr getfsstat 2 Ta Fa mode .It Fn sysdecode_idtype Ta .Xr procctl 2 , .Xr waitid 2 .Ta Fa idtype .It Fn sysdecode_kldsym_cmd Ta Xr kldsym 2 Ta Fa cmd .It Fn sysdecode_kldunload_flags Ta Xr kldunloadf 2 Ta Fa flags .It Fn sysdecode_lio_listio_mode Ta Xr lio_listio 2 Ta Fa mode .It Fn sysdecode_madvice Ta Xr madvise 2 Ta Fa advice .It Fn sysdecode_minherit_inherit Ta Xr minherit 2 Ta Fa inherit .It Fn sysdecode_msgctl_cmd Ta Xr msgctl 2 Ta Fa cmd .It Fn sysdecode_nfssvc_flags Ta Xr nfssvc 2 Ta Fa flags .It Fn sysdecode_prio_which Ta Xr getpriority 2 Ta Fa which .It Fn sysdecode_procctl_cmd Ta Xr procctl 2 Ta Fa cmd .It Fn sysdecode_ptrace_request Ta Xr ptrace 2 Ta Fa request .It Fn sysdecode_rlimit Ta Xr getrlimit 2 Ta Fa resource .It Fn sysdecode_rtprio_function Ta Xr rtprio 2 Ta Fa function +.It Fn sysdecode_getrusage_who Ta Xr getrusage 2 Ta Fa who .It Fn sysdecode_scheduler_policy Ta Xr sched_setscheduler 2 Ta Fa policy .It Fn sysdecode_semctl_cmd Ta Xr semctl 2 Ta Fa cmd .It Fn sysdecode_shmctl_cmd Ta Xr shmctl 2 Ta Fa cmd .It Fn sysdecode_shutdown_how Ta Xr shutdown 2 Ta Fa how .It Fn sysdecode_sigprocmask_how Ta Xr sigprocmask 2 Ta Fa how .It Fn sysdecode_sockopt_level Ta Xr getsockopt 2 Ta Fa level .It Fn sysdecode_umtx_op Ta Xr _umtx_op 2 Ta Fa op .It Fn sysdecode_whence Ta Xr lseek 2 Ta Fa whence .El .Pp These functions decode signal-specific signal codes stored in the .Fa si_code field of the .Vt siginfo_t object associated with an instance of a signal: .Bl -column "Fn sysdecode_sigchld_code" .It Sy Function Ta Sy Signal .It Fn sysdecode_sigbus_code Ta Dv SIGBUS .It Fn sysdecode_sigchld_code Ta Dv SIGCHLD .It Fn sysdecode_sigfpe_code Ta Dv SIGFPE .It Fn sysdecode_sigill_code Ta Dv SIGILL .It Fn sysdecode_sigsegv_code Ta Dv SIGSEGV .It Fn sysdecode_sigtrap_code Ta Dv SIGTRAP .El .Pp Other functions decode the values described below: .Bl -tag -width "Fn sysdecode_sockaddr_family" .It Fn sysdecode_ipproto An IP protocol. .It Fn sysdecode_signal A process signal. .It Fn sysdecode_sockaddr_family A socket address family. .It Fn sysdecode_socketdomain A socket domain.
.It Fn sysdecode_vmresult The return value of a function in the virtual memory subsystem of the kernel indicating the status of the associated request. .El .Sh RETURN VALUES The .Nm functions return the name of a matching C macro or .Dv NULL if no matching C macro was found. .Sh SEE ALSO .Xr sysdecode 3 , .Xr sysdecode_mask 3 , .Xr sysdecode_sigcode 3 Index: projects/clang500-import/share/man/man7/ascii.7 =================================================================== --- projects/clang500-import/share/man/man7/ascii.7 (revision 319548) +++ projects/clang500-import/share/man/man7/ascii.7 (revision 319549) @@ -1,153 +1,153 @@ .\" Copyright (c) 1989, 1990, 1993 .\" The Regents of the University of California. All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 3. Neither the name of the University nor the names of its contributors .\" may be used to endorse or promote products derived from this software .\" without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" @(#)ascii.7 8.1 (Berkeley) 6/5/93 .\" $FreeBSD$ .\" .Dd March 10, 2017 .Dt ASCII 7 .Os .Sh NAME .Nm ascii .Nd octal, hexadecimal, decimal and binary .Tn ASCII character sets .Sh DESCRIPTION The .Nm octal set: .Bd -literal -offset left 000 NUL 001 SOH 002 STX 003 ETX 004 EOT 005 ENQ 006 ACK 007 BEL 010 BS 011 HT 012 LF 013 VT 014 FF 015 CR 016 SO 017 SI 020 DLE 021 DC1 022 DC2 023 DC3 024 DC4 025 NAK 026 SYN 027 ETB 030 CAN 031 EM 032 SUB 033 ESC 034 FS 035 GS 036 RS 037 US 040 SP 041 ! 042 " 043 # 044 $ 045 % 046 & 047 ' 050 ( 051 ) 052 * 053 + 054 , 055 - 056 . 057 / 060 0 061 1 062 2 063 3 064 4 065 5 066 6 067 7 070 8 071 9 072 : 073 ; 074 < 075 = 076 > 077 ? 
100 @ 101 A 102 B 103 C 104 D 105 E 106 F 107 G 110 H 111 I 112 J 113 K 114 L 115 M 116 N 117 O 120 P 121 Q 122 R 123 S 124 T 125 U 126 V 127 W 130 X 131 Y 132 Z 133 [ 134 \e\ 135 ] 136 ^ 137 _ 140 ` 141 a 142 b 143 c 144 d 145 e 146 f 147 g 150 h 151 i 152 j 153 k 154 l 155 m 156 n 157 o 160 p 161 q 162 r 163 s 164 t 165 u 166 v 167 w 170 x 171 y 172 z 173 { 174 | 175 } 176 ~ 177 DEL .Ed .Pp The .Nm hexadecimal set: .Bd -literal -offset left 00 NUL 01 SOH 02 STX 03 ETX 04 EOT 05 ENQ 06 ACK 07 BEL -08 BS 09 HT 0A LF 0B VT 0C FF 0D CR 0E SO 0F SI +08 BS 09 HT 0a LF 0b VT 0c FF 0d CR 0e SO 0f SI 10 DLE 11 DC1 12 DC2 13 DC3 14 DC4 15 NAK 16 SYN 17 ETB -18 CAN 19 EM 1A SUB 1B ESC 1C FS 1D GS 1E RS 1F US +18 CAN 19 EM 1a SUB 1b ESC 1c FS 1d GS 1e RS 1f US 20 SP 21 ! 22 " 23 # 24 $ 25 % 26 & 27 ' 28 ( 29 ) 2a * 2b + 2c , 2d - 2e . 2f / 30 0 31 1 32 2 33 3 34 4 35 5 36 6 37 7 38 8 39 9 3a : 3b ; 3c < 3d = 3e > 3f ? 40 @ 41 A 42 B 43 C 44 D 45 E 46 F 47 G 48 H 49 I 4a J 4b K 4c L 4d M 4e N 4f O 50 P 51 Q 52 R 53 S 54 T 55 U 56 V 57 W 58 X 59 Y 5a Z 5b [ 5c \e\ 5d ] 5e ^ 5f _ 60 \` 61 a 62 b 63 c 64 d 65 e 66 f 67 g 68 h 69 i 6a j 6b k 6c l 6d m 6e n 6f o 70 p 71 q 72 r 73 s 74 t 75 u 76 v 77 w 78 x 79 y 7a z 7b { 7c | 7d } 7e ~ 7f DEL .Ed .Pp The .Nm decimal set: .Bd -literal -offset left 0 NUL 1 SOH 2 STX 3 ETX 4 EOT 5 ENQ 6 ACK 7 BEL 8 BS 9 HT 10 LF 11 VT 12 FF 13 CR 14 SO 15 SI 16 DLE 17 DC1 18 DC2 19 DC3 20 DC4 21 NAK 22 SYN 23 ETB 24 CAN 25 EM 26 SUB 27 ESC 28 FS 29 GS 30 RS 31 US 32 SP 33 ! 34 " 35 # 36 $ 37 % 38 & 39 ' 40 ( 41 ) 42 * 43 + 44 , 45 - 46 . 47 / 48 0 49 1 50 2 51 3 52 4 53 5 54 6 55 7 56 8 57 9 58 : 59 ; 60 < 61 = 62 > 63 ? 64 @ 65 A 66 B 67 C 68 D 69 E 70 F 71 G 72 H 73 I 74 J 75 K 76 L 77 M 78 N 79 O 80 P 81 Q 82 R 83 S 84 T 85 U 86 V 87 W 88 X 89 Y 90 Z 91 [ 92 \e\ 93 ] 94 ^ 95 _ 96 ` 97 a 98 b 99 c 100 d 101 e 102 f 103 g 104 h 105 i 106 j 107 k 108 l 109 m 110 n 111 o 112 p 113 q 114 r 115 s 116 t 117 u 118 v 119 w 120 x 121 y 122 z 123 { 124 | 125 } 126 ~ 127 DEL .Ed .Pp The .Nm binary set: .Bd -literal -offset left 00 01 10 11 NUL SP @ ` 00000 SOH ! A a 00001 STX " B b 00010 ETX # C c 00011 EOT $ D d 00100 ENQ % E e 00101 ACK & F f 00110 BEL ' G g 00111 BS ( H h 01000 HT ) I i 01001 LF * J j 01010 VT + K k 01011 FF , L l 01100 CR - M m 01101 SO . N n 01110 SI / O o 01111 DLE 0 P p 10000 DC1 1 Q q 10001 DC2 2 R r 10010 DC3 3 S s 10011 DC4 4 T t 10100 NAK 5 U u 10101 SYN 6 V v 10110 ETB 7 W w 10111 CAN 8 X x 11000 EM 9 Y y 11001 SUB : Z z 11010 ESC ; [ { 11011 FS < \ | 11100 GS = ] } 11101 RS > ^ ~ 11110 US ? _ DEL 11111 .Ed .Sh FILES .Bl -tag -width /usr/share/misc/ascii -compact .It Pa /usr/share/misc/ascii .El .Sh HISTORY An .Nm manual page appeared in .At v7 .
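Returning to the sysdecode_enum(3) page above: every function there is a pure value-to-name lookup, so a caller only has to handle the NULL case. A minimal caller sketch, not part of this patch, assuming only the prototypes shown in the SYNOPSIS and linking with -lsysdecode:

	#include <sys/types.h>
	#include <stdbool.h>
	#include <fcntl.h>
	#include <signal.h>
	#include <stdio.h>
	#include <sysdecode.h>

	int
	main(void)
	{
		/* Both lookups should yield macro names: "F_SETFD" and "SIGTERM". */
		const char *cmd = sysdecode_fcntl_cmd(F_SETFD);
		const char *sig = sysdecode_signal(SIGTERM);

		printf("fcntl cmd: %s\n", cmd != NULL ? cmd : "<unknown>");
		printf("signal:    %s\n", sig != NULL ? sig : "<unknown>");
		return (0);
	}

A value with no matching macro comes back as NULL rather than as a formatted number, so callers that want a numeric fallback (as kdump(1) and truss(1) do) must format it themselves.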
Index: projects/clang500-import/share/mk/local.meta.sys.mk =================================================================== --- projects/clang500-import/share/mk/local.meta.sys.mk (revision 319548) +++ projects/clang500-import/share/mk/local.meta.sys.mk (revision 319549) @@ -1,288 +1,285 @@ # $FreeBSD$ # local configuration specific to meta mode # XXX some of this should be in meta.sys.mk # we assume that MK_DIRDEPS_BUILD=yes # we need this until there is an alternative MK_INSTALL_AS_USER= yes _default_makeobjdir=$${.CURDIR:S,^$${SRCTOP},$${OBJTOP},} .if empty(OBJROOT) || ${.MAKE.LEVEL} == 0 .if defined(MAKEOBJDIRPREFIX) && !empty(MAKEOBJDIRPREFIX) # put things approximately where they want OBJROOT:=${MAKEOBJDIRPREFIX}${SRCTOP}/ MAKEOBJDIRPREFIX= .export MAKEOBJDIRPREFIX .endif .if empty(MAKEOBJDIR) # OBJTOP set below MAKEOBJDIR=${_default_makeobjdir} # export but do not track .export-env MAKEOBJDIR # Expand for our own use MAKEOBJDIR:= ${MAKEOBJDIR} .endif .if !empty(SB) SB_OBJROOT ?= ${SB}/obj/ # this is what we use below OBJROOT ?= ${SB_OBJROOT} .endif OBJROOT ?= /usr/obj${SRCTOP}/ .if ${OBJROOT:M*/} != "" OBJROOT:= ${OBJROOT:H:tA}/ .else OBJROOT:= ${OBJROOT:H:tA}/${OBJROOT:T} .endif .export OBJROOT SRCTOP # we need HOST_TARGET etc below. .include .export HOST_TARGET .endif # from src/Makefile (for universe) TARGET_ARCHES_arm?= arm armeb armv6 TARGET_ARCHES_arm64?= aarch64 TARGET_ARCHES_mips?= mipsel mips mips64el mips64 mipsn32 mipsn32el TARGET_ARCHES_powerpc?= powerpc powerpc64 powerpcspe TARGET_ARCHES_riscv?= riscv64 riscv64sf # some corner cases BOOT_MACHINE_DIR.amd64 = boot/i386 MACHINE_ARCH.host = ${_HOST_ARCH} # the list of machines we support ALL_MACHINE_LIST?= amd64 arm arm64 i386 mips powerpc riscv sparc64 .for m in ${ALL_MACHINE_LIST:O:u} MACHINE_ARCH_LIST.$m?= ${TARGET_ARCHES_${m}:U$m} MACHINE_ARCH.$m?= ${MACHINE_ARCH_LIST.$m:[1]} BOOT_MACHINE_DIR.$m ?= boot/$m .endfor .ifndef _TARGET_SPEC .if empty(MACHINE_ARCH) .if !empty(TARGET_ARCH) MACHINE_ARCH= ${TARGET_ARCH} .else MACHINE_ARCH= ${MACHINE_ARCH.${MACHINE}} .endif .endif MACHINE_ARCH?= ${MACHINE_ARCH.${MACHINE}} MACHINE_ARCH:= ${MACHINE_ARCH} .else # we got here via dirdeps MACHINE_ARCH:= ${MACHINE_ARCH.${MACHINE}} .endif # now because for universe we want to potentially # build for multiple MACHINE_ARCH per MACHINE # we need more than MACHINE in TARGET_SPEC TARGET_SPEC_VARS= MACHINE MACHINE_ARCH # see dirdeps.mk .if ${TARGET_SPEC:Uno:M*,*} != "" _tspec := ${TARGET_SPEC:S/,/ /g} MACHINE := ${_tspec:[1]} MACHINE_ARCH := ${_tspec:[2]} # etc. # We need to stop that TARGET_SPEC affecting any submakes # and deal with MACHINE=${TARGET_SPEC} in the environment. 
TARGET_SPEC= # export but do not track .export-env TARGET_SPEC .export ${TARGET_SPEC_VARS} .for v in ${TARGET_SPEC_VARS:O:u} .if empty($v) .undef $v .endif .endfor .endif # make sure we know what TARGET_SPEC is # as we may need it to find Makefile.depend* TARGET_SPEC = ${TARGET_SPEC_VARS:@v@${$v:U}@:ts,} # to be consistent with src/Makefile just concatenate with '.'s TARGET_OBJ_SPEC:= ${TARGET_SPEC:S;,;.;g} OBJTOP:= ${OBJROOT}${TARGET_OBJ_SPEC} .if defined(MAKEOBJDIR) .if ${MAKEOBJDIR:M*/*} == "" .error Cannot use MAKEOBJDIR=${MAKEOBJDIR}${.newline}Unset MAKEOBJDIR to get default: MAKEOBJDIR='${_default_makeobjdir}' .endif .endif HOST_OBJTOP ?= ${OBJROOT}${HOST_TARGET} .if ${OBJTOP} == ${HOST_OBJTOP} || ${REQUESTED_MACHINE:U${MACHINE}} == "host" MACHINE= host .if ${TARGET_MACHINE:Uno} == ${HOST_TARGET} # not what we want TARGET_MACHINE= host .endif .endif .if ${MACHINE} == "host" OBJTOP := ${HOST_OBJTOP} .endif .if ${.MAKE.LEVEL} == 0 PYTHON ?= /usr/local/bin/python .export PYTHON # this works best if share/mk is ready for it. BUILD_AT_LEVEL0= no # _SKIP_BUILD is not 100% as it requires wrapping all 'all:' targets to avoid # building in MAKELEVEL0. Just prohibit 'all' entirely in this case to avoid # problems. .if ${MK_DIRDEPS_BUILD} == "yes" && \ ${.MAKE.LEVEL} == 0 && ${BUILD_AT_LEVEL0:Uyes:tl} == "no" .MAIN: dirdeps .if make(all) .error DIRDEPS_BUILD: Please run '${MAKE}' instead of '${MAKE} all'. .endif .endif # we want to end up with a single stage tree for all machines .if ${MK_STAGING} == "yes" .if empty(STAGE_ROOT) STAGE_ROOT?= ${OBJROOT}stage .export STAGE_ROOT .endif .endif .endif .if ${MK_STAGING} == "yes" .if ${MACHINE} == "host" STAGE_MACHINE= ${HOST_TARGET} .else STAGE_MACHINE:= ${TARGET_OBJ_SPEC} .endif STAGE_OBJTOP:= ${STAGE_ROOT}/${STAGE_MACHINE} STAGE_COMMON_OBJTOP:= ${STAGE_ROOT}/common STAGE_TARGET_OBJTOP:= ${STAGE_ROOT}/${TARGET_OBJ_SPEC} STAGE_HOST_OBJTOP:= ${STAGE_ROOT}/${HOST_TARGET} # These are exported for hooking in out-of-tree builds. They will always # be overridden in sub-makes above when building in-tree. .export STAGE_OBJTOP STAGE_TARGET_OBJTOP STAGE_HOST_OBJTOP # Use tools/install.sh which can avoid the need for xinstall for simple cases. INSTALL?= sh ${SRCTOP}/tools/install.sh # This is for stage-install to pick up from the environment. REAL_INSTALL:= ${INSTALL} .export REAL_INSTALL STAGE_INSTALL= sh ${.PARSEDIR:tA}/stage-install.sh OBJDIR=${.OBJDIR:tA} STAGE_LIBDIR= ${STAGE_OBJTOP}${_LIBDIR:U${LIBDIR:U/lib}} STAGE_INCLUDEDIR= ${STAGE_OBJTOP}${INCLUDEDIR:U/usr/include} # this is not the same as INCLUDEDIR STAGE_INCSDIR= ${STAGE_OBJTOP}${INCSDIR:U/include} # the target is usually an absolute path STAGE_SYMLINKS_DIR= ${STAGE_OBJTOP} LDFLAGS_LAST+= -Wl,-rpath-link,${STAGE_LIBDIR} .if ${MK_SYSROOT} == "yes" SYSROOT?= ${STAGE_OBJTOP} .else LDFLAGS_LAST+= -L${STAGE_LIBDIR} .endif .endif # MK_STAGING # this is sufficient for most of the tree. .MAKE.DEPENDFILE_DEFAULT = ${.MAKE.DEPENDFILE_PREFIX} # but if we have a machine qualified file it should be used in preference .MAKE.DEPENDFILE_PREFERENCE = \ ${.MAKE.DEPENDFILE_PREFIX}.${MACHINE} \ ${.MAKE.DEPENDFILE_PREFIX} .undef .MAKE.DEPENDFILE .include "sys.dependfile.mk" .if ${.MAKE.LEVEL} > 0 && ${MACHINE} == "host" && ${.MAKE.DEPENDFILE:E} != "host" # we can use this but should not update it.
UPDATE_DEPENDFILE= NO .endif # define the list of places that contain files we are responsible for .MAKE.META.BAILIWICK = ${SB} ${OBJROOT} ${STAGE_ROOT} CSU_DIR.${MACHINE_ARCH} ?= csu/${MACHINE_ARCH} CSU_DIR := ${CSU_DIR.${MACHINE_ARCH}} .if !empty(TIME_STAMP) TRACER= ${TIME_STAMP} ${:U} .endif .if !defined(_RECURSING_PROGS) && !defined(_RECURSING_CRUNCH) && \ !make(print-dir) WITH_META_STATS= t .endif # toolchains can be a pain - especially bootstrapping them .if ${MACHINE} == "host" MK_SHARED_TOOLCHAIN= no .endif TOOLCHAIN_VARS= AS AR CC CLANG_TBLGEN CXX CPP LD NM OBJCOPY RANLIB \ STRINGS SIZE LLVM_TBLGEN _toolchain_bin_CLANG_TBLGEN= /usr/bin/clang-tblgen _toolchain_bin_LLVM_TBLGEN= /usr/bin/llvm-tblgen _toolchain_bin_CXX= /usr/bin/c++ .ifdef WITH_TOOLSDIR TOOLSDIR?= ${HOST_OBJTOP}/tools .elif defined(STAGE_HOST_OBJTOP) TOOLSDIR?= ${STAGE_HOST_OBJTOP} .endif # Only define if it exists in case the user didn't run bootstrap-tools. # Otherwise the tool will be built during the build. Building it assumes it is # TARGET==MACHINE. .if exists(${HOST_OBJTOP}/tools${.CURDIR}) BTOOLSPATH= ${HOST_OBJTOP}/tools${.CURDIR} .endif # Don't use the bootstrap tools logic on itself. .if ${.TARGETS:Mbootstrap-tools} == "" && \ !make(showconfig) && \ !defined(BOOTSTRAPPING_TOOLS) && !empty(TOOLSDIR) && ${.MAKE.LEVEL} == 0 .for dir in /sbin /bin /usr/sbin /usr/bin PATH:= ${TOOLSDIR}${dir}:${PATH} .endfor .export PATH # Prefer the TOOLSDIR version of the toolchain if present vs the host version. .for var in ${TOOLCHAIN_VARS} _toolchain_bin.${var}= ${TOOLSDIR}${_toolchain_bin_${var}:U/usr/bin/${var:tl}} .if exists(${_toolchain_bin.${var}}) HOST_${var}?= ${_toolchain_bin.${var}} .export HOST_${var} .endif .endfor .endif .for var in ${TOOLCHAIN_VARS} HOST_${var}?= ${_toolchain_bin_${var}:U/usr/bin/${var:tl}} .endfor .if ${MACHINE} == "host" .for var in ${TOOLCHAIN_VARS} ${var}= ${HOST_${var}} .endfor .endif .if ${MACHINE:Nhost:Ncommon} != "" && ${MACHINE} != ${HOST_MACHINE} # cross-building .if !defined(FREEBSD_REVISION) FREEBSD_REVISION!= sed -n '/^REVISION=/{s,.*=,,;s,",,g;p; }' ${SRCTOP}/sys/conf/newvers.sh .export FREEBSD_REVISION .endif CROSS_TARGET_FLAGS= -target ${MACHINE_ARCH}-unknown-freebsd${FREEBSD_REVISION} CFLAGS+= ${CROSS_TARGET_FLAGS} ACFLAGS+= ${CROSS_TARGET_FLAGS} LDFLAGS+= -Wl,-m -Wl,elf_${MACHINE_ARCH}_fbsd .endif META_MODE+= missing-meta=yes .if empty(META_MODE:Mnofilemon) META_MODE+= missing-filemon=yes .endif -# We do not want everything out-of-date just because -# some unrelated shared lib updated this. -.MAKE.META.IGNORE_PATHS+= /usr/local/etc/libmap.d
# MACHINE_CPUARCH=${MACHINE_ARCH:C/mips(n32|64)?(el)?(hf)?/mips/:C/arm(v6)?(eb|hf)?/arm/:C/powerpc(64|spe)/powerpc/:C/riscv64(sf)?/riscv/} .endif # Some options we need now __DEFAULT_NO_OPTIONS= \ DIRDEPS_BUILD \ DIRDEPS_CACHE __DEFAULT_DEPENDENT_OPTIONS= \ AUTO_OBJ/DIRDEPS_BUILD \ META_MODE/DIRDEPS_BUILD \ STAGING/DIRDEPS_BUILD \ SYSROOT/DIRDEPS_BUILD __ENV_ONLY_OPTIONS:= \ ${__DEFAULT_NO_OPTIONS} \ ${__DEFAULT_YES_OPTIONS} \ ${__DEFAULT_DEPENDENT_OPTIONS:H} # early include for customization # see local.sys.mk below # Not included when building in fmake compatibility mode (still needed # for older system support) .if defined(.PARSEDIR) .sinclude .include # Disable MK_META_MODE with make -B .if ${MK_META_MODE} == "yes" && defined(.MAKEFLAGS) && ${.MAKEFLAGS:M-B} MK_META_MODE= no .endif .if ${MK_DIRDEPS_BUILD} == "yes" .sinclude .elif ${MK_META_MODE} == "yes" # verbose will show .MAKE.META.PREFIX for each target. META_MODE+= meta verbose .if !defined(NO_META_MISSING) META_MODE+= missing-meta=yes .endif # silent will hide command output if a .meta file is created. .if !defined(NO_SILENT) META_MODE+= silent=yes .endif .if !exists(/dev/filemon) META_MODE+= nofilemon .endif # Require filemon data with bmake .if empty(META_MODE:Mnofilemon) META_MODE+= missing-filemon=yes .endif .endif META_MODE?= normal .export META_MODE .MAKE.MODE?= ${META_MODE} .if !empty(.MAKE.MODE:Mmeta) && !defined(NO_META_IGNORE_HOST) # Ignore host file changes that will otherwise cause # buildworld -> installworld -> buildworld to rebuild everything. # Since the build is self-reliant and bootstraps everything it needs, # this should not be a real problem for incremental builds. # XXX: This relies on the existing host tools retaining ABI compatibility # through upgrades since they won't be rebuilt on header/library changes. # Note that these are prefix matching, so /lib matches /libexec. .MAKE.META.IGNORE_PATHS+= \ ${__MAKE_SHELL} \ /bin \ /lib \ /rescue \ /sbin \ /usr/bin \ /usr/include \ /usr/lib \ /usr/sbin \ /usr/share \ .endif - +.if !empty(.MAKE.MODE:Mmeta) +# We do not want everything out-of-date just because +# some unrelated shared lib updated this. +.MAKE.META.IGNORE_PATHS+= /usr/local/etc/libmap.d +.endif .if ${MK_AUTO_OBJ} == "yes" # This needs to be done early - before .PATH is computed # Don't do this for 'make showconfig' as it enables all options where meta mode # is not expected. .if !make(showconfig) && !make(print-dir) .sinclude .endif .endif .else # bmake .include .endif # If the special target .POSIX appears (without prerequisites or # commands) before the first noncomment line in the makefile, make shall # process the makefile as specified by the Posix 1003.2 specification. # make(1) sets the special macro %POSIX in this case (to the actual # value "1003.2", for what it's worth). # # The rules below use this macro to distinguish between Posix-compliant # and default behaviour. # # This functionality is currently broken, since make(1) processes sys.mk # before reading any other files, and consequently has no opportunity to # set the %POSIX macro before we read this point. 
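A note on the .MAKE.META.IGNORE_PATHS hunk earlier in this file: the entries are plain string prefixes rather than path components (the comment above that list warns that /lib also matches /libexec), so the relocated /usr/local/etc/libmap.d entry suppresses staleness for every file under that directory. A rough illustration of the matching in C, as a sketch with hypothetical names, not bmake's actual implementation:

	#include <stdbool.h>
	#include <stddef.h>
	#include <string.h>

	/* Hypothetical helper: is "path" covered by an ignored prefix? */
	static bool
	meta_path_ignored(const char *path, const char *const *prefixes,
	    size_t nprefixes)
	{
		size_t i;

		for (i = 0; i < nprefixes; i++) {
			/* Prefix match: "/lib" covers "/libexec/ld-elf.so.1". */
			if (strncmp(path, prefixes[i], strlen(prefixes[i])) == 0)
				return (true);
		}
		return (false);
	}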
.if defined(%POSIX) .SUFFIXES: .o .c .y .l .a .sh .f .else .SUFFIXES: .out .a .ln .o .bco .llo .c .cc .cpp .cxx .C .m .F .f .e .r .y .l .S .asm .s .cl .p .h .sh .endif AR ?= ar .if defined(%POSIX) ARFLAGS ?= -rv .else ARFLAGS ?= -crD .endif RANLIB ?= ranlib .if !defined(%POSIX) RANLIBFLAGS ?= -D .endif AS ?= as AFLAGS ?= ACFLAGS ?= .if defined(%POSIX) CC ?= c89 CFLAGS ?= -O .else CC ?= cc .if ${MACHINE_CPUARCH} == "arm" || ${MACHINE_CPUARCH} == "mips" CFLAGS ?= -O -pipe .else CFLAGS ?= -O2 -pipe .endif .if defined(NO_STRICT_ALIASING) CFLAGS += -fno-strict-aliasing .endif .endif IR_CFLAGS ?= ${STATIC_CFLAGS:N-O*} ${CFLAGS:N-O*} PO_CFLAGS ?= ${CFLAGS} # cp(1) is used to copy source files to ${.OBJDIR}, make sure it can handle # read-only files as non-root by passing -f. CP ?= cp -f CPP ?= cpp # C Type Format data is required for DTrace CTFFLAGS ?= -L VERSION CTFCONVERT ?= ctfconvert CTFMERGE ?= ctfmerge .if defined(CFLAGS) && (${CFLAGS:M-g} != "") CTFFLAGS += -g .endif CXX ?= c++ CXXFLAGS ?= ${CFLAGS:N-std=*:N-Wnested-externs:N-W*-prototypes:N-Wno-pointer-sign:N-Wold-style-definition} IR_CXXFLAGS ?= ${STATIC_CXXFLAGS:N-O*} ${CXXFLAGS:N-O*} PO_CXXFLAGS ?= ${CXXFLAGS} DTRACE ?= dtrace DTRACEFLAGS ?= -C -x nolibs .if empty(.MAKEFLAGS:M-s) ECHO ?= echo ECHODIR ?= echo .else ECHO ?= true .if ${.MAKEFLAGS:M-s} == "-s" ECHODIR ?= echo .else ECHODIR ?= true .endif .endif .if ${.MAKEFLAGS:M-N} # bmake -N is supposed to skip executing anything but it does not skip # executing '+' commands. The '+' feature is used where .MAKE # is not safe for the entire target. -N is intended to skip building sub-makes # so executing '+' commands is not right. Work around the bug by not # setting '+' when -N is used. _+_ ?= .else _+_ ?= + .endif .if defined(%POSIX) FC ?= fort77 FFLAGS ?= -O 1 .else FC ?= f77 FFLAGS ?= -O .endif EFLAGS ?= INSTALL ?= install LEX ?= lex LFLAGS ?= # LDFLAGS is for CC, _LDFLAGS is for LD. Generate _LDFLAGS from # LDFLAGS by stripping -Wl, from pass-through arguments and dropping # compiler driver flags (e.g. -mabi=*) that conflict with flags to LD. LD ?= ld LDFLAGS ?= _LDFLAGS = ${LDFLAGS:S/-Wl,//g:N-mabi=*} LINT ?= lint LINTFLAGS ?= -cghapbx LINTKERNFLAGS ?= ${LINTFLAGS} LINTOBJFLAGS ?= -cghapbxu -i LINTOBJKERNFLAGS?= ${LINTOBJFLAGS} LINTLIBFLAGS ?= -cghapbxu -C ${LIB} MAKE ?= make .if !defined(%POSIX) LLVM_LINK ?= llvm-link LORDER ?= lorder NM ?= nm NMFLAGS ?= OBJC ?= cc OBJCFLAGS ?= ${OBJCINCLUDES} ${CFLAGS} -Wno-import OBJCOPY ?= objcopy PC ?= pc PFLAGS ?= RC ?= f77 RFLAGS ?= TSORT ?= tsort TSORTFLAGS ?= -q .endif SHELL ?= sh .if !defined(%POSIX) SIZE ?= size .endif YACC ?= yacc .if defined(%POSIX) YFLAGS ?= .else YFLAGS ?= -d .endif .if defined(%POSIX) .include "bsd.suffixes-posix.mk" .else # non-Posix rule set .include "bsd.suffixes.mk" # Pull in global settings. __MAKE_CONF?=/etc/make.conf .if exists(${__MAKE_CONF}) .include "${__MAKE_CONF}" .endif # late include for customization .sinclude .if defined(META_MODE) META_MODE:= ${META_MODE:O:u} .endif .if defined(__MAKE_SHELL) && !empty(__MAKE_SHELL) SHELL= ${__MAKE_SHELL} .SHELL: path=${__MAKE_SHELL} .endif # Tell bmake to expand -V VAR by default .MAKE.EXPAND_VARIABLES= yes # Tell bmake the makefile preference .MAKE.MAKEFILE_PREFERENCE= BSDmakefile makefile Makefile # Tell bmake to always pass job tokens, regardless of target depending on # .MAKE or looking like ${MAKE}/${.MAKE}/$(MAKE)/$(.MAKE)/make.
.MAKE.ALWAYS_PASS_JOB_QUEUE= yes # By default bmake does *not* use set -e # when running target scripts; this is a problem for many makefiles here. # So define a shell that will do what FreeBSD expects. .ifndef WITHOUT_SHELL_ERRCTL __MAKE_SHELL?=/bin/sh .SHELL: name=sh \ quiet="set -" echo="set -v" filter="set -" \ hasErrCtl=yes check="set -e" ignore="set +e" \ echoFlag=v errFlag=e \ path=${__MAKE_SHELL} .endif # Hack for ports compatibility. Historically, ports makefiles have # assumed they can examine MACHINE_CPU without including anything # because this was automatically included in sys.mk. For /usr/src, # this file has moved to being included from bsd.opts.mk. Until all # the ports files are modernized, and a reasonable transition # period has passed, include it while we're in a ports tree here # to preserve historic behavior. .if exists(${.CURDIR}/../../Mk/bsd.port.mk) .include .endif .endif # ! Posix Index: projects/clang500-import/sys/amd64/amd64/pmap.c =================================================================== --- projects/clang500-import/sys/amd64/amd64/pmap.c (revision 319548) +++ projects/clang500-import/sys/amd64/amd64/pmap.c (revision 319549) @@ -1,7307 +1,7308 @@ /*- * Copyright (c) 1991 Regents of the University of California. * All rights reserved. * Copyright (c) 1994 John S. Dyson * All rights reserved. * Copyright (c) 1994 David Greenman * All rights reserved. * Copyright (c) 2003 Peter Wemm * All rights reserved. * Copyright (c) 2005-2010 Alan L. Cox * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department and William Jolitz of UUNET Technologies Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 */ /*- * Copyright (c) 2003 Networks Associates Technology, Inc. * All rights reserved.
* * This software was developed for the FreeBSD Project by Jake Burkholder, * Safeport Network Services, and Network Associates Laboratories, the * Security Research Division of Network Associates, Inc. under * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA * CHATS research program. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #define AMD64_NPT_AWARE #include __FBSDID("$FreeBSD$"); /* * Manages physical address maps. * * Since the information managed by this module is * also stored by the logical address mapping module, * this module may throw away valid virtual-to-physical * mappings at almost any time. However, invalidations * of virtual-to-physical mappings must be done as * requested. * * In order to cope with hardware architectures which * make virtual-to-physical map invalidates expensive, * this module may delay invalidation or protection-reduction * operations until such time as they are actually * necessary. This module is given full information as * to which processors are currently using which maps, * and as to when physical maps must be made correct.
*/ #include "opt_pmap.h" #include "opt_vm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef SMP #include #endif static __inline boolean_t pmap_type_guest(pmap_t pmap) { return ((pmap->pm_type == PT_EPT) || (pmap->pm_type == PT_RVI)); } static __inline boolean_t pmap_emulate_ad_bits(pmap_t pmap) { return ((pmap->pm_flags & PMAP_EMULATE_AD_BITS) != 0); } static __inline pt_entry_t pmap_valid_bit(pmap_t pmap) { pt_entry_t mask; switch (pmap->pm_type) { case PT_X86: case PT_RVI: mask = X86_PG_V; break; case PT_EPT: if (pmap_emulate_ad_bits(pmap)) mask = EPT_PG_EMUL_V; else mask = EPT_PG_READ; break; default: panic("pmap_valid_bit: invalid pm_type %d", pmap->pm_type); } return (mask); } static __inline pt_entry_t pmap_rw_bit(pmap_t pmap) { pt_entry_t mask; switch (pmap->pm_type) { case PT_X86: case PT_RVI: mask = X86_PG_RW; break; case PT_EPT: if (pmap_emulate_ad_bits(pmap)) mask = EPT_PG_EMUL_RW; else mask = EPT_PG_WRITE; break; default: panic("pmap_rw_bit: invalid pm_type %d", pmap->pm_type); } return (mask); } static __inline pt_entry_t pmap_global_bit(pmap_t pmap) { pt_entry_t mask; switch (pmap->pm_type) { case PT_X86: mask = X86_PG_G; break; case PT_RVI: case PT_EPT: mask = 0; break; default: panic("pmap_global_bit: invalid pm_type %d", pmap->pm_type); } return (mask); } static __inline pt_entry_t pmap_accessed_bit(pmap_t pmap) { pt_entry_t mask; switch (pmap->pm_type) { case PT_X86: case PT_RVI: mask = X86_PG_A; break; case PT_EPT: if (pmap_emulate_ad_bits(pmap)) mask = EPT_PG_READ; else mask = EPT_PG_A; break; default: panic("pmap_accessed_bit: invalid pm_type %d", pmap->pm_type); } return (mask); } static __inline pt_entry_t pmap_modified_bit(pmap_t pmap) { pt_entry_t mask; switch (pmap->pm_type) { case PT_X86: case PT_RVI: mask = X86_PG_M; break; case PT_EPT: if (pmap_emulate_ad_bits(pmap)) mask = EPT_PG_WRITE; else mask = EPT_PG_M; break; default: panic("pmap_modified_bit: invalid pm_type %d", pmap->pm_type); } return (mask); } extern struct pcpu __pcpu[]; #if !defined(DIAGNOSTIC) #ifdef __GNUC_GNU_INLINE__ #define PMAP_INLINE __attribute__((__gnu_inline__)) inline #else #define PMAP_INLINE extern inline #endif #else #define PMAP_INLINE #endif #ifdef PV_STATS #define PV_STAT(x) do { x ; } while (0) #else #define PV_STAT(x) do { } while (0) #endif #define pa_index(pa) ((pa) >> PDRSHIFT) #define pa_to_pvh(pa) (&pv_table[pa_index(pa)]) #define NPV_LIST_LOCKS MAXCPU #define PHYS_TO_PV_LIST_LOCK(pa) \ (&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS]) #define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa) do { \ struct rwlock **_lockp = (lockp); \ struct rwlock *_new_lock; \ \ _new_lock = PHYS_TO_PV_LIST_LOCK(pa); \ if (_new_lock != *_lockp) { \ if (*_lockp != NULL) \ rw_wunlock(*_lockp); \ *_lockp = _new_lock; \ rw_wlock(*_lockp); \ } \ } while (0) #define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m) \ CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m)) #define RELEASE_PV_LIST_LOCK(lockp) do { \ struct rwlock **_lockp = (lockp); \ \ if (*_lockp != NULL) { \ rw_wunlock(*_lockp); \ *_lockp = NULL; \ } \ } while (0) #define VM_PAGE_TO_PV_LIST_LOCK(m) \ PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m)) struct pmap kernel_pmap_store; vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ 
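/*
 * Illustrative note, not part of this change: the PV list lock macros
 * above hash a physical address into a fixed pool of NPV_LIST_LOCKS
 * (i.e., MAXCPU) locks at 2MB granularity:
 *
 *	pa_index(pa)             == pa >> PDRSHIFT
 *	PHYS_TO_PV_LIST_LOCK(pa) == &pv_list_locks[pa_index(pa) %
 *				       NPV_LIST_LOCKS]
 *
 * Every 4KB page within a given 2MB frame therefore maps to the same
 * lock, which is what allows superpage promotion and demotion to work
 * on a whole 2MB run while holding a single PV list lock.
 */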
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ int nkpt; SYSCTL_INT(_machdep, OID_AUTO, nkpt, CTLFLAG_RD, &nkpt, 0, "Number of kernel page table pages allocated on bootup"); static int ndmpdp; vm_paddr_t dmaplimit; vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS; pt_entry_t pg_nx; static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters"); static int pat_works = 1; SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD, &pat_works, 1, "Is page attribute table fully functional?"); static int pg_ps_enabled = 1; SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pg_ps_enabled, 0, "Are large page mappings enabled?"); #define PAT_INDEX_SIZE 8 static int pat_index[PAT_INDEX_SIZE]; /* cache mode to PAT index conversion */ static u_int64_t KPTphys; /* phys addr of kernel level 1 */ static u_int64_t KPDphys; /* phys addr of kernel level 2 */ u_int64_t KPDPphys; /* phys addr of kernel level 3 */ u_int64_t KPML4phys; /* phys addr of kernel level 4 */ static u_int64_t DMPDphys; /* phys addr of direct mapped level 2 */ static u_int64_t DMPDPphys; /* phys addr of direct mapped level 3 */ static int ndmpdpphys; /* number of DMPDPphys pages */ /* * pmap_mapdev support pre initialization (i.e. console) */ #define PMAP_PREINIT_MAPPING_COUNT 8 static struct pmap_preinit_mapping { vm_paddr_t pa; vm_offset_t va; vm_size_t sz; int mode; } pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT]; static int pmap_initialized; /* * Data for the pv entry allocation mechanism. * Updates to pv_invl_gen are protected by the pv_list_locks[] * elements, but reads are not. */ static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks); static struct mtx pv_chunks_mutex; static struct rwlock pv_list_locks[NPV_LIST_LOCKS]; static u_long pv_invl_gen[NPV_LIST_LOCKS]; static struct md_page *pv_table; static struct md_page pv_dummy; /* * All those kernel PT submaps that BSD is so fond of */ pt_entry_t *CMAP1 = NULL; caddr_t CADDR1 = 0; static vm_offset_t qframe = 0; static struct mtx qframe_mtx; static int pmap_flags = PMAP_PDE_SUPERPAGE; /* flags for x86 pmaps */ int pmap_pcid_enabled = 1; SYSCTL_INT(_vm_pmap, OID_AUTO, pcid_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pmap_pcid_enabled, 0, "Is TLB Context ID enabled ?"); int invpcid_works = 0; SYSCTL_INT(_vm_pmap, OID_AUTO, invpcid_works, CTLFLAG_RD, &invpcid_works, 0, "Is the invpcid instruction available ?"); static int pmap_pcid_save_cnt_proc(SYSCTL_HANDLER_ARGS) { int i; uint64_t res; res = 0; CPU_FOREACH(i) { res += cpuid_to_pcpu[i]->pc_pm_save_cnt; } return (sysctl_handle_64(oidp, &res, 0, req)); } SYSCTL_PROC(_vm_pmap, OID_AUTO, pcid_save_cnt, CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0, pmap_pcid_save_cnt_proc, "QU", "Count of saved TLB context on switch"); static LIST_HEAD(, pmap_invl_gen) pmap_invl_gen_tracker = LIST_HEAD_INITIALIZER(&pmap_invl_gen_tracker); static struct mtx invl_gen_mtx; static u_long pmap_invl_gen = 0; /* Fake lock object to satisfy turnstiles interface. */ static struct lock_object invl_gen_ts = { .lo_name = "invlts", }; #define PMAP_ASSERT_NOT_IN_DI() \ KASSERT(curthread->td_md.md_invl_gen.gen == 0, ("DI already started")) /* * Start a new Delayed Invalidation (DI) block of code, executed by * the current thread. 
Within a DI block, the current thread may * destroy both the page table and PV list entries for a mapping and * then release the corresponding PV list lock before ensuring that * the mapping is flushed from the TLBs of any processors with the * pmap active. */ static void pmap_delayed_invl_started(void) { struct pmap_invl_gen *invl_gen; u_long currgen; invl_gen = &curthread->td_md.md_invl_gen; PMAP_ASSERT_NOT_IN_DI(); mtx_lock(&invl_gen_mtx); if (LIST_EMPTY(&pmap_invl_gen_tracker)) currgen = pmap_invl_gen; else currgen = LIST_FIRST(&pmap_invl_gen_tracker)->gen; invl_gen->gen = currgen + 1; LIST_INSERT_HEAD(&pmap_invl_gen_tracker, invl_gen, link); mtx_unlock(&invl_gen_mtx); } /* * Finish the DI block, previously started by the current thread. All * required TLB flushes for the pages marked by * pmap_delayed_invl_page() must be finished before this function is * called. * * This function works by bumping the global DI generation number to * the generation number of the current thread's DI, unless there is a * pending DI that started earlier. In the latter case, bumping the * global DI generation number would incorrectly signal that the * earlier DI had finished. Instead, this function bumps the earlier * DI's generation number to match the generation number of the * current thread's DI. */ static void pmap_delayed_invl_finished(void) { struct pmap_invl_gen *invl_gen, *next; struct turnstile *ts; invl_gen = &curthread->td_md.md_invl_gen; KASSERT(invl_gen->gen != 0, ("missed invl_started")); mtx_lock(&invl_gen_mtx); next = LIST_NEXT(invl_gen, link); if (next == NULL) { turnstile_chain_lock(&invl_gen_ts); ts = turnstile_lookup(&invl_gen_ts); pmap_invl_gen = invl_gen->gen; if (ts != NULL) { turnstile_broadcast(ts, TS_SHARED_QUEUE); turnstile_unpend(ts, TS_SHARED_LOCK); } turnstile_chain_unlock(&invl_gen_ts); } else { next->gen = invl_gen->gen; } LIST_REMOVE(invl_gen, link); mtx_unlock(&invl_gen_mtx); invl_gen->gen = 0; } #ifdef PV_STATS static long invl_wait; SYSCTL_LONG(_vm_pmap, OID_AUTO, invl_wait, CTLFLAG_RD, &invl_wait, 0, "Number of times DI invalidation blocked pmap_remove_all/write"); #endif static u_long * pmap_delayed_invl_genp(vm_page_t m) { return (&pv_invl_gen[pa_index(VM_PAGE_TO_PHYS(m)) % NPV_LIST_LOCKS]); } /* * Ensure that all currently executing DI blocks, that need to flush * TLB for the given page m, actually flushed the TLB at the time the * function returned. If the page m has an empty PV list and we call * pmap_delayed_invl_wait(), upon its return we know that no CPU has a * valid mapping for the page m in either its page table or TLB. * * This function works by blocking until the global DI generation * number catches up with the generation number associated with the * given page m and its PV list. Since this function's callers * typically own an object lock and sometimes own a page lock, it * cannot sleep. Instead, it blocks on a turnstile to relinquish the * processor. */ static void pmap_delayed_invl_wait(vm_page_t m) { struct thread *td; struct turnstile *ts; u_long *m_gen; #ifdef PV_STATS bool accounted = false; #endif td = curthread; m_gen = pmap_delayed_invl_genp(m); while (*m_gen > pmap_invl_gen) { #ifdef PV_STATS if (!accounted) { atomic_add_long(&invl_wait, 1); accounted = true; } #endif ts = turnstile_trywait(&invl_gen_ts); if (*m_gen > pmap_invl_gen) turnstile_wait(ts, NULL, TS_SHARED_QUEUE); else turnstile_cancel(ts); } } /* * Mark the page m's PV list as participating in the current thread's * DI block. 
Any threads concurrently using m's PV list to remove or * restrict all mappings to m will wait for the current thread's DI * block to complete before proceeding. * * The function works by setting the DI generation number for m's PV * list to at least the DI generation number of the current thread. * This forces a caller of pmap_delayed_invl_wait() to block until * current thread calls pmap_delayed_invl_finished(). */ static void pmap_delayed_invl_page(vm_page_t m) { u_long gen, *m_gen; rw_assert(VM_PAGE_TO_PV_LIST_LOCK(m), RA_WLOCKED); gen = curthread->td_md.md_invl_gen.gen; if (gen == 0) return; m_gen = pmap_delayed_invl_genp(m); if (*m_gen < gen) *m_gen = gen; } /* * Crashdump maps. */ static caddr_t crashdumpmap; static void free_pv_chunk(struct pv_chunk *pc); static void free_pv_entry(pmap_t pmap, pv_entry_t pv); static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp); static int popcnt_pc_map_pq(uint64_t *map); static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp); static void reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp); static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, struct rwlock **lockp); static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, struct rwlock **lockp); static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, struct rwlock **lockp); static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va); static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va); static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode); static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va); static boolean_t pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, struct rwlock **lockp); static boolean_t pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va); static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, struct rwlock **lockp); static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp); static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte); static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte); static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde); static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode); static void pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask); static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, struct rwlock **lockp); static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot); static void pmap_pte_attr(pt_entry_t *pte, int cache_bits, int mask); static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, struct spglist *free, struct rwlock **lockp); static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva, pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp); static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va); static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, struct spglist *free); static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m, struct rwlock **lockp); static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde); static void pmap_update_pde_invalidate(pmap_t, vm_offset_t va, pd_entry_t pde); static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t 
ptepindex, struct rwlock **lockp); static vm_page_t pmap_allocpde(pmap_t pmap, vm_offset_t va, struct rwlock **lockp); static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, struct rwlock **lockp); static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free); static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *); static vm_offset_t pmap_kmem_choose(vm_offset_t addr); /* * Move the kernel virtual free pointer to the next * 2MB. This is used to help improve performance * by using a large (2MB) page for much of the kernel * (.text, .data, .bss) */ static vm_offset_t pmap_kmem_choose(vm_offset_t addr) { vm_offset_t newaddr = addr; newaddr = roundup2(addr, NBPDR); return (newaddr); } /********************/ /* Inline functions */ /********************/ /* Return a non-clipped PD index for a given VA */ static __inline vm_pindex_t pmap_pde_pindex(vm_offset_t va) { return (va >> PDRSHIFT); } /* Return a pointer to the PML4 slot that corresponds to a VA */ static __inline pml4_entry_t * pmap_pml4e(pmap_t pmap, vm_offset_t va) { return (&pmap->pm_pml4[pmap_pml4e_index(va)]); } /* Return a pointer to the PDP slot that corresponds to a VA */ static __inline pdp_entry_t * pmap_pml4e_to_pdpe(pml4_entry_t *pml4e, vm_offset_t va) { pdp_entry_t *pdpe; pdpe = (pdp_entry_t *)PHYS_TO_DMAP(*pml4e & PG_FRAME); return (&pdpe[pmap_pdpe_index(va)]); } /* Return a pointer to the PDP slot that corresponds to a VA */ static __inline pdp_entry_t * pmap_pdpe(pmap_t pmap, vm_offset_t va) { pml4_entry_t *pml4e; pt_entry_t PG_V; PG_V = pmap_valid_bit(pmap); pml4e = pmap_pml4e(pmap, va); if ((*pml4e & PG_V) == 0) return (NULL); return (pmap_pml4e_to_pdpe(pml4e, va)); } /* Return a pointer to the PD slot that corresponds to a VA */ static __inline pd_entry_t * pmap_pdpe_to_pde(pdp_entry_t *pdpe, vm_offset_t va) { pd_entry_t *pde; pde = (pd_entry_t *)PHYS_TO_DMAP(*pdpe & PG_FRAME); return (&pde[pmap_pde_index(va)]); } /* Return a pointer to the PD slot that corresponds to a VA */ static __inline pd_entry_t * pmap_pde(pmap_t pmap, vm_offset_t va) { pdp_entry_t *pdpe; pt_entry_t PG_V; PG_V = pmap_valid_bit(pmap); pdpe = pmap_pdpe(pmap, va); if (pdpe == NULL || (*pdpe & PG_V) == 0) return (NULL); return (pmap_pdpe_to_pde(pdpe, va)); } /* Return a pointer to the PT slot that corresponds to a VA */ static __inline pt_entry_t * pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va) { pt_entry_t *pte; pte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME); return (&pte[pmap_pte_index(va)]); } /* Return a pointer to the PT slot that corresponds to a VA */ static __inline pt_entry_t * pmap_pte(pmap_t pmap, vm_offset_t va) { pd_entry_t *pde; pt_entry_t PG_V; PG_V = pmap_valid_bit(pmap); pde = pmap_pde(pmap, va); if (pde == NULL || (*pde & PG_V) == 0) return (NULL); if ((*pde & PG_PS) != 0) /* compat with i386 pmap_pte() */ return ((pt_entry_t *)pde); return (pmap_pde_to_pte(pde, va)); } static __inline void pmap_resident_count_inc(pmap_t pmap, int count) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); pmap->pm_stats.resident_count += count; } static __inline void pmap_resident_count_dec(pmap_t pmap, int count) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT(pmap->pm_stats.resident_count >= count, ("pmap %p resident count underflow %ld %d", pmap, pmap->pm_stats.resident_count, count)); pmap->pm_stats.resident_count -= count; } PMAP_INLINE pt_entry_t * vtopte(vm_offset_t va) { u_int64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1); KASSERT(va >= VM_MAXUSER_ADDRESS, 
("vtopte on a uva/gpa 0x%0lx", va)); return (PTmap + ((va >> PAGE_SHIFT) & mask)); } static __inline pd_entry_t * vtopde(vm_offset_t va) { u_int64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1); KASSERT(va >= VM_MAXUSER_ADDRESS, ("vtopde on a uva/gpa 0x%0lx", va)); return (PDmap + ((va >> PDRSHIFT) & mask)); } static u_int64_t allocpages(vm_paddr_t *firstaddr, int n) { u_int64_t ret; ret = *firstaddr; bzero((void *)ret, n * PAGE_SIZE); *firstaddr += n * PAGE_SIZE; return (ret); } CTASSERT(powerof2(NDMPML4E)); /* number of kernel PDP slots */ #define NKPDPE(ptpgs) howmany(ptpgs, NPDEPG) static void nkpt_init(vm_paddr_t addr) { int pt_pages; #ifdef NKPT pt_pages = NKPT; #else pt_pages = howmany(addr, 1 << PDRSHIFT); pt_pages += NKPDPE(pt_pages); /* * Add some slop beyond the bare minimum required for bootstrapping * the kernel. * * This is quite important when allocating KVA for kernel modules. * The modules are required to be linked in the negative 2GB of * the address space. If we run out of KVA in this region then * pmap_growkernel() will need to allocate page table pages to map * the entire 512GB of KVA space which is an unnecessary tax on * physical memory. * * Secondly, device memory mapped as part of setting up the low- * level console(s) is taken from KVA, starting at virtual_avail. * This is because cninit() is called after pmap_bootstrap() but * before vm_init() and pmap_init(). 20MB for a frame buffer is * not uncommon. */ pt_pages += 32; /* 64MB additional slop. */ #endif nkpt = pt_pages; } static void create_pagetables(vm_paddr_t *firstaddr) { int i, j, ndm1g, nkpdpe; pt_entry_t *pt_p; pd_entry_t *pd_p; pdp_entry_t *pdp_p; pml4_entry_t *p4_p; /* Allocate page table pages for the direct map */ ndmpdp = howmany(ptoa(Maxmem), NBPDP); if (ndmpdp < 4) /* Minimum 4GB of dirmap */ ndmpdp = 4; ndmpdpphys = howmany(ndmpdp, NPDPEPG); if (ndmpdpphys > NDMPML4E) { /* * Each NDMPML4E allows 512 GB, so limit to that, * and then readjust ndmpdp and ndmpdpphys. */ printf("NDMPML4E limits system to %d GB\n", NDMPML4E * 512); Maxmem = atop(NDMPML4E * NBPML4); ndmpdpphys = NDMPML4E; ndmpdp = NDMPML4E * NPDEPG; } DMPDPphys = allocpages(firstaddr, ndmpdpphys); ndm1g = 0; if ((amd_feature & AMDID_PAGE1GB) != 0) ndm1g = ptoa(Maxmem) >> PDPSHIFT; if (ndm1g < ndmpdp) DMPDphys = allocpages(firstaddr, ndmpdp - ndm1g); dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT; /* Allocate pages */ KPML4phys = allocpages(firstaddr, 1); KPDPphys = allocpages(firstaddr, NKPML4E); /* * Allocate the initial number of kernel page table pages required to * bootstrap. We defer this until after all memory-size dependent * allocations are done (e.g. direct map), so that we don't have to * build in too much slop in our estimate. * * Note that when NKPML4E > 1, we have an empty page underneath * all but the KPML4I'th one, so we need NKPML4E-1 extra (zeroed) * pages. (pmap_enter requires a PD page to exist for each KPML4E.) 
*/ nkpt_init(*firstaddr); nkpdpe = NKPDPE(nkpt); KPTphys = allocpages(firstaddr, nkpt); KPDphys = allocpages(firstaddr, nkpdpe); /* Fill in the underlying page table pages */ /* Nominally read-only (but really R/W) from zero to physfree */ /* XXX not fully used, underneath 2M pages */ pt_p = (pt_entry_t *)KPTphys; for (i = 0; ptoa(i) < *firstaddr; i++) pt_p[i] = ptoa(i) | X86_PG_RW | X86_PG_V | X86_PG_G; /* Now map the page tables at their location within PTmap */ pd_p = (pd_entry_t *)KPDphys; for (i = 0; i < nkpt; i++) pd_p[i] = (KPTphys + ptoa(i)) | X86_PG_RW | X86_PG_V; /* Map from zero to end of allocations under 2M pages */ /* This replaces some of the KPTphys entries above */ for (i = 0; (i << PDRSHIFT) < *firstaddr; i++) pd_p[i] = (i << PDRSHIFT) | X86_PG_RW | X86_PG_V | PG_PS | X86_PG_G; /* And connect up the PD to the PDP (leaving room for L4 pages) */ pdp_p = (pdp_entry_t *)(KPDPphys + ptoa(KPML4I - KPML4BASE)); for (i = 0; i < nkpdpe; i++) pdp_p[i + KPDPI] = (KPDphys + ptoa(i)) | X86_PG_RW | X86_PG_V | PG_U; /* * Now, set up the direct map region using 2MB and/or 1GB pages. If * the end of physical memory is not aligned to a 1GB page boundary, * then the residual physical memory is mapped with 2MB pages. Later, * if pmap_mapdev{_attr}() uses the direct map for non-write-back * memory, pmap_change_attr() will demote any 2MB or 1GB page mappings * that are partially used. */ pd_p = (pd_entry_t *)DMPDphys; for (i = NPDEPG * ndm1g, j = 0; i < NPDEPG * ndmpdp; i++, j++) { pd_p[j] = (vm_paddr_t)i << PDRSHIFT; /* Preset PG_M and PG_A because demotion expects it. */ pd_p[j] |= X86_PG_RW | X86_PG_V | PG_PS | X86_PG_G | X86_PG_M | X86_PG_A | pg_nx; } pdp_p = (pdp_entry_t *)DMPDPphys; for (i = 0; i < ndm1g; i++) { pdp_p[i] = (vm_paddr_t)i << PDPSHIFT; /* Preset PG_M and PG_A because demotion expects it. */ pdp_p[i] |= X86_PG_RW | X86_PG_V | PG_PS | X86_PG_G | X86_PG_M | X86_PG_A | pg_nx; } for (j = 0; i < ndmpdp; i++, j++) { pdp_p[i] = DMPDphys + ptoa(j); pdp_p[i] |= X86_PG_RW | X86_PG_V | PG_U; } /* And recursively map PML4 to itself in order to get PTmap */ p4_p = (pml4_entry_t *)KPML4phys; p4_p[PML4PML4I] = KPML4phys; p4_p[PML4PML4I] |= X86_PG_RW | X86_PG_V | PG_U; /* Connect the Direct Map slot(s) up to the PML4. */ for (i = 0; i < ndmpdpphys; i++) { p4_p[DMPML4I + i] = DMPDPphys + ptoa(i); p4_p[DMPML4I + i] |= X86_PG_RW | X86_PG_V | PG_U; } /* Connect the KVA slots up to the PML4 */ for (i = 0; i < NKPML4E; i++) { p4_p[KPML4BASE + i] = KPDPphys + ptoa(i); p4_p[KPML4BASE + i] |= X86_PG_RW | X86_PG_V | PG_U; } } /* * Bootstrap the system enough to run with virtual memory. * * On amd64 this is called after mapping has already been enabled * and just syncs the pmap module with what has already been done. * [We can't call it easily with mapping off since the kernel is not * mapped with PA == VA, hence we would have to relocate every address * from the linked base (virtual) address "KERNBASE" to the actual * (physical) address starting relative to 0] */ void pmap_bootstrap(vm_paddr_t *firstaddr) { vm_offset_t va; pt_entry_t *pte; int i; /* * Create an initial set of page tables to run the kernel in. */ create_pagetables(firstaddr); /* * Add a physical memory segment (vm_phys_seg) corresponding to the * preallocated kernel page table pages so that vm_page structures * representing these pages will be created. The vm_page structures * are required for promotion of the corresponding kernel virtual * addresses to superpage mappings. 
*/ vm_phys_add_seg(KPTphys, KPTphys + ptoa(nkpt)); virtual_avail = (vm_offset_t) KERNBASE + *firstaddr; virtual_avail = pmap_kmem_choose(virtual_avail); virtual_end = VM_MAX_KERNEL_ADDRESS; /* XXX do %cr0 as well */ load_cr4(rcr4() | CR4_PGE); load_cr3(KPML4phys); if (cpu_stdext_feature & CPUID_STDEXT_SMEP) load_cr4(rcr4() | CR4_SMEP); /* * Initialize the kernel pmap (which is statically allocated). */ PMAP_LOCK_INIT(kernel_pmap); kernel_pmap->pm_pml4 = (pdp_entry_t *)PHYS_TO_DMAP(KPML4phys); kernel_pmap->pm_cr3 = KPML4phys; CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */ TAILQ_INIT(&kernel_pmap->pm_pvchunk); kernel_pmap->pm_flags = pmap_flags; /* * Initialize the TLB invalidations generation number lock. */ mtx_init(&invl_gen_mtx, "invlgn", NULL, MTX_DEF); /* * Reserve some special page table entries/VA space for temporary * mapping of pages. */ #define SYSMAP(c, p, v, n) \ v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); va = virtual_avail; pte = vtopte(va); /* * Crashdump maps. The first page is reused as CMAP1 for the * memory test. */ SYSMAP(caddr_t, CMAP1, crashdumpmap, MAXDUMPPGS) CADDR1 = crashdumpmap; virtual_avail = va; /* * Initialize the PAT MSR. * pmap_init_pat() clears and sets CR4_PGE, which, as a * side-effect, invalidates stale PG_G TLB entries that might * have been created in our pre-boot environment. */ pmap_init_pat(); /* Initialize TLB Context Id. */ TUNABLE_INT_FETCH("vm.pmap.pcid_enabled", &pmap_pcid_enabled); if ((cpu_feature2 & CPUID2_PCID) != 0 && pmap_pcid_enabled) { /* Check for INVPCID support */ invpcid_works = (cpu_stdext_feature & CPUID_STDEXT_INVPCID) != 0; for (i = 0; i < MAXCPU; i++) { kernel_pmap->pm_pcids[i].pm_pcid = PMAP_PCID_KERN; kernel_pmap->pm_pcids[i].pm_gen = 1; } __pcpu[0].pc_pcid_next = PMAP_PCID_KERN + 1; __pcpu[0].pc_pcid_gen = 1; /* * pcpu area for APs is zeroed during AP startup. * pc_pcid_next and pc_pcid_gen are initialized by AP * during pcpu setup. */ load_cr4(rcr4() | CR4_PCIDE); } else { pmap_pcid_enabled = 0; } } /* * Setup the PAT MSR. */ void pmap_init_pat(void) { int pat_table[PAT_INDEX_SIZE]; uint64_t pat_msr; u_long cr0, cr4; int i; /* Bail if this CPU doesn't implement PAT. */ if ((cpu_feature & CPUID_PAT) == 0) panic("no PAT??"); /* Set default PAT index table. */ for (i = 0; i < PAT_INDEX_SIZE; i++) pat_table[i] = -1; pat_table[PAT_WRITE_BACK] = 0; pat_table[PAT_WRITE_THROUGH] = 1; pat_table[PAT_UNCACHEABLE] = 3; pat_table[PAT_WRITE_COMBINING] = 3; pat_table[PAT_WRITE_PROTECTED] = 3; pat_table[PAT_UNCACHED] = 3; /* Initialize default PAT entries. */ pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) | PAT_VALUE(1, PAT_WRITE_THROUGH) | PAT_VALUE(2, PAT_UNCACHED) | PAT_VALUE(3, PAT_UNCACHEABLE) | PAT_VALUE(4, PAT_WRITE_BACK) | PAT_VALUE(5, PAT_WRITE_THROUGH) | PAT_VALUE(6, PAT_UNCACHED) | PAT_VALUE(7, PAT_UNCACHEABLE); if (pat_works) { /* * Leave the indices 0-3 at the default of WB, WT, UC-, and UC. * Program 5 and 6 as WP and WC. * Leave 4 and 7 as WB and UC. */ pat_msr &= ~(PAT_MASK(5) | PAT_MASK(6)); pat_msr |= PAT_VALUE(5, PAT_WRITE_PROTECTED) | PAT_VALUE(6, PAT_WRITE_COMBINING); pat_table[PAT_UNCACHED] = 2; pat_table[PAT_WRITE_PROTECTED] = 5; pat_table[PAT_WRITE_COMBINING] = 6; } else { /* * Just replace PAT Index 2 with WC instead of UC-. */ pat_msr &= ~PAT_MASK(2); pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING); pat_table[PAT_WRITE_COMBINING] = 2; } /* Disable PGE. */ cr4 = rcr4(); load_cr4(cr4 & ~CR4_PGE); /* Disable caches (CD = 1, NW = 0). 
*/ cr0 = rcr0(); load_cr0((cr0 & ~CR0_NW) | CR0_CD); /* Flushes caches and TLBs. */ wbinvd(); invltlb(); /* Update PAT and index table. */ wrmsr(MSR_PAT, pat_msr); for (i = 0; i < PAT_INDEX_SIZE; i++) pat_index[i] = pat_table[i]; /* Flush caches and TLBs again. */ wbinvd(); invltlb(); /* Restore caches and PGE. */ load_cr0(cr0); load_cr4(cr4); } /* * Initialize a vm_page's machine-dependent fields. */ void pmap_page_init(vm_page_t m) { TAILQ_INIT(&m->md.pv_list); m->md.pat_mode = PAT_WRITE_BACK; } /* * Initialize the pmap module. * Called by vm_init, to initialize any structures that the pmap * system needs to map virtual memory. */ void pmap_init(void) { struct pmap_preinit_mapping *ppim; vm_page_t mpte; vm_size_t s; int error, i, pv_npg; /* * Initialize the vm page array entries for the kernel pmap's * page table pages. */ for (i = 0; i < nkpt; i++) { mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT)); KASSERT(mpte >= vm_page_array && mpte < &vm_page_array[vm_page_array_size], ("pmap_init: page table page is out of range")); mpte->pindex = pmap_pde_pindex(KERNBASE) + i; mpte->phys_addr = KPTphys + (i << PAGE_SHIFT); } /* * If the kernel is running on a virtual machine, then it must assume * that MCA is enabled by the hypervisor. Moreover, the kernel must * be prepared for the hypervisor changing the vendor and family that * are reported by CPUID. Consequently, the workaround for AMD Family * 10h Erratum 383 is enabled if the processor's feature set does not * include at least one feature that is only supported by older Intel * or newer AMD processors. */ if (vm_guest != VM_GUEST_NO && (cpu_feature & CPUID_SS) == 0 && (cpu_feature2 & (CPUID2_SSSE3 | CPUID2_SSE41 | CPUID2_AESNI | CPUID2_AVX | CPUID2_XSAVE)) == 0 && (amd_feature2 & (AMDID2_XOP | AMDID2_FMA4)) == 0) workaround_erratum383 = 1; /* * Are large page mappings enabled? */ TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled); if (pg_ps_enabled) { KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0, ("pmap_init: can't assign to pagesizes[1]")); pagesizes[1] = NBPDR; } /* * Initialize the pv chunk list mutex. */ mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF); /* * Initialize the pool of pv list locks. */ for (i = 0; i < NPV_LIST_LOCKS; i++) rw_init(&pv_list_locks[i], "pmap pv list"); /* * Calculate the size of the pv head table for superpages. */ pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, NBPDR); /* * Allocate memory for the pv head table for superpages. 
 */
	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
	s = round_page(s);
	pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < pv_npg; i++)
		TAILQ_INIT(&pv_table[i].pv_list);
	TAILQ_INIT(&pv_dummy.pv_list);

	pmap_initialized = 1;
	for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
		ppim = pmap_preinit_mapping + i;
		if (ppim->va == 0)
			continue;
		/* Make the direct map consistent */
		if (ppim->pa < dmaplimit && ppim->pa + ppim->sz < dmaplimit) {
			(void)pmap_change_attr(PHYS_TO_DMAP(ppim->pa),
			    ppim->sz, ppim->mode);
		}
		if (!bootverbose)
			continue;
		printf("PPIM %u: PA=%#lx, VA=%#lx, size=%#lx, mode=%#x\n", i,
		    ppim->pa, ppim->va, ppim->sz, ppim->mode);
	}

	mtx_init(&qframe_mtx, "qfrmlk", NULL, MTX_SPIN);
	error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK,
	    (vmem_addr_t *)&qframe);
	if (error != 0)
		panic("qframe allocation failed");
}

static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
    "2MB page mapping counters");

static u_long pmap_pde_demotions;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
    &pmap_pde_demotions, 0, "2MB page demotions");

static u_long pmap_pde_mappings;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
    &pmap_pde_mappings, 0, "2MB page mappings");

static u_long pmap_pde_p_failures;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
    &pmap_pde_p_failures, 0, "2MB page promotion failures");

static u_long pmap_pde_promotions;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
    &pmap_pde_promotions, 0, "2MB page promotions");

static SYSCTL_NODE(_vm_pmap, OID_AUTO, pdpe, CTLFLAG_RD, 0,
    "1GB page mapping counters");

static u_long pmap_pdpe_demotions;
SYSCTL_ULONG(_vm_pmap_pdpe, OID_AUTO, demotions, CTLFLAG_RD,
    &pmap_pdpe_demotions, 0, "1GB page demotions");

/***************************************************
 * Low level helper routines.....
 ***************************************************/

static pt_entry_t
pmap_swap_pat(pmap_t pmap, pt_entry_t entry)
{
	int x86_pat_bits = X86_PG_PTE_PAT | X86_PG_PDE_PAT;

	switch (pmap->pm_type) {
	case PT_X86:
	case PT_RVI:
		/* Verify that both PAT bits are not set at the same time */
		KASSERT((entry & x86_pat_bits) != x86_pat_bits,
		    ("Invalid PAT bits in entry %#lx", entry));

		/* Swap the PAT bits if one of them is set */
		if ((entry & x86_pat_bits) != 0)
			entry ^= x86_pat_bits;
		break;
	case PT_EPT:
		/*
		 * Nothing to do - the memory attributes are represented
		 * the same way for regular pages and superpages.
		 */
		break;
	default:
		panic("pmap_swap_pat: bad pm_type %d", pmap->pm_type);
	}

	return (entry);
}

/*
 * Determine the appropriate bits to set in a PTE or PDE for a specified
 * caching mode.
 */
int
pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
{
	int cache_bits, pat_flag, pat_idx;

	if (mode < 0 || mode >= PAT_INDEX_SIZE || pat_index[mode] < 0)
		panic("Unknown caching mode %d", mode);

	switch (pmap->pm_type) {
	case PT_X86:
	case PT_RVI:
		/* The PAT bit is different for PTEs and PDEs. */
		pat_flag = is_pde ? X86_PG_PDE_PAT : X86_PG_PTE_PAT;

		/* Map the caching mode to a PAT index. */
		pat_idx = pat_index[mode];

		/*
		 * Map the 3-bit index value into the PAT, PCD, and PWT bits.
*/ cache_bits = 0; if (pat_idx & 0x4) cache_bits |= pat_flag; if (pat_idx & 0x2) cache_bits |= PG_NC_PCD; if (pat_idx & 0x1) cache_bits |= PG_NC_PWT; break; case PT_EPT: cache_bits = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(mode); break; default: panic("unsupported pmap type %d", pmap->pm_type); } return (cache_bits); } static int pmap_cache_mask(pmap_t pmap, boolean_t is_pde) { int mask; switch (pmap->pm_type) { case PT_X86: case PT_RVI: mask = is_pde ? X86_PG_PDE_CACHE : X86_PG_PTE_CACHE; break; case PT_EPT: mask = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(0x7); break; default: panic("pmap_cache_mask: invalid pm_type %d", pmap->pm_type); } return (mask); } static __inline boolean_t pmap_ps_enabled(pmap_t pmap) { return (pg_ps_enabled && (pmap->pm_flags & PMAP_PDE_SUPERPAGE) != 0); } static void pmap_update_pde_store(pmap_t pmap, pd_entry_t *pde, pd_entry_t newpde) { switch (pmap->pm_type) { case PT_X86: break; case PT_RVI: case PT_EPT: /* * XXX * This is a little bogus since the generation number is * supposed to be bumped up when a region of the address * space is invalidated in the page tables. * * In this case the old PDE entry is valid but yet we want * to make sure that any mappings using the old entry are * invalidated in the TLB. * * The reason this works as expected is because we rendezvous * "all" host cpus and force any vcpu context to exit as a * side-effect. */ atomic_add_acq_long(&pmap->pm_eptgen, 1); break; default: panic("pmap_update_pde_store: bad pm_type %d", pmap->pm_type); } pde_store(pde, newpde); } /* * After changing the page size for the specified virtual address in the page * table, flush the corresponding entries from the processor's TLB. Only the * calling processor's TLB is affected. * * The calling thread must be pinned to a processor. */ static void pmap_update_pde_invalidate(pmap_t pmap, vm_offset_t va, pd_entry_t newpde) { pt_entry_t PG_G; if (pmap_type_guest(pmap)) return; KASSERT(pmap->pm_type == PT_X86, ("pmap_update_pde_invalidate: invalid type %d", pmap->pm_type)); PG_G = pmap_global_bit(pmap); if ((newpde & PG_PS) == 0) /* Demotion: flush a specific 2MB page mapping. */ invlpg(va); else if ((newpde & PG_G) == 0) /* * Promotion: flush every 4KB page mapping from the TLB * because there are too many to flush individually. */ invltlb(); else { /* * Promotion: flush every 4KB page mapping from the TLB, * including any global (PG_G) mappings. */ invltlb_glob(); } } #ifdef SMP /* * For SMP, these functions have to use the IPI mechanism for coherence. * * N.B.: Before calling any of the following TLB invalidation functions, * the calling processor must ensure that all stores updating a non- * kernel page table are globally performed. Otherwise, another * processor could cache an old, pre-update entry without being * invalidated. This can happen one of two ways: (1) The pmap becomes * active on another processor after its pm_active field is checked by * one of the following functions but before a store updating the page * table is globally performed. (2) The pmap becomes active on another * processor before its pm_active field is checked but due to * speculative loads one of the following functions stills reads the * pmap as inactive on the other processor. * * The kernel page table is exempt because its pm_active field is * immutable. The kernel page table is always active on every * processor. */ /* * Interrupt the cpus that are executing in the guest context. 
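/*
 * Illustrative aside, not part of pmap.c: pm_eptgen, bumped above in
 * pmap_update_pde_store(), is a generation counter.  The host advances it
 * whenever EPT mappings change; each vcpu caches the value it last
 * flushed for and compares on vmresume, running invept only when stale.
 * A minimal C11 sketch of that handshake (all names are hypothetical):
 */
#include <stdatomic.h>
#include <stdbool.h>

struct ept_pmap {
    atomic_long eptgen;		/* bumped on each EPT change */
};

struct vcpu {
    long eptgen_cached;		/* generation last flushed for */
};

/* Host side: invalidate by publishing a new generation. */
static void
ept_invalidate(struct ept_pmap *p)
{
    atomic_fetch_add(&p->eptgen, 1);
    /* ...then IPI the active vcpus so they re-check before resuming. */
}

/* Guest-entry side: report whether stale translations must be flushed. */
static bool
vcpu_needs_invept(struct vcpu *v, struct ept_pmap *p)
{
    long gen = atomic_load(&p->eptgen);

    if (v->eptgen_cached == gen)
        return (false);
    v->eptgen_cached = gen;
    return (true);		/* caller executes invept */
}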
* This will force the vcpu to exit and the cached EPT mappings * will be invalidated by the host before the next vmresume. */ static __inline void pmap_invalidate_ept(pmap_t pmap) { int ipinum; sched_pin(); KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active), ("pmap_invalidate_ept: absurd pm_active")); /* * The TLB mappings associated with a vcpu context are not * flushed each time a different vcpu is chosen to execute. * * This is in contrast with a process's vtop mappings that * are flushed from the TLB on each context switch. * * Therefore we need to do more than just a TLB shootdown on * the active cpus in 'pmap->pm_active'. To do this we keep * track of the number of invalidations performed on this pmap. * * Each vcpu keeps a cache of this counter and compares it * just before a vmresume. If the counter is out-of-date an * invept will be done to flush stale mappings from the TLB. */ atomic_add_acq_long(&pmap->pm_eptgen, 1); /* * Force the vcpu to exit and trap back into the hypervisor. */ ipinum = pmap->pm_flags & PMAP_NESTED_IPIMASK; ipi_selected(pmap->pm_active, ipinum); sched_unpin(); } void pmap_invalidate_page(pmap_t pmap, vm_offset_t va) { cpuset_t *mask; u_int cpuid, i; if (pmap_type_guest(pmap)) { pmap_invalidate_ept(pmap); return; } KASSERT(pmap->pm_type == PT_X86, ("pmap_invalidate_page: invalid type %d", pmap->pm_type)); sched_pin(); if (pmap == kernel_pmap) { invlpg(va); mask = &all_cpus; } else { cpuid = PCPU_GET(cpuid); if (pmap == PCPU_GET(curpmap)) invlpg(va); else if (pmap_pcid_enabled) pmap->pm_pcids[cpuid].pm_gen = 0; if (pmap_pcid_enabled) { CPU_FOREACH(i) { if (cpuid != i) pmap->pm_pcids[i].pm_gen = 0; } } mask = &pmap->pm_active; } smp_masked_invlpg(*mask, va); sched_unpin(); } /* 4k PTEs -- Chosen to exceed the total size of Broadwell L2 TLB */ #define PMAP_INVLPG_THRESHOLD (4 * 1024 * PAGE_SIZE) void pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { cpuset_t *mask; vm_offset_t addr; u_int cpuid, i; if (eva - sva >= PMAP_INVLPG_THRESHOLD) { pmap_invalidate_all(pmap); return; } if (pmap_type_guest(pmap)) { pmap_invalidate_ept(pmap); return; } KASSERT(pmap->pm_type == PT_X86, ("pmap_invalidate_range: invalid type %d", pmap->pm_type)); sched_pin(); cpuid = PCPU_GET(cpuid); if (pmap == kernel_pmap) { for (addr = sva; addr < eva; addr += PAGE_SIZE) invlpg(addr); mask = &all_cpus; } else { if (pmap == PCPU_GET(curpmap)) { for (addr = sva; addr < eva; addr += PAGE_SIZE) invlpg(addr); } else if (pmap_pcid_enabled) { pmap->pm_pcids[cpuid].pm_gen = 0; } if (pmap_pcid_enabled) { CPU_FOREACH(i) { if (cpuid != i) pmap->pm_pcids[i].pm_gen = 0; } } mask = &pmap->pm_active; } smp_masked_invlpg_range(*mask, sva, eva); sched_unpin(); } void pmap_invalidate_all(pmap_t pmap) { cpuset_t *mask; struct invpcid_descr d; u_int cpuid, i; if (pmap_type_guest(pmap)) { pmap_invalidate_ept(pmap); return; } KASSERT(pmap->pm_type == PT_X86, ("pmap_invalidate_all: invalid type %d", pmap->pm_type)); sched_pin(); if (pmap == kernel_pmap) { if (pmap_pcid_enabled && invpcid_works) { bzero(&d, sizeof(d)); invpcid(&d, INVPCID_CTXGLOB); } else { invltlb_glob(); } mask = &all_cpus; } else { cpuid = PCPU_GET(cpuid); if (pmap == PCPU_GET(curpmap)) { if (pmap_pcid_enabled) { if (invpcid_works) { d.pcid = pmap->pm_pcids[cpuid].pm_pcid; d.pad = 0; d.addr = 0; invpcid(&d, INVPCID_CTX); } else { load_cr3(pmap->pm_cr3 | pmap->pm_pcids [PCPU_GET(cpuid)].pm_pcid); } } else { invltlb(); } } else if (pmap_pcid_enabled) { pmap->pm_pcids[cpuid].pm_gen = 0; } if (pmap_pcid_enabled) { CPU_FOREACH(i) { if 
(cpuid != i) pmap->pm_pcids[i].pm_gen = 0; } } mask = &pmap->pm_active; } smp_masked_invltlb(*mask, pmap); sched_unpin(); } void pmap_invalidate_cache(void) { sched_pin(); wbinvd(); smp_cache_flush(); sched_unpin(); } struct pde_action { cpuset_t invalidate; /* processors that invalidate their TLB */ pmap_t pmap; vm_offset_t va; pd_entry_t *pde; pd_entry_t newpde; u_int store; /* processor that updates the PDE */ }; static void pmap_update_pde_action(void *arg) { struct pde_action *act = arg; if (act->store == PCPU_GET(cpuid)) pmap_update_pde_store(act->pmap, act->pde, act->newpde); } static void pmap_update_pde_teardown(void *arg) { struct pde_action *act = arg; if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate)) pmap_update_pde_invalidate(act->pmap, act->va, act->newpde); } /* * Change the page size for the specified virtual address in a way that * prevents any possibility of the TLB ever having two entries that map the * same virtual address using different page sizes. This is the recommended * workaround for Erratum 383 on AMD Family 10h processors. It prevents a * machine check exception for a TLB state that is improperly diagnosed as a * hardware error. */ static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde) { struct pde_action act; cpuset_t active, other_cpus; u_int cpuid; sched_pin(); cpuid = PCPU_GET(cpuid); other_cpus = all_cpus; CPU_CLR(cpuid, &other_cpus); if (pmap == kernel_pmap || pmap_type_guest(pmap)) active = all_cpus; else { active = pmap->pm_active; } if (CPU_OVERLAP(&active, &other_cpus)) { act.store = cpuid; act.invalidate = active; act.va = va; act.pmap = pmap; act.pde = pde; act.newpde = newpde; CPU_SET(cpuid, &active); smp_rendezvous_cpus(active, smp_no_rendezvous_barrier, pmap_update_pde_action, pmap_update_pde_teardown, &act); } else { pmap_update_pde_store(pmap, pde, newpde); if (CPU_ISSET(cpuid, &active)) pmap_update_pde_invalidate(pmap, va, newpde); } sched_unpin(); } #else /* !SMP */ /* * Normal, non-SMP, invalidation functions. */ void pmap_invalidate_page(pmap_t pmap, vm_offset_t va) { if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) { pmap->pm_eptgen++; return; } KASSERT(pmap->pm_type == PT_X86, ("pmap_invalidate_range: unknown type %d", pmap->pm_type)); if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap)) invlpg(va); else if (pmap_pcid_enabled) pmap->pm_pcids[0].pm_gen = 0; } void pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { vm_offset_t addr; if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) { pmap->pm_eptgen++; return; } KASSERT(pmap->pm_type == PT_X86, ("pmap_invalidate_range: unknown type %d", pmap->pm_type)); if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap)) { for (addr = sva; addr < eva; addr += PAGE_SIZE) invlpg(addr); } else if (pmap_pcid_enabled) { pmap->pm_pcids[0].pm_gen = 0; } } void pmap_invalidate_all(pmap_t pmap) { struct invpcid_descr d; if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) { pmap->pm_eptgen++; return; } KASSERT(pmap->pm_type == PT_X86, ("pmap_invalidate_all: unknown type %d", pmap->pm_type)); if (pmap == kernel_pmap) { if (pmap_pcid_enabled && invpcid_works) { bzero(&d, sizeof(d)); invpcid(&d, INVPCID_CTXGLOB); } else { invltlb_glob(); } } else if (pmap == PCPU_GET(curpmap)) { if (pmap_pcid_enabled) { if (invpcid_works) { d.pcid = pmap->pm_pcids[0].pm_pcid; d.pad = 0; d.addr = 0; invpcid(&d, INVPCID_CTX); } else { load_cr3(pmap->pm_cr3 | pmap->pm_pcids[0]. 
pm_pcid); } } else { invltlb(); } } else if (pmap_pcid_enabled) { pmap->pm_pcids[0].pm_gen = 0; } } PMAP_INLINE void pmap_invalidate_cache(void) { wbinvd(); } static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde) { pmap_update_pde_store(pmap, pde, newpde); if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap)) pmap_update_pde_invalidate(pmap, va, newpde); else pmap->pm_pcids[0].pm_gen = 0; } #endif /* !SMP */ static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde) { /* * When the PDE has PG_PROMOTED set, the 2MB page mapping was created * by a promotion that did not invalidate the 512 4KB page mappings * that might exist in the TLB. Consequently, at this point, the TLB * may hold both 4KB and 2MB page mappings for the address range [va, * va + NBPDR). Therefore, the entire range must be invalidated here. * In contrast, when PG_PROMOTED is clear, the TLB will not hold any * 4KB page mappings for the address range [va, va + NBPDR), and so a * single INVLPG suffices to invalidate the 2MB page mapping from the * TLB. */ if ((pde & PG_PROMOTED) != 0) pmap_invalidate_range(pmap, va, va + NBPDR - 1); else pmap_invalidate_page(pmap, va); } #define PMAP_CLFLUSH_THRESHOLD (2 * 1024 * 1024) void pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force) { if (force) { sva &= ~(vm_offset_t)cpu_clflush_line_size; } else { KASSERT((sva & PAGE_MASK) == 0, ("pmap_invalidate_cache_range: sva not page-aligned")); KASSERT((eva & PAGE_MASK) == 0, ("pmap_invalidate_cache_range: eva not page-aligned")); } if ((cpu_feature & CPUID_SS) != 0 && !force) ; /* If "Self Snoop" is supported and allowed, do nothing. */ else if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0 && eva - sva < PMAP_CLFLUSH_THRESHOLD) { /* * XXX: Some CPUs fault, hang, or trash the local APIC * registers if we use CLFLUSH on the local APIC * range. The local APIC is always uncached, so we * don't need to flush for that range anyway. */ if (pmap_kextract(sva) == lapic_paddr) return; /* * Otherwise, do per-cache line flush. Use the sfence * instruction to insure that previous stores are * included in the write-back. The processor * propagates flush to other processors in the cache * coherence domain. */ sfence(); for (; sva < eva; sva += cpu_clflush_line_size) clflushopt(sva); sfence(); } else if ((cpu_feature & CPUID_CLFSH) != 0 && eva - sva < PMAP_CLFLUSH_THRESHOLD) { if (pmap_kextract(sva) == lapic_paddr) return; /* * Writes are ordered by CLFLUSH on Intel CPUs. */ if (cpu_vendor_id != CPU_VENDOR_INTEL) mfence(); for (; sva < eva; sva += cpu_clflush_line_size) clflush(sva); if (cpu_vendor_id != CPU_VENDOR_INTEL) mfence(); } else { /* * No targeted cache flush methods are supported by CPU, * or the supplied range is bigger than 2MB. * Globally invalidate cache. */ pmap_invalidate_cache(); } } /* * Remove the specified set of pages from the data and instruction caches. * * In contrast to pmap_invalidate_cache_range(), this function does not * rely on the CPU's self-snoop feature, because it is intended for use * when moving pages into a different cache domain. 
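/*
 * Illustrative aside, not part of pmap.c: a user-space sketch of the
 * CLFLUSHOPT branch of pmap_invalidate_cache_range() above, using the x86
 * intrinsics (compile with -mclflushopt; the 64-byte line size is an
 * assumption here, the kernel reads cpu_clflush_line_size from CPUID).
 * CLFLUSHOPT is weakly ordered, hence the bracketing SFENCEs.
 */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

#define CACHE_LINE	64

static void
flush_range_clflushopt(const volatile void *buf, size_t len)
{
    uintptr_t p = (uintptr_t)buf & ~(uintptr_t)(CACHE_LINE - 1);
    uintptr_t end = (uintptr_t)buf + len;

    _mm_sfence();			/* include prior stores in write-back */
    for (; p < end; p += CACHE_LINE)
        _mm_clflushopt((void *)p);
    _mm_sfence();			/* order flushes before later stores */
}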
*/ void pmap_invalidate_cache_pages(vm_page_t *pages, int count) { vm_offset_t daddr, eva; int i; bool useclflushopt; useclflushopt = (cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0; if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE || ((cpu_feature & CPUID_CLFSH) == 0 && !useclflushopt)) pmap_invalidate_cache(); else { if (useclflushopt) sfence(); else if (cpu_vendor_id != CPU_VENDOR_INTEL) mfence(); for (i = 0; i < count; i++) { daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i])); eva = daddr + PAGE_SIZE; for (; daddr < eva; daddr += cpu_clflush_line_size) { if (useclflushopt) clflushopt(daddr); else clflush(daddr); } } if (useclflushopt) sfence(); else if (cpu_vendor_id != CPU_VENDOR_INTEL) mfence(); } } /* * Routine: pmap_extract * Function: * Extract the physical page address associated * with the given map/virtual_address pair. */ vm_paddr_t pmap_extract(pmap_t pmap, vm_offset_t va) { pdp_entry_t *pdpe; pd_entry_t *pde; pt_entry_t *pte, PG_V; vm_paddr_t pa; pa = 0; PG_V = pmap_valid_bit(pmap); PMAP_LOCK(pmap); pdpe = pmap_pdpe(pmap, va); if (pdpe != NULL && (*pdpe & PG_V) != 0) { if ((*pdpe & PG_PS) != 0) pa = (*pdpe & PG_PS_FRAME) | (va & PDPMASK); else { pde = pmap_pdpe_to_pde(pdpe, va); if ((*pde & PG_V) != 0) { if ((*pde & PG_PS) != 0) { pa = (*pde & PG_PS_FRAME) | (va & PDRMASK); } else { pte = pmap_pde_to_pte(pde, va); pa = (*pte & PG_FRAME) | (va & PAGE_MASK); } } } } PMAP_UNLOCK(pmap); return (pa); } /* * Routine: pmap_extract_and_hold * Function: * Atomically extract and hold the physical page * with the given pmap and virtual address pair * if that mapping permits the given protection. */ vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) { pd_entry_t pde, *pdep; pt_entry_t pte, PG_RW, PG_V; vm_paddr_t pa; vm_page_t m; pa = 0; m = NULL; PG_RW = pmap_rw_bit(pmap); PG_V = pmap_valid_bit(pmap); PMAP_LOCK(pmap); retry: pdep = pmap_pde(pmap, va); if (pdep != NULL && (pde = *pdep)) { if (pde & PG_PS) { if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) { if (vm_page_pa_tryrelock(pmap, (pde & PG_PS_FRAME) | (va & PDRMASK), &pa)) goto retry; m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) | (va & PDRMASK)); vm_page_hold(m); } } else { pte = *pmap_pde_to_pte(pdep, va); if ((pte & PG_V) && ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) { if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME, &pa)) goto retry; m = PHYS_TO_VM_PAGE(pte & PG_FRAME); vm_page_hold(m); } } } PA_UNLOCK_COND(pa); PMAP_UNLOCK(pmap); return (m); } vm_paddr_t pmap_kextract(vm_offset_t va) { pd_entry_t pde; vm_paddr_t pa; if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) { pa = DMAP_TO_PHYS(va); } else { pde = *vtopde(va); if (pde & PG_PS) { pa = (pde & PG_PS_FRAME) | (va & PDRMASK); } else { /* * Beware of a concurrent promotion that changes the * PDE at this point! For example, vtopte() must not * be used to access the PTE because it would use the * new PDE. It is, however, safe to use the old PDE * because the page table page is preserved by the * promotion. */ pa = *pmap_pde_to_pte(&pde, va); pa = (pa & PG_FRAME) | (va & PAGE_MASK); } } return (pa); } /*************************************************** * Low level mapping routines..... ***************************************************/ /* * Add a wired page to the kva. * Note: not SMP coherent. 
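/*
 * Illustrative aside, not part of pmap.c: pmap_extract() above composes
 * the physical address from the frame bits of whichever level terminated
 * the walk plus the low bits of the virtual address.  A user-space sketch
 * of that composition for the 4KB and 2MB cases, with the amd64 masks
 * defined locally:
 */
#include <stdint.h>
#include <stdio.h>

#define PG_FRAME	0x000ffffffffff000ULL	/* 4KB frame bits */
#define PG_PS_FRAME	0x000fffffffe00000ULL	/* 2MB frame bits */
#define PAGE_MASK	0xfffULL
#define PDRMASK		0x1fffffULL

int
main(void)
{
    uint64_t va = 0x00007f0000312345ULL;
    uint64_t pte = 0x123456789000ULL | 0x1;	/* 4KB mapping, PG_V */
    uint64_t pde = 0x123400000000ULL | 0x81;	/* 2MB mapping, PG_PS|PG_V */

    printf("4KB: %#jx\n", (uintmax_t)((pte & PG_FRAME) | (va & PAGE_MASK)));
    printf("2MB: %#jx\n", (uintmax_t)((pde & PG_PS_FRAME) | (va & PDRMASK)));
    return (0);
}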
*/ PMAP_INLINE void pmap_kenter(vm_offset_t va, vm_paddr_t pa) { pt_entry_t *pte; pte = vtopte(va); pte_store(pte, pa | X86_PG_RW | X86_PG_V | X86_PG_G); } static __inline void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode) { pt_entry_t *pte; int cache_bits; pte = vtopte(va); cache_bits = pmap_cache_bits(kernel_pmap, mode, 0); pte_store(pte, pa | X86_PG_RW | X86_PG_V | X86_PG_G | cache_bits); } /* * Remove a page from the kernel pagetables. * Note: not SMP coherent. */ PMAP_INLINE void pmap_kremove(vm_offset_t va) { pt_entry_t *pte; pte = vtopte(va); pte_clear(pte); } /* * Used to map a range of physical addresses into kernel * virtual address space. * * The value passed in '*virt' is a suggested virtual address for * the mapping. Architectures which can support a direct-mapped * physical to virtual region can return the appropriate address * within that region, leaving '*virt' unchanged. Other * architectures should map the pages starting at '*virt' and * update '*virt' with the first usable address after the mapped * region. */ vm_offset_t pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) { return PHYS_TO_DMAP(start); } /* * Add a list of wired pages to the kva * this routine is only used for temporary * kernel mappings that do not need to have * page modification or references recorded. * Note that old mappings are simply written * over. The page *must* be wired. * Note: SMP coherent. Uses a ranged shootdown IPI. */ void pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count) { pt_entry_t *endpte, oldpte, pa, *pte; vm_page_t m; int cache_bits; oldpte = 0; pte = vtopte(sva); endpte = pte + count; while (pte < endpte) { m = *ma++; cache_bits = pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0); pa = VM_PAGE_TO_PHYS(m) | cache_bits; if ((*pte & (PG_FRAME | X86_PG_PTE_CACHE)) != pa) { oldpte |= *pte; pte_store(pte, pa | X86_PG_G | X86_PG_RW | X86_PG_V); } pte++; } if (__predict_false((oldpte & X86_PG_V) != 0)) pmap_invalidate_range(kernel_pmap, sva, sva + count * PAGE_SIZE); } /* * This routine tears out page mappings from the * kernel -- it is meant only for temporary mappings. * Note: SMP coherent. Uses a ranged shootdown IPI. */ void pmap_qremove(vm_offset_t sva, int count) { vm_offset_t va; va = sva; while (count-- > 0) { KASSERT(va >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", va)); pmap_kremove(va); va += PAGE_SIZE; } pmap_invalidate_range(kernel_pmap, sva, va); } /*************************************************** * Page table page management routines..... ***************************************************/ static __inline void pmap_free_zero_pages(struct spglist *free) { vm_page_t m; while ((m = SLIST_FIRST(free)) != NULL) { SLIST_REMOVE_HEAD(free, plinks.s.ss); /* Preserve the page's PG_ZERO setting. */ vm_page_free_toq(m); } } /* * Schedule the specified unused page table page to be freed. Specifically, * add the page to the specified list of pages that will be released to the * physical memory manager after the TLB has been updated. */ static __inline void pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, boolean_t set_PG_ZERO) { if (set_PG_ZERO) m->flags |= PG_ZERO; else m->flags &= ~PG_ZERO; SLIST_INSERT_HEAD(free, m, plinks.s.ss); } /* * Inserts the specified page table page into the specified pmap's collection * of idle page table pages. Each of a pmap's page table pages is responsible * for mapping a distinct range of virtual addresses. The pmap's collection is * ordered by this virtual address range. 
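/*
 * Illustrative aside, not part of pmap.c: pmap_qenter() above ORs every
 * overwritten PTE into "oldpte" and issues a single ranged invalidation
 * only if some old entry was valid.  A sketch of that batching pattern
 * over a plain array standing in for the page table:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PG_V	0x1ULL

/* Overwrite "count" PTEs, reporting whether any old entry was valid. */
static bool
store_batch(uint64_t *pte, const uint64_t *newvals, int count)
{
    uint64_t oldpte = 0;
    int i;

    for (i = 0; i < count; i++) {
        oldpte |= pte[i];	/* remember what was there */
        pte[i] = newvals[i];
    }
    return ((oldpte & PG_V) != 0);
}

int
main(void)
{
    uint64_t table[4] = { 0, PG_V | 0x1000, 0, 0 };
    uint64_t repl[4] = { PG_V | 0x5000, PG_V | 0x6000, PG_V | 0x7000,
        PG_V | 0x8000 };

    /* Slot 1 held a valid entry, so one ranged shootdown is needed. */
    printf("flush needed: %d\n", store_batch(table, repl, 4));
    return (0);
}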
*/ static __inline int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); return (vm_radix_insert(&pmap->pm_root, mpte)); } /* * Removes the page table page mapping the specified virtual address from the * specified pmap's collection of idle page table pages, and returns it. * Otherwise, returns NULL if there is no page table page corresponding to the * specified virtual address. */ static __inline vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); return (vm_radix_remove(&pmap->pm_root, pmap_pde_pindex(va))); } /* * Decrements a page table page's wire count, which is used to record the * number of valid page table entries within the page. If the wire count * drops to zero, then the page table page is unmapped. Returns TRUE if the * page table page was unmapped and FALSE otherwise. */ static inline boolean_t pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) { --m->wire_count; if (m->wire_count == 0) { _pmap_unwire_ptp(pmap, va, m, free); return (TRUE); } else return (FALSE); } static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* * unmap the page table page */ if (m->pindex >= (NUPDE + NUPDPE)) { /* PDP page */ pml4_entry_t *pml4; pml4 = pmap_pml4e(pmap, va); *pml4 = 0; } else if (m->pindex >= NUPDE) { /* PD page */ pdp_entry_t *pdp; pdp = pmap_pdpe(pmap, va); *pdp = 0; } else { /* PTE page */ pd_entry_t *pd; pd = pmap_pde(pmap, va); *pd = 0; } pmap_resident_count_dec(pmap, 1); if (m->pindex < NUPDE) { /* We just released a PT, unhold the matching PD */ vm_page_t pdpg; pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME); pmap_unwire_ptp(pmap, va, pdpg, free); } if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) { /* We just released a PD, unhold the matching PDP */ vm_page_t pdppg; pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME); pmap_unwire_ptp(pmap, va, pdppg, free); } /* * This is a release store so that the ordinary store unmapping * the page table page is globally performed before TLB shoot- * down is begun. */ atomic_subtract_rel_int(&vm_cnt.v_wire_count, 1); /* * Put page on a list so that it is released after * *ALL* TLB shootdown is done */ pmap_add_delayed_free_list(m, free, TRUE); } /* * After removing a page table entry, this routine is used to * conditionally free the page, and manage the hold/wire counts. */ static int pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde, struct spglist *free) { vm_page_t mpte; if (va >= VM_MAXUSER_ADDRESS) return (0); KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0")); mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME); return (pmap_unwire_ptp(pmap, va, mpte, free)); } void pmap_pinit0(pmap_t pmap) { int i; PMAP_LOCK_INIT(pmap); pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(KPML4phys); pmap->pm_cr3 = KPML4phys; pmap->pm_root.rt_root = 0; CPU_ZERO(&pmap->pm_active); TAILQ_INIT(&pmap->pm_pvchunk); bzero(&pmap->pm_stats, sizeof pmap->pm_stats); pmap->pm_flags = pmap_flags; CPU_FOREACH(i) { pmap->pm_pcids[i].pm_pcid = PMAP_PCID_NONE; pmap->pm_pcids[i].pm_gen = 0; } PCPU_SET(curpmap, kernel_pmap); pmap_activate(curthread); CPU_FILL(&kernel_pmap->pm_active); } void pmap_pinit_pml4(vm_page_t pml4pg) { pml4_entry_t *pm_pml4; int i; pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg)); /* Wire in kernel global address entries. 
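/*
 * Illustrative aside, not part of pmap.c: page table pages carry pindex
 * values partitioned by level -- [0, NUPDE) are PT pages, [NUPDE,
 * NUPDE + NUPDPE) are PD pages, and above that are PDP pages -- which is
 * what _pmap_unwire_ptp() above keys its parent bookkeeping on.  A sketch
 * with the amd64 constants computed locally:
 */
#include <stdio.h>

#define NPDEPG		512UL
#define NPDPEPG		512UL
#define NUPML4E		256UL			/* user half of the PML4 */
#define NUPDPE		(NUPML4E * NPDPEPG)	/* 131072 */
#define NUPDE		(NUPDPE * NPDEPG)	/* 67108864 */

static const char *
ptpage_level(unsigned long pindex)
{
    if (pindex >= NUPDE + NUPDPE)
        return ("PDP page (its entry lives in the PML4)");
    if (pindex >= NUPDE)
        return ("PD page (its entry lives in a PDP)");
    return ("PT page (its entry lives in a PD)");
}

int
main(void)
{
    printf("%s\n", ptpage_level(0));
    printf("%s\n", ptpage_level(NUPDE));
    printf("%s\n", ptpage_level(NUPDE + NUPDPE));
    return (0);
}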
*/ for (i = 0; i < NKPML4E; i++) { pm_pml4[KPML4BASE + i] = (KPDPphys + ptoa(i)) | X86_PG_RW | X86_PG_V | PG_U; } for (i = 0; i < ndmpdpphys; i++) { pm_pml4[DMPML4I + i] = (DMPDPphys + ptoa(i)) | X86_PG_RW | X86_PG_V | PG_U; } /* install self-referential address mapping entry(s) */ pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M; } /* * Initialize a preallocated and zeroed pmap structure, * such as one in a vmspace structure. */ int pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags) { vm_page_t pml4pg; vm_paddr_t pml4phys; int i; /* * allocate the page directory page */ while ((pml4pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) VM_WAIT; pml4phys = VM_PAGE_TO_PHYS(pml4pg); pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(pml4phys); CPU_FOREACH(i) { pmap->pm_pcids[i].pm_pcid = PMAP_PCID_NONE; pmap->pm_pcids[i].pm_gen = 0; } pmap->pm_cr3 = ~0; /* initialize to an invalid value */ if ((pml4pg->flags & PG_ZERO) == 0) pagezero(pmap->pm_pml4); /* * Do not install the host kernel mappings in the nested page * tables. These mappings are meaningless in the guest physical * address space. */ if ((pmap->pm_type = pm_type) == PT_X86) { pmap->pm_cr3 = pml4phys; pmap_pinit_pml4(pml4pg); } pmap->pm_root.rt_root = 0; CPU_ZERO(&pmap->pm_active); TAILQ_INIT(&pmap->pm_pvchunk); bzero(&pmap->pm_stats, sizeof pmap->pm_stats); pmap->pm_flags = flags; pmap->pm_eptgen = 0; return (1); } int pmap_pinit(pmap_t pmap) { return (pmap_pinit_type(pmap, PT_X86, pmap_flags)); } /* * This routine is called if the desired page table page does not exist. * * If page table page allocation fails, this routine may sleep before * returning NULL. It sleeps only if a lock pointer was given. * * Note: If a page allocation fails at page table level two or three, * one or two pages may be held during the wait, only to be released * afterwards. This conservative approach is easily argued to avoid * race conditions. */ static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp) { vm_page_t m, pdppg, pdpg; pt_entry_t PG_A, PG_M, PG_RW, PG_V; PMAP_LOCK_ASSERT(pmap, MA_OWNED); PG_A = pmap_accessed_bit(pmap); PG_M = pmap_modified_bit(pmap); PG_V = pmap_valid_bit(pmap); PG_RW = pmap_rw_bit(pmap); /* * Allocate a page table page. */ if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { if (lockp != NULL) { RELEASE_PV_LIST_LOCK(lockp); PMAP_UNLOCK(pmap); PMAP_ASSERT_NOT_IN_DI(); VM_WAIT; PMAP_LOCK(pmap); } /* * Indicate the need to retry. While waiting, the page table * page may have been allocated. */ return (NULL); } if ((m->flags & PG_ZERO) == 0) pmap_zero_page(m); /* * Map the pagetable page into the process address space, if * it isn't already there. 
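/*
 * Illustrative aside, not part of pmap.c: the self-referential PML4 entry
 * installed above makes every PTE addressable at a fixed virtual window.
 * A user-space sketch of the vtopte()-style arithmetic, assuming the
 * stock amd64 recursive slot (PML4 index 256, whose sign-extended window
 * base is 0xffff800000000000):
 */
#include <stdint.h>
#include <stdio.h>

#define PTMAP_BASE	0xffff800000000000ULL	/* PML4 slot 256, sign-extended */
#define PAGE_SHIFT	12

int
main(void)
{
    uint64_t va = 0x00007f0000403000ULL;
    /* 36 bits of page-table index: 9 bits per level times 4 levels. */
    uint64_t idx = (va >> PAGE_SHIFT) & ((1ULL << 36) - 1);
    uint64_t pte_va = PTMAP_BASE + idx * sizeof(uint64_t);

    printf("PTE for %#jx is mapped at %#jx\n", (uintmax_t)va,
        (uintmax_t)pte_va);
    return (0);
}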
*/ if (ptepindex >= (NUPDE + NUPDPE)) { pml4_entry_t *pml4; vm_pindex_t pml4index; /* Wire up a new PDPE page */ pml4index = ptepindex - (NUPDE + NUPDPE); pml4 = &pmap->pm_pml4[pml4index]; *pml4 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M; } else if (ptepindex >= NUPDE) { vm_pindex_t pml4index; vm_pindex_t pdpindex; pml4_entry_t *pml4; pdp_entry_t *pdp; /* Wire up a new PDE page */ pdpindex = ptepindex - NUPDE; pml4index = pdpindex >> NPML4EPGSHIFT; pml4 = &pmap->pm_pml4[pml4index]; if ((*pml4 & PG_V) == 0) { /* Have to allocate a new pdp, recurse */ if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index, lockp) == NULL) { --m->wire_count; atomic_subtract_int(&vm_cnt.v_wire_count, 1); vm_page_free_zero(m); return (NULL); } } else { /* Add reference to pdp page */ pdppg = PHYS_TO_VM_PAGE(*pml4 & PG_FRAME); pdppg->wire_count++; } pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME); /* Now find the pdp page */ pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)]; *pdp = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M; } else { vm_pindex_t pml4index; vm_pindex_t pdpindex; pml4_entry_t *pml4; pdp_entry_t *pdp; pd_entry_t *pd; /* Wire up a new PTE page */ pdpindex = ptepindex >> NPDPEPGSHIFT; pml4index = pdpindex >> NPML4EPGSHIFT; /* First, find the pdp and check that its valid. */ pml4 = &pmap->pm_pml4[pml4index]; if ((*pml4 & PG_V) == 0) { /* Have to allocate a new pd, recurse */ if (_pmap_allocpte(pmap, NUPDE + pdpindex, lockp) == NULL) { --m->wire_count; atomic_subtract_int(&vm_cnt.v_wire_count, 1); vm_page_free_zero(m); return (NULL); } pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME); pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)]; } else { pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME); pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)]; if ((*pdp & PG_V) == 0) { /* Have to allocate a new pd, recurse */ if (_pmap_allocpte(pmap, NUPDE + pdpindex, lockp) == NULL) { --m->wire_count; atomic_subtract_int(&vm_cnt.v_wire_count, 1); vm_page_free_zero(m); return (NULL); } } else { /* Add reference to the pd page */ pdpg = PHYS_TO_VM_PAGE(*pdp & PG_FRAME); pdpg->wire_count++; } } pd = (pd_entry_t *)PHYS_TO_DMAP(*pdp & PG_FRAME); /* Now we know where the page directory page is */ pd = &pd[ptepindex & ((1ul << NPDEPGSHIFT) - 1)]; *pd = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M; } pmap_resident_count_inc(pmap, 1); return (m); } static vm_page_t pmap_allocpde(pmap_t pmap, vm_offset_t va, struct rwlock **lockp) { vm_pindex_t pdpindex, ptepindex; pdp_entry_t *pdpe, PG_V; vm_page_t pdpg; PG_V = pmap_valid_bit(pmap); retry: pdpe = pmap_pdpe(pmap, va); if (pdpe != NULL && (*pdpe & PG_V) != 0) { /* Add a reference to the pd page. */ pdpg = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME); pdpg->wire_count++; } else { /* Allocate a pd page. */ ptepindex = pmap_pde_pindex(va); pdpindex = ptepindex >> NPDPEPGSHIFT; pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, lockp); if (pdpg == NULL && lockp != NULL) goto retry; } return (pdpg); } static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, struct rwlock **lockp) { vm_pindex_t ptepindex; pd_entry_t *pd, PG_V; vm_page_t m; PG_V = pmap_valid_bit(pmap); /* * Calculate pagetable page index */ ptepindex = pmap_pde_pindex(va); retry: /* * Get the page directory entry */ pd = pmap_pde(pmap, va); /* * This supports switching from a 2MB page to a * normal 4K page. 
*/ if (pd != NULL && (*pd & (PG_PS | PG_V)) == (PG_PS | PG_V)) { if (!pmap_demote_pde_locked(pmap, pd, va, lockp)) { /* * Invalidation of the 2MB page mapping may have caused * the deallocation of the underlying PD page. */ pd = NULL; } } /* * If the page table page is mapped, we just increment the * hold count, and activate it. */ if (pd != NULL && (*pd & PG_V) != 0) { m = PHYS_TO_VM_PAGE(*pd & PG_FRAME); m->wire_count++; } else { /* * Here if the pte page isn't mapped, or if it has been * deallocated. */ m = _pmap_allocpte(pmap, ptepindex, lockp); if (m == NULL && lockp != NULL) goto retry; } return (m); } /*************************************************** * Pmap allocation/deallocation routines. ***************************************************/ /* * Release any resources held by the given physical map. * Called when a pmap initialized by pmap_pinit is being released. * Should only be called if the map contains no valid mappings. */ void pmap_release(pmap_t pmap) { vm_page_t m; int i; KASSERT(pmap->pm_stats.resident_count == 0, ("pmap_release: pmap resident count %ld != 0", pmap->pm_stats.resident_count)); KASSERT(vm_radix_is_empty(&pmap->pm_root), ("pmap_release: pmap has reserved page table page(s)")); KASSERT(CPU_EMPTY(&pmap->pm_active), ("releasing active pmap %p", pmap)); m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml4)); for (i = 0; i < NKPML4E; i++) /* KVA */ pmap->pm_pml4[KPML4BASE + i] = 0; for (i = 0; i < ndmpdpphys; i++)/* Direct Map */ pmap->pm_pml4[DMPML4I + i] = 0; pmap->pm_pml4[PML4PML4I] = 0; /* Recursive Mapping */ m->wire_count--; atomic_subtract_int(&vm_cnt.v_wire_count, 1); vm_page_free_zero(m); } static int kvm_size(SYSCTL_HANDLER_ARGS) { unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS; return sysctl_handle_long(oidp, &ksize, 0, req); } SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 0, 0, kvm_size, "LU", "Size of KVM"); static int kvm_free(SYSCTL_HANDLER_ARGS) { unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; return sysctl_handle_long(oidp, &kfree, 0, req); } SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 0, 0, kvm_free, "LU", "Amount of KVM free"); /* * grow the number of kernel page table entries, if needed */ void pmap_growkernel(vm_offset_t addr) { vm_paddr_t paddr; vm_page_t nkpg; pd_entry_t *pde, newpdir; pdp_entry_t *pdpe; mtx_assert(&kernel_map->system_mtx, MA_OWNED); /* * Return if "addr" is within the range of kernel page table pages * that were preallocated during pmap bootstrap. Moreover, leave * "kernel_vm_end" and the kernel page table as they were. * * The correctness of this action is based on the following * argument: vm_map_insert() allocates contiguous ranges of the * kernel virtual address space. It calls this function if a range * ends after "kernel_vm_end". If the kernel is mapped between * "kernel_vm_end" and "addr", then the range cannot begin at * "kernel_vm_end". In fact, its beginning address cannot be less * than the kernel. Thus, there is no immediate need to allocate * any new kernel page table pages between "kernel_vm_end" and * "KERNBASE". 
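/*
 * Illustrative aside, not part of pmap.c: pmap_growkernel() below first
 * rounds the request up to a 2MB boundary with roundup2(), so the kernel
 * page table always grows a whole PDE at a time.  The power-of-two
 * rounding trick in isolation:
 */
#include <stdio.h>

#define NBPDR		(1UL << 21)
#define roundup2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))	/* y is 2^n */

int
main(void)
{
    unsigned long addr = 0xffffffff80200001UL;

    /* Prints 0xffffffff80400000: the next 2MB boundary. */
    printf("%#lx -> %#lx\n", addr, roundup2(addr, NBPDR));
    return (0);
}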
*/ if (KERNBASE < addr && addr <= KERNBASE + nkpt * NBPDR) return; addr = roundup2(addr, NBPDR); if (addr - 1 >= kernel_map->max_offset) addr = kernel_map->max_offset; while (kernel_vm_end < addr) { pdpe = pmap_pdpe(kernel_pmap, kernel_vm_end); if ((*pdpe & X86_PG_V) == 0) { /* We need a new PDP entry */ nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDPSHIFT, VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if (nkpg == NULL) panic("pmap_growkernel: no memory to grow kernel"); if ((nkpg->flags & PG_ZERO) == 0) pmap_zero_page(nkpg); paddr = VM_PAGE_TO_PHYS(nkpg); *pdpe = (pdp_entry_t)(paddr | X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M); continue; /* try again */ } pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end); if ((*pde & X86_PG_V) != 0) { kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; if (kernel_vm_end - 1 >= kernel_map->max_offset) { kernel_vm_end = kernel_map->max_offset; break; } continue; } nkpg = vm_page_alloc(NULL, pmap_pde_pindex(kernel_vm_end), VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if (nkpg == NULL) panic("pmap_growkernel: no memory to grow kernel"); if ((nkpg->flags & PG_ZERO) == 0) pmap_zero_page(nkpg); paddr = VM_PAGE_TO_PHYS(nkpg); newpdir = paddr | X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M; pde_store(pde, newpdir); kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; if (kernel_vm_end - 1 >= kernel_map->max_offset) { kernel_vm_end = kernel_map->max_offset; break; } } } /*************************************************** * page management routines. ***************************************************/ CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); CTASSERT(_NPCM == 3); CTASSERT(_NPCPV == 168); static __inline struct pv_chunk * pv_to_chunk(pv_entry_t pv) { return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK)); } #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) #define PC_FREE0 0xfffffffffffffffful #define PC_FREE1 0xfffffffffffffffful #define PC_FREE2 0x000000fffffffffful static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 }; #ifdef PV_STATS static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, "Current number of pv entry chunks"); SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, "Current number of pv entry chunks allocated"); SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, "Current number of pv entry chunks frees"); SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0, "Number of times tried to get a chunk page but failed."); static long pv_entry_frees, pv_entry_allocs, pv_entry_count; static int pv_entry_spare; SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, "Current number of pv entry frees"); SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0, "Current number of pv entry allocs"); SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, "Current number of pv entries"); SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, "Current number of spare pv entries"); #endif /* * We are in a serious low memory condition. Resort to * drastic measures to free some pages so we can allocate * another pv entry chunk. * * Returns NULL if PV entries were reclaimed from the specified pmap. 
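/*
 * Illustrative aside, not part of pmap.c: the CTASSERTs and PC_FREE masks
 * above pin down the pv chunk layout -- a 64-byte header plus 168
 * 24-byte pv entries exactly fills a 4KB page, and the third freemask
 * word only needs 168 - 128 = 40 bits.  A user-space check of that
 * arithmetic:
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define _NPCPV		168
#define PC_FREE2	0x000000fffffffffful

int
main(void)
{
    size_t header = 8 + 16 + 3 * 8 + 16;	/* pmap, list, map[3], lru */
    size_t entry = 8 + 16;			/* pv_va + TAILQ linkage */

    assert(header + _NPCPV * entry == 4096);
    assert(PC_FREE2 == (1ULL << (_NPCPV - 128)) - 1);
    printf("pv chunk layout checks out\n");
    return (0);
}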
* * We do not, however, unmap 2mpages because subsequent accesses will * allocate per-page pv entries until repromotion occurs, thereby * exacerbating the shortage of free pv entries. */ static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp) { struct pch new_tail; struct pv_chunk *pc; struct md_page *pvh; pd_entry_t *pde; pmap_t pmap; pt_entry_t *pte, tpte; pt_entry_t PG_G, PG_A, PG_M, PG_RW; pv_entry_t pv; vm_offset_t va; vm_page_t m, m_pc; struct spglist free; uint64_t inuse; int bit, field, freed; PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL")); pmap = NULL; m_pc = NULL; PG_G = PG_A = PG_M = PG_RW = 0; SLIST_INIT(&free); TAILQ_INIT(&new_tail); pmap_delayed_invl_started(); mtx_lock(&pv_chunks_mutex); while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && SLIST_EMPTY(&free)) { TAILQ_REMOVE(&pv_chunks, pc, pc_lru); mtx_unlock(&pv_chunks_mutex); if (pmap != pc->pc_pmap) { if (pmap != NULL) { pmap_invalidate_all(pmap); if (pmap != locked_pmap) PMAP_UNLOCK(pmap); } pmap_delayed_invl_finished(); pmap_delayed_invl_started(); pmap = pc->pc_pmap; /* Avoid deadlock and lock recursion. */ if (pmap > locked_pmap) { RELEASE_PV_LIST_LOCK(lockp); PMAP_LOCK(pmap); } else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) { pmap = NULL; TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru); mtx_lock(&pv_chunks_mutex); continue; } PG_G = pmap_global_bit(pmap); PG_A = pmap_accessed_bit(pmap); PG_M = pmap_modified_bit(pmap); PG_RW = pmap_rw_bit(pmap); } /* * Destroy every non-wired, 4 KB page mapping in the chunk. */ freed = 0; for (field = 0; field < _NPCM; field++) { for (inuse = ~pc->pc_map[field] & pc_freemask[field]; inuse != 0; inuse &= ~(1UL << bit)) { bit = bsfq(inuse); pv = &pc->pc_pventry[field * 64 + bit]; va = pv->pv_va; pde = pmap_pde(pmap, va); if ((*pde & PG_PS) != 0) continue; pte = pmap_pde_to_pte(pde, va); if ((*pte & PG_W) != 0) continue; tpte = pte_load_clear(pte); if ((tpte & PG_G) != 0) pmap_invalidate_page(pmap, va); m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) vm_page_dirty(m); if ((tpte & PG_A) != 0) vm_page_aflag_set(m, PGA_REFERENCED); CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); if (TAILQ_EMPTY(&pvh->pv_list)) { vm_page_aflag_clear(m, PGA_WRITEABLE); } } pmap_delayed_invl_page(m); pc->pc_map[field] |= 1UL << bit; pmap_unuse_pt(pmap, va, *pde, &free); freed++; } } if (freed == 0) { TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru); mtx_lock(&pv_chunks_mutex); continue; } /* Every freed mapping is for a 4 KB page. */ pmap_resident_count_dec(pmap, freed); PV_STAT(atomic_add_long(&pv_entry_frees, freed)); PV_STAT(atomic_add_int(&pv_entry_spare, freed)); PV_STAT(atomic_subtract_long(&pv_entry_count, freed)); TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1 && pc->pc_map[2] == PC_FREE2) { PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV)); PV_STAT(atomic_subtract_int(&pc_chunk_count, 1)); PV_STAT(atomic_add_int(&pc_chunk_frees, 1)); /* Entire chunk is free; return it. */ m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc)); dump_drop_page(m_pc->phys_addr); mtx_lock(&pv_chunks_mutex); break; } TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru); mtx_lock(&pv_chunks_mutex); /* One freed pv entry in locked_pmap is sufficient. 
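/*
 * Illustrative aside, not part of pmap.c: reclaim_pv_chunk() above avoids
 * deadlock by blocking only on pmaps that sort after the one it already
 * holds, and merely try-locking the others.  A pthreads sketch of that
 * ordered-locking pattern; the address comparison mirrors the kernel's
 * "pmap > locked_pmap" test:
 */
#include <pthread.h>
#include <stdbool.h>

static bool
lock_in_order(pthread_mutex_t *held, pthread_mutex_t *other)
{
    if (other == held)
        return (true);			/* already ours */
    if (other > held) {
        pthread_mutex_lock(other);	/* safe: consistent global order */
        return (true);
    }
    return (pthread_mutex_trylock(other) == 0);	/* opportunistic only */
}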
 */
		if (pmap == locked_pmap)
			break;
	}
	TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
	mtx_unlock(&pv_chunks_mutex);
	if (pmap != NULL) {
		pmap_invalidate_all(pmap);
		if (pmap != locked_pmap)
			PMAP_UNLOCK(pmap);
	}
	pmap_delayed_invl_finished();
	if (m_pc == NULL && !SLIST_EMPTY(&free)) {
		m_pc = SLIST_FIRST(&free);
		SLIST_REMOVE_HEAD(&free, plinks.s.ss);
		/* Recycle a freed page table page. */
		m_pc->wire_count = 1;
		atomic_add_int(&vm_cnt.v_wire_count, 1);
	}
	pmap_free_zero_pages(&free);
	return (m_pc);
}

/*
 * free the pv_entry back to the free list
 */
static void
free_pv_entry(pmap_t pmap, pv_entry_t pv)
{
	struct pv_chunk *pc;
	int idx, field, bit;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	PV_STAT(atomic_add_long(&pv_entry_frees, 1));
	PV_STAT(atomic_add_int(&pv_entry_spare, 1));
	PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
	pc = pv_to_chunk(pv);
	idx = pv - &pc->pc_pventry[0];
	field = idx / 64;
	bit = idx % 64;
	pc->pc_map[field] |= 1ul << bit;
	if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
	    pc->pc_map[2] != PC_FREE2) {
		/* 98% of the time, pc is already at the head of the list. */
		if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
			TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
		}
		return;
	}
	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
	free_pv_chunk(pc);
}

static void
free_pv_chunk(struct pv_chunk *pc)
{
	vm_page_t m;

	mtx_lock(&pv_chunks_mutex);
	TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
	mtx_unlock(&pv_chunks_mutex);
	PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
	PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
	PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
	/* entire chunk is free, return it */
	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
	dump_drop_page(m->phys_addr);
	vm_page_unwire(m, PQ_NONE);
	vm_page_free(m);
}

/*
 * Returns a new PV entry, allocating a new PV chunk from the system when
 * needed.  If this PV chunk allocation fails and a PV list lock pointer was
 * given, a PV chunk is reclaimed from an arbitrary pmap.  Otherwise, NULL is
 * returned.
 *
 * The given PV list lock may be released.
*/ static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp) { int bit, field; pv_entry_t pv; struct pv_chunk *pc; vm_page_t m; PMAP_LOCK_ASSERT(pmap, MA_OWNED); PV_STAT(atomic_add_long(&pv_entry_allocs, 1)); retry: pc = TAILQ_FIRST(&pmap->pm_pvchunk); if (pc != NULL) { for (field = 0; field < _NPCM; field++) { if (pc->pc_map[field]) { bit = bsfq(pc->pc_map[field]); break; } } if (field < _NPCM) { pv = &pc->pc_pventry[field * 64 + bit]; pc->pc_map[field] &= ~(1ul << bit); /* If this was the last item, move it to tail */ if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) { TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); } PV_STAT(atomic_add_long(&pv_entry_count, 1)); PV_STAT(atomic_subtract_int(&pv_entry_spare, 1)); return (pv); } } /* No free items, allocate another chunk */ m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED); if (m == NULL) { if (lockp == NULL) { PV_STAT(pc_chunk_tryfail++); return (NULL); } m = reclaim_pv_chunk(pmap, lockp); if (m == NULL) goto retry; } PV_STAT(atomic_add_int(&pc_chunk_count, 1)); PV_STAT(atomic_add_int(&pc_chunk_allocs, 1)); dump_add_page(m->phys_addr); pc = (void *)PHYS_TO_DMAP(m->phys_addr); pc->pc_pmap = pmap; pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */ pc->pc_map[1] = PC_FREE1; pc->pc_map[2] = PC_FREE2; mtx_lock(&pv_chunks_mutex); TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); mtx_unlock(&pv_chunks_mutex); pv = &pc->pc_pventry[0]; TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); PV_STAT(atomic_add_long(&pv_entry_count, 1)); PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1)); return (pv); } /* * Returns the number of one bits within the given PV chunk map. * * The erratas for Intel processors state that "POPCNT Instruction May * Take Longer to Execute Than Expected". It is believed that the * issue is the spurious dependency on the destination register. * Provide a hint to the register rename logic that the destination * value is overwritten, by clearing it, as suggested in the * optimization manual. It should be cheap for unaffected processors * as well. * * Reference numbers for erratas are * 4th Gen Core: HSD146 * 5th Gen Core: BDM85 * 6th Gen Core: SKL029 */ static int popcnt_pc_map_pq(uint64_t *map) { u_long result, tmp; __asm __volatile("xorl %k0,%k0;popcntq %2,%0;" "xorl %k1,%k1;popcntq %3,%1;addl %k1,%k0;" "xorl %k1,%k1;popcntq %4,%1;addl %k1,%k0" : "=&r" (result), "=&r" (tmp) : "m" (map[0]), "m" (map[1]), "m" (map[2])); return (result); } /* * Ensure that the number of spare PV entries in the specified pmap meets or * exceeds the given count, "needed". * * The given PV list lock may be released. */ static void reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp) { struct pch new_tail; struct pv_chunk *pc; int avail, free; vm_page_t m; PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL")); /* * Newly allocated PV chunks must be stored in a private list until * the required number of PV chunks have been allocated. Otherwise, * reclaim_pv_chunk() could recycle one of these chunks. In * contrast, these chunks must be added to the pmap upon allocation. 
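/*
 * Illustrative aside, not part of pmap.c: the pv chunk bitmap is three
 * 64-bit words, so entry i lives at word i / 64, bit i % 64 (the inverse
 * of the bsfq() lookup in get_pv_entry() above), and a portable spare
 * count is just three popcounts.  The kernel's inline asm version
 * additionally xor-zeroes each destination register to sidestep the
 * POPCNT false-dependency errata cited above.  A sketch with GCC/Clang
 * builtins:
 */
#include <stdint.h>
#include <stdio.h>

static int
popcnt_map(const uint64_t *map)
{
    return (__builtin_popcountll(map[0]) +
        __builtin_popcountll(map[1]) +
        __builtin_popcountll(map[2]));
}

int
main(void)
{
    uint64_t map[3] = { 0, 0, 0 };
    int idx = 130;			/* mark pv entry #130 free */

    map[idx / 64] |= 1ULL << (idx % 64);
    printf("entry %d -> word %d bit %d, %d spare\n", idx, idx / 64,
        idx % 64, popcnt_map(map));
    return (0);
}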
*/ TAILQ_INIT(&new_tail); retry: avail = 0; TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) { #ifndef __POPCNT__ if ((cpu_feature2 & CPUID2_POPCNT) == 0) bit_count((bitstr_t *)pc->pc_map, 0, sizeof(pc->pc_map) * NBBY, &free); else #endif free = popcnt_pc_map_pq(pc->pc_map); if (free == 0) break; avail += free; if (avail >= needed) break; } for (; avail < needed; avail += _NPCPV) { m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED); if (m == NULL) { m = reclaim_pv_chunk(pmap, lockp); if (m == NULL) goto retry; } PV_STAT(atomic_add_int(&pc_chunk_count, 1)); PV_STAT(atomic_add_int(&pc_chunk_allocs, 1)); dump_add_page(m->phys_addr); pc = (void *)PHYS_TO_DMAP(m->phys_addr); pc->pc_pmap = pmap; pc->pc_map[0] = PC_FREE0; pc->pc_map[1] = PC_FREE1; pc->pc_map[2] = PC_FREE2; TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru); PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV)); } if (!TAILQ_EMPTY(&new_tail)) { mtx_lock(&pv_chunks_mutex); TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru); mtx_unlock(&pv_chunks_mutex); } } /* * First find and then remove the pv entry for the specified pmap and virtual * address from the specified pv list. Returns the pv entry if found and NULL * otherwise. This operation can be performed on pv lists for either 4KB or * 2MB page mappings. */ static __inline pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) { pv_entry_t pv; TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { if (pmap == PV_PMAP(pv) && va == pv->pv_va) { TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); pvh->pv_gen++; break; } } return (pv); } /* * After demotion from a 2MB page mapping to 512 4KB page mappings, * destroy the pv entry for the 2MB page mapping and reinstantiate the pv * entries for each of the 4KB page mappings. */ static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, struct rwlock **lockp) { struct md_page *pvh; struct pv_chunk *pc; pv_entry_t pv; vm_offset_t va_last; vm_page_t m; int bit, field; PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT((pa & PDRMASK) == 0, ("pmap_pv_demote_pde: pa is not 2mpage aligned")); CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); /* * Transfer the 2mpage's pv entry for this mapping to the first * page's pv list. Once this transfer begins, the pv list lock * must not be released until the last pv entry is reinstantiated. */ pvh = pa_to_pvh(pa); va = trunc_2mpage(va); pv = pmap_pvh_remove(pvh, pmap, va); KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found")); m = PHYS_TO_VM_PAGE(pa); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; /* Instantiate the remaining NPTEPG - 1 pv entries. 
*/ PV_STAT(atomic_add_long(&pv_entry_allocs, NPTEPG - 1)); va_last = va + NBPDR - PAGE_SIZE; for (;;) { pc = TAILQ_FIRST(&pmap->pm_pvchunk); KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 || pc->pc_map[2] != 0, ("pmap_pv_demote_pde: missing spare")); for (field = 0; field < _NPCM; field++) { while (pc->pc_map[field]) { bit = bsfq(pc->pc_map[field]); pc->pc_map[field] &= ~(1ul << bit); pv = &pc->pc_pventry[field * 64 + bit]; va += PAGE_SIZE; pv->pv_va = va; m++; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_pv_demote_pde: page %p is not managed", m)); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; if (va == va_last) goto out; } } TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); } out: if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) { TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); } PV_STAT(atomic_add_long(&pv_entry_count, NPTEPG - 1)); PV_STAT(atomic_subtract_int(&pv_entry_spare, NPTEPG - 1)); } /* * After promotion from 512 4KB page mappings to a single 2MB page mapping, * replace the many pv entries for the 4KB page mappings by a single pv entry * for the 2MB page mapping. */ static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, struct rwlock **lockp) { struct md_page *pvh; pv_entry_t pv; vm_offset_t va_last; vm_page_t m; KASSERT((pa & PDRMASK) == 0, ("pmap_pv_promote_pde: pa is not 2mpage aligned")); CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); /* * Transfer the first page's pv entry for this mapping to the 2mpage's * pv list. Aside from avoiding the cost of a call to get_pv_entry(), * a transfer avoids the possibility that get_pv_entry() calls * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the * mappings that is being promoted. */ m = PHYS_TO_VM_PAGE(pa); va = trunc_2mpage(va); pv = pmap_pvh_remove(&m->md, pmap, va); KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found")); pvh = pa_to_pvh(pa); TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); pvh->pv_gen++; /* Free the remaining NPTEPG - 1 pv entries. */ va_last = va + NBPDR - PAGE_SIZE; do { m++; va += PAGE_SIZE; pmap_pvh_free(&m->md, pmap, va); } while (va < va_last); } /* * First find and then destroy the pv entry for the specified pmap and virtual * address. This operation can be performed on pv lists for either 4KB or 2MB * page mappings. */ static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) { pv_entry_t pv; pv = pmap_pvh_remove(pvh, pmap, va); KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); free_pv_entry(pmap, pv); } /* * Conditionally create the PV entry for a 4KB page mapping if the required * memory can be allocated without resorting to reclamation. */ static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m, struct rwlock **lockp) { pv_entry_t pv; PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* Pass NULL instead of the lock pointer to disable reclamation. */ if ((pv = get_pv_entry(pmap, NULL)) != NULL) { pv->pv_va = va; CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; return (TRUE); } else return (FALSE); } /* * Conditionally create the PV entry for a 2MB page mapping if the required * memory can be allocated without resorting to reclamation. 
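/*
 * Illustrative aside, not part of pmap.c: both the demotion and promotion
 * paths above walk the 2MB region one 4KB page at a time, from
 * trunc_2mpage(va) up to va_last = va + NBPDR - PAGE_SIZE, touching
 * NPTEPG - 1 = 511 pages beyond the first.  The loop skeleton in user
 * space:
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define NBPDR		(1UL << 21)
#define trunc_2mpage(va) ((va) & ~(NBPDR - 1))

int
main(void)
{
    unsigned long va = trunc_2mpage(0x7f0000312345UL);
    unsigned long va_last = va + NBPDR - PAGE_SIZE;
    int n = 0;

    do {
        va += PAGE_SIZE;
        n++;			/* one pv entry per remaining 4KB page */
    } while (va < va_last);
    printf("%d additional pv entries\n", n);	/* prints 511 */
    return (0);
}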
*/ static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, struct rwlock **lockp) { struct md_page *pvh; pv_entry_t pv; PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* Pass NULL instead of the lock pointer to disable reclamation. */ if ((pv = get_pv_entry(pmap, NULL)) != NULL) { pv->pv_va = va; CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); pvh = pa_to_pvh(pa); TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); pvh->pv_gen++; return (TRUE); } else return (FALSE); } /* * Fills a page table page with mappings to consecutive physical pages. */ static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte) { pt_entry_t *pte; for (pte = firstpte; pte < firstpte + NPTEPG; pte++) { *pte = newpte; newpte += PAGE_SIZE; } } /* * Tries to demote a 2MB page mapping. If demotion fails, the 2MB page * mapping is invalidated. */ static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) { struct rwlock *lock; boolean_t rv; lock = NULL; rv = pmap_demote_pde_locked(pmap, pde, va, &lock); if (lock != NULL) rw_wunlock(lock); return (rv); } static boolean_t pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, struct rwlock **lockp) { pd_entry_t newpde, oldpde; pt_entry_t *firstpte, newpte; pt_entry_t PG_A, PG_G, PG_M, PG_RW, PG_V; vm_paddr_t mptepa; vm_page_t mpte; struct spglist free; vm_offset_t sva; int PG_PTE_CACHE; PG_G = pmap_global_bit(pmap); PG_A = pmap_accessed_bit(pmap); PG_M = pmap_modified_bit(pmap); PG_RW = pmap_rw_bit(pmap); PG_V = pmap_valid_bit(pmap); PG_PTE_CACHE = pmap_cache_mask(pmap, 0); PMAP_LOCK_ASSERT(pmap, MA_OWNED); oldpde = *pde; KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V), ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V")); if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) == NULL) { KASSERT((oldpde & PG_W) == 0, ("pmap_demote_pde: page table page for a wired mapping" " is missing")); /* * Invalidate the 2MB page mapping and return "failure" if the * mapping was never accessed or the allocation of the new * page table page fails. If the 2MB page mapping belongs to * the direct map region of the kernel's address space, then * the page allocation request specifies the highest possible * priority (VM_ALLOC_INTERRUPT). Otherwise, the priority is * normal. Page table pages are preallocated for every other * part of the kernel address space, so the direct map region * is the only part of the kernel address space that must be * handled here. */ if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL, pmap_pde_pindex(va), (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { SLIST_INIT(&free); sva = trunc_2mpage(va); pmap_remove_pde(pmap, pde, sva, &free, lockp); if ((oldpde & PG_G) == 0) pmap_invalidate_pde_page(pmap, sva, oldpde); pmap_free_zero_pages(&free); CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx" " in pmap %p", va, pmap); return (FALSE); } if (va < VM_MAXUSER_ADDRESS) pmap_resident_count_inc(pmap, 1); } mptepa = VM_PAGE_TO_PHYS(mpte); firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa); newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V; KASSERT((oldpde & PG_A) != 0, ("pmap_demote_pde: oldpde is missing PG_A")); KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW, ("pmap_demote_pde: oldpde is missing PG_M")); newpte = oldpde & ~PG_PS; newpte = pmap_swap_pat(pmap, newpte); /* * If the page table page is new, initialize it. 
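/*
 * Illustrative aside, not part of pmap.c: a user-space rendition of the
 * pmap_fill_ptp() loop above, run over a plain array -- each of the 512
 * slots receives the same flag bits with the frame advanced by one page.
 */
#include <stdint.h>
#include <stdio.h>

#define NPTEPG		512
#define PAGE_SIZE	4096ULL

static void
fill_ptp(uint64_t *firstpte, uint64_t newpte)
{
    uint64_t *pte;

    for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
        *pte = newpte;		/* same flags, advancing frame */
        newpte += PAGE_SIZE;
    }
}

int
main(void)
{
    static uint64_t ptp[NPTEPG];

    fill_ptp(ptp, 0x200000ULL | 0x63);	/* frame | V,RW,A,M-style bits */
    /* The last entry is the first plus 511 pages. */
    printf("first %#jx last %#jx\n", (uintmax_t)ptp[0],
        (uintmax_t)ptp[NPTEPG - 1]);
    return (0);
}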
*/ if (mpte->wire_count == 1) { mpte->wire_count = NPTEPG; pmap_fill_ptp(firstpte, newpte); } KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME), ("pmap_demote_pde: firstpte and newpte map different physical" " addresses")); /* * If the mapping has changed attributes, update the page table * entries. */ if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE)) pmap_fill_ptp(firstpte, newpte); /* * The spare PV entries must be reserved prior to demoting the * mapping, that is, prior to changing the PDE. Otherwise, the state * of the PDE and the PV lists will be inconsistent, which can result * in reclaim_pv_chunk() attempting to remove a PV entry from the * wrong PV list and pmap_pv_demote_pde() failing to find the expected * PV entry for the 2MB page mapping that is being demoted. */ if ((oldpde & PG_MANAGED) != 0) reserve_pv_entries(pmap, NPTEPG - 1, lockp); /* * Demote the mapping. This pmap is locked. The old PDE has * PG_A set. If the old PDE has PG_RW set, it also has PG_M * set. Thus, there is no danger of a race with another * processor changing the setting of PG_A and/or PG_M between * the read above and the store below. */ if (workaround_erratum383) pmap_update_pde(pmap, va, pde, newpde); else pde_store(pde, newpde); /* * Invalidate a stale recursive mapping of the page table page. */ if (va >= VM_MAXUSER_ADDRESS) pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va)); /* * Demote the PV entry. */ if ((oldpde & PG_MANAGED) != 0) pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME, lockp); atomic_add_long(&pmap_pde_demotions, 1); CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#lx" " in pmap %p", va, pmap); return (TRUE); } /* * pmap_remove_kernel_pde: Remove a kernel superpage mapping. */ static void pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) { pd_entry_t newpde; vm_paddr_t mptepa; vm_page_t mpte; KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap)); PMAP_LOCK_ASSERT(pmap, MA_OWNED); mpte = pmap_remove_pt_page(pmap, va); if (mpte == NULL) panic("pmap_remove_kernel_pde: Missing pt page."); mptepa = VM_PAGE_TO_PHYS(mpte); newpde = mptepa | X86_PG_M | X86_PG_A | X86_PG_RW | X86_PG_V; /* * Initialize the page table page. */ pagezero((void *)PHYS_TO_DMAP(mptepa)); /* * Demote the mapping. */ if (workaround_erratum383) pmap_update_pde(pmap, va, pde, newpde); else pde_store(pde, newpde); /* * Invalidate a stale recursive mapping of the page table page. 
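 * vtopte() reaches the PTEs through the recursive page-table mapping,
 * so the stale translation covering that address must be flushed before
 * the new page table page can be reached through it.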
*/ pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va)); } /* * pmap_remove_pde: do the things to unmap a superpage in a process */ static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, struct spglist *free, struct rwlock **lockp) { struct md_page *pvh; pd_entry_t oldpde; vm_offset_t eva, va; vm_page_t m, mpte; pt_entry_t PG_G, PG_A, PG_M, PG_RW; PG_G = pmap_global_bit(pmap); PG_A = pmap_accessed_bit(pmap); PG_M = pmap_modified_bit(pmap); PG_RW = pmap_rw_bit(pmap); PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT((sva & PDRMASK) == 0, ("pmap_remove_pde: sva is not 2mpage aligned")); oldpde = pte_load_clear(pdq); if (oldpde & PG_W) pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE; if ((oldpde & PG_G) != 0) pmap_invalidate_pde_page(kernel_pmap, sva, oldpde); pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE); if (oldpde & PG_MANAGED) { CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME); pvh = pa_to_pvh(oldpde & PG_PS_FRAME); pmap_pvh_free(pvh, pmap, sva); eva = sva + NBPDR; for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); va < eva; va += PAGE_SIZE, m++) { if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) vm_page_dirty(m); if (oldpde & PG_A) vm_page_aflag_set(m, PGA_REFERENCED); if (TAILQ_EMPTY(&m->md.pv_list) && TAILQ_EMPTY(&pvh->pv_list)) vm_page_aflag_clear(m, PGA_WRITEABLE); pmap_delayed_invl_page(m); } } if (pmap == kernel_pmap) { pmap_remove_kernel_pde(pmap, pdq, sva); } else { mpte = pmap_remove_pt_page(pmap, sva); if (mpte != NULL) { pmap_resident_count_dec(pmap, 1); KASSERT(mpte->wire_count == NPTEPG, ("pmap_remove_pde: pte page wire count error")); mpte->wire_count = 0; pmap_add_delayed_free_list(mpte, free, FALSE); atomic_subtract_int(&vm_cnt.v_wire_count, 1); } } return (pmap_unuse_pt(pmap, sva, *pmap_pdpe(pmap, sva), free)); } /* * pmap_remove_pte: do the things to unmap a page in a process */ static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp) { struct md_page *pvh; pt_entry_t oldpte, PG_A, PG_M, PG_RW; vm_page_t m; PG_A = pmap_accessed_bit(pmap); PG_M = pmap_modified_bit(pmap); PG_RW = pmap_rw_bit(pmap); PMAP_LOCK_ASSERT(pmap, MA_OWNED); oldpte = pte_load_clear(ptq); if (oldpte & PG_W) pmap->pm_stats.wired_count -= 1; pmap_resident_count_dec(pmap, 1); if (oldpte & PG_MANAGED) { m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME); if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) vm_page_dirty(m); if (oldpte & PG_A) vm_page_aflag_set(m, PGA_REFERENCED); CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); pmap_pvh_free(&m->md, pmap, va); if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); if (TAILQ_EMPTY(&pvh->pv_list)) vm_page_aflag_clear(m, PGA_WRITEABLE); } pmap_delayed_invl_page(m); } return (pmap_unuse_pt(pmap, va, ptepde, free)); } /* * Remove a single page from a process address space */ static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, struct spglist *free) { struct rwlock *lock; pt_entry_t *pte, PG_V; PG_V = pmap_valid_bit(pmap); PMAP_LOCK_ASSERT(pmap, MA_OWNED); if ((*pde & PG_V) == 0) return; pte = pmap_pde_to_pte(pde, va); if ((*pte & PG_V) == 0) return; lock = NULL; pmap_remove_pte(pmap, pte, va, *pde, free, &lock); if (lock != NULL) rw_wunlock(lock); pmap_invalidate_page(pmap, va); } /* * Remove the given range of addresses from the specified map. * * It is assumed that the start and end are properly * rounded to the page size. 
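 * In other words, the range is [sva, eva): both addresses are
 * page-aligned and eva is exclusive.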
*/
void
pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
    struct rwlock *lock;
    vm_offset_t va, va_next;
    pml4_entry_t *pml4e;
    pdp_entry_t *pdpe;
    pd_entry_t ptpaddr, *pde;
    pt_entry_t *pte, PG_G, PG_V;
    struct spglist free;
    int anyvalid;

    PG_G = pmap_global_bit(pmap);
    PG_V = pmap_valid_bit(pmap);

    /*
     * Perform an unsynchronized read.  This is, however, safe.
     */
    if (pmap->pm_stats.resident_count == 0)
        return;

    anyvalid = 0;
    SLIST_INIT(&free);

    pmap_delayed_invl_started();
    PMAP_LOCK(pmap);

    /*
     * Special handling for removing a single page: this is a very
     * common operation and it is easy to short-circuit some code.
     */
    if (sva + PAGE_SIZE == eva) {
        pde = pmap_pde(pmap, sva);
        if (pde && (*pde & PG_PS) == 0) {
            pmap_remove_page(pmap, sva, pde, &free);
            goto out;
        }
    }

    lock = NULL;
    for (; sva < eva; sva = va_next) {
        if (pmap->pm_stats.resident_count == 0)
            break;

        pml4e = pmap_pml4e(pmap, sva);
        if ((*pml4e & PG_V) == 0) {
            va_next = (sva + NBPML4) & ~PML4MASK;
            if (va_next < sva)
                va_next = eva;
            continue;
        }

        pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
        if ((*pdpe & PG_V) == 0) {
            va_next = (sva + NBPDP) & ~PDPMASK;
            if (va_next < sva)
                va_next = eva;
            continue;
        }

        /*
         * Calculate index for next page table.
         */
        va_next = (sva + NBPDR) & ~PDRMASK;
        if (va_next < sva)
            va_next = eva;

        pde = pmap_pdpe_to_pde(pdpe, sva);
        ptpaddr = *pde;

        /*
         * Weed out invalid mappings.
         */
        if (ptpaddr == 0)
            continue;

        /*
         * Check for large page.
         */
        if ((ptpaddr & PG_PS) != 0) {
            /*
             * Are we removing the entire large page?  If not,
             * demote the mapping and fall through.
             */
            if (sva + NBPDR == va_next && eva >= va_next) {
                /*
                 * The TLB entry for a PG_G mapping is
                 * invalidated by pmap_remove_pde().
                 */
                if ((ptpaddr & PG_G) == 0)
                    anyvalid = 1;
                pmap_remove_pde(pmap, pde, sva, &free, &lock);
                continue;
            } else if (!pmap_demote_pde_locked(pmap, pde, sva,
                &lock)) {
                /* The large page mapping was destroyed. */
                continue;
            } else
                ptpaddr = *pde;
        }

        /*
         * Limit our scan to either the end of the va represented
         * by the current page table page, or to the end of the
         * range being removed.
         */
        if (va_next > eva)
            va_next = eva;

        va = va_next;
        for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
            sva += PAGE_SIZE) {
            if (*pte == 0) {
                if (va != va_next) {
                    pmap_invalidate_range(pmap, va, sva);
                    va = va_next;
                }
                continue;
            }
            if ((*pte & PG_G) == 0)
                anyvalid = 1;
            else if (va == va_next)
                va = sva;
            if (pmap_remove_pte(pmap, pte, sva, ptpaddr, &free,
                &lock)) {
                sva += PAGE_SIZE;
                break;
            }
        }
        if (va != va_next)
            pmap_invalidate_range(pmap, va, sva);
    }
    if (lock != NULL)
        rw_wunlock(lock);
out:
    if (anyvalid)
        pmap_invalidate_all(pmap);
    PMAP_UNLOCK(pmap);
    pmap_delayed_invl_finished();
    pmap_free_zero_pages(&free);
}

/*
 * Routine: pmap_remove_all
 * Function:
 *     Removes this physical page from
 *     all physical maps in which it resides.
 *     Reflects back modify bits to the pager.
 *
 * Notes:
 *     Original versions of this routine were very
 *     inefficient because they iteratively called
 *     pmap_remove (slow...)
 */
void
pmap_remove_all(vm_page_t m)
{
    struct md_page *pvh;
    pv_entry_t pv;
    pmap_t pmap;
    struct rwlock *lock;
    pt_entry_t *pte, tpte, PG_A, PG_M, PG_RW;
    pd_entry_t *pde;
    vm_offset_t va;
    struct spglist free;
    int pvh_gen, md_gen;

    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
        ("pmap_remove_all: page %p is not managed", m));
    SLIST_INIT(&free);
    lock = VM_PAGE_TO_PV_LIST_LOCK(m);
    pvh = (m->flags & PG_FICTITIOUS) != 0 ?
&pv_dummy : pa_to_pvh(VM_PAGE_TO_PHYS(m)); retry: rw_wlock(lock); while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { pvh_gen = pvh->pv_gen; rw_wunlock(lock); PMAP_LOCK(pmap); rw_wlock(lock); if (pvh_gen != pvh->pv_gen) { rw_wunlock(lock); PMAP_UNLOCK(pmap); goto retry; } } va = pv->pv_va; pde = pmap_pde(pmap, va); (void)pmap_demote_pde_locked(pmap, pde, va, &lock); PMAP_UNLOCK(pmap); } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { pvh_gen = pvh->pv_gen; md_gen = m->md.pv_gen; rw_wunlock(lock); PMAP_LOCK(pmap); rw_wlock(lock); if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { rw_wunlock(lock); PMAP_UNLOCK(pmap); goto retry; } } PG_A = pmap_accessed_bit(pmap); PG_M = pmap_modified_bit(pmap); PG_RW = pmap_rw_bit(pmap); pmap_resident_count_dec(pmap, 1); pde = pmap_pde(pmap, pv->pv_va); KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found" " a 2mpage in page %p's pv list", m)); pte = pmap_pde_to_pte(pde, pv->pv_va); tpte = pte_load_clear(pte); if (tpte & PG_W) pmap->pm_stats.wired_count--; if (tpte & PG_A) vm_page_aflag_set(m, PGA_REFERENCED); /* * Update the vm_page_t clean and reference bits. */ if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) vm_page_dirty(m); pmap_unuse_pt(pmap, pv->pv_va, *pde, &free); pmap_invalidate_page(pmap, pv->pv_va); TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; free_pv_entry(pmap, pv); PMAP_UNLOCK(pmap); } vm_page_aflag_clear(m, PGA_WRITEABLE); rw_wunlock(lock); pmap_delayed_invl_wait(m); pmap_free_zero_pages(&free); } /* * pmap_protect_pde: do the things to protect a 2mpage in a process */ static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot) { pd_entry_t newpde, oldpde; vm_offset_t eva, va; vm_page_t m; boolean_t anychanged; pt_entry_t PG_G, PG_M, PG_RW; PG_G = pmap_global_bit(pmap); PG_M = pmap_modified_bit(pmap); PG_RW = pmap_rw_bit(pmap); PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT((sva & PDRMASK) == 0, ("pmap_protect_pde: sva is not 2mpage aligned")); anychanged = FALSE; retry: oldpde = newpde = *pde; if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) == (PG_MANAGED | PG_M | PG_RW)) { eva = sva + NBPDR; for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); va < eva; va += PAGE_SIZE, m++) vm_page_dirty(m); } if ((prot & VM_PROT_WRITE) == 0) newpde &= ~(PG_RW | PG_M); if ((prot & VM_PROT_EXECUTE) == 0) newpde |= pg_nx; if (newpde != oldpde) { /* * As an optimization to future operations on this PDE, clear * PG_PROMOTED. The impending invalidation will remove any * lingering 4KB page mappings from the TLB. */ if (!atomic_cmpset_long(pde, oldpde, newpde & ~PG_PROMOTED)) goto retry; if ((oldpde & PG_G) != 0) pmap_invalidate_pde_page(kernel_pmap, sva, oldpde); else anychanged = TRUE; } return (anychanged); } /* * Set the physical protection on the * specified range of this map as requested. 
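 * Protection can only be reduced here: a request for VM_PROT_NONE
 * removes the range outright, and a request that retains both write
 * and execute permission is a no-op.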
*/ void pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { vm_offset_t va_next; pml4_entry_t *pml4e; pdp_entry_t *pdpe; pd_entry_t ptpaddr, *pde; pt_entry_t *pte, PG_G, PG_M, PG_RW, PG_V; boolean_t anychanged; KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); if (prot == VM_PROT_NONE) { pmap_remove(pmap, sva, eva); return; } if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == (VM_PROT_WRITE|VM_PROT_EXECUTE)) return; PG_G = pmap_global_bit(pmap); PG_M = pmap_modified_bit(pmap); PG_V = pmap_valid_bit(pmap); PG_RW = pmap_rw_bit(pmap); anychanged = FALSE; PMAP_LOCK(pmap); for (; sva < eva; sva = va_next) { pml4e = pmap_pml4e(pmap, sva); if ((*pml4e & PG_V) == 0) { va_next = (sva + NBPML4) & ~PML4MASK; if (va_next < sva) va_next = eva; continue; } pdpe = pmap_pml4e_to_pdpe(pml4e, sva); if ((*pdpe & PG_V) == 0) { va_next = (sva + NBPDP) & ~PDPMASK; if (va_next < sva) va_next = eva; continue; } va_next = (sva + NBPDR) & ~PDRMASK; if (va_next < sva) va_next = eva; pde = pmap_pdpe_to_pde(pdpe, sva); ptpaddr = *pde; /* * Weed out invalid mappings. */ if (ptpaddr == 0) continue; /* * Check for large page. */ if ((ptpaddr & PG_PS) != 0) { /* * Are we protecting the entire large page? If not, * demote the mapping and fall through. */ if (sva + NBPDR == va_next && eva >= va_next) { /* * The TLB entry for a PG_G mapping is * invalidated by pmap_protect_pde(). */ if (pmap_protect_pde(pmap, pde, sva, prot)) anychanged = TRUE; continue; } else if (!pmap_demote_pde(pmap, pde, sva)) { /* * The large page mapping was destroyed. */ continue; } } if (va_next > eva) va_next = eva; for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++, sva += PAGE_SIZE) { pt_entry_t obits, pbits; vm_page_t m; retry: obits = pbits = *pte; if ((pbits & PG_V) == 0) continue; if ((prot & VM_PROT_WRITE) == 0) { if ((pbits & (PG_MANAGED | PG_M | PG_RW)) == (PG_MANAGED | PG_M | PG_RW)) { m = PHYS_TO_VM_PAGE(pbits & PG_FRAME); vm_page_dirty(m); } pbits &= ~(PG_RW | PG_M); } if ((prot & VM_PROT_EXECUTE) == 0) pbits |= pg_nx; if (pbits != obits) { if (!atomic_cmpset_long(pte, obits, pbits)) goto retry; if (obits & PG_G) pmap_invalidate_page(pmap, sva); else anychanged = TRUE; } } } if (anychanged) pmap_invalidate_all(pmap); PMAP_UNLOCK(pmap); } /* * Tries to promote the 512, contiguous 4KB page mappings that are within a * single page table page (PTP) to a single 2MB page mapping. For promotion * to occur, two conditions must be met: (1) the 4KB page mappings must map * aligned, contiguous physical memory and (2) the 4KB page mappings must have * identical characteristics. */ static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, struct rwlock **lockp) { pd_entry_t newpde; pt_entry_t *firstpte, oldpte, pa, *pte; pt_entry_t PG_G, PG_A, PG_M, PG_RW, PG_V; vm_page_t mpte; int PG_PTE_CACHE; PG_A = pmap_accessed_bit(pmap); PG_G = pmap_global_bit(pmap); PG_M = pmap_modified_bit(pmap); PG_V = pmap_valid_bit(pmap); PG_RW = pmap_rw_bit(pmap); PG_PTE_CACHE = pmap_cache_mask(pmap, 0); PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* * Examine the first PTE in the specified PTP. Abort if this PTE is * either invalid, unused, or does not map the first 4KB physical page * within a 2MB page. 
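 * The single test below folds these conditions together: the
 * (PG_FRAME & PDRMASK) bits of the physical address must be zero for
 * the first 4KB page of a 2MB run, and PG_A and PG_V must both be set.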
*/ firstpte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME); setpde: newpde = *firstpte; if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) { atomic_add_long(&pmap_pde_p_failures, 1); CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx" " in pmap %p", va, pmap); return; } if ((newpde & (PG_M | PG_RW)) == PG_RW) { /* * When PG_M is already clear, PG_RW can be cleared without * a TLB invalidation. */ if (!atomic_cmpset_long(firstpte, newpde, newpde & ~PG_RW)) goto setpde; newpde &= ~PG_RW; } /* * Examine each of the other PTEs in the specified PTP. Abort if this * PTE maps an unexpected 4KB physical page or does not have identical * characteristics to the first PTE. */ pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE; for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) { setpte: oldpte = *pte; if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) { atomic_add_long(&pmap_pde_p_failures, 1); CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx" " in pmap %p", va, pmap); return; } if ((oldpte & (PG_M | PG_RW)) == PG_RW) { /* * When PG_M is already clear, PG_RW can be cleared * without a TLB invalidation. */ if (!atomic_cmpset_long(pte, oldpte, oldpte & ~PG_RW)) goto setpte; oldpte &= ~PG_RW; CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#lx" " in pmap %p", (oldpte & PG_FRAME & PDRMASK) | (va & ~PDRMASK), pmap); } if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) { atomic_add_long(&pmap_pde_p_failures, 1); CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx" " in pmap %p", va, pmap); return; } pa -= PAGE_SIZE; } /* * Save the page table page in its current state until the PDE * mapping the superpage is demoted by pmap_demote_pde() or * destroyed by pmap_remove_pde(). */ mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME); KASSERT(mpte >= vm_page_array && mpte < &vm_page_array[vm_page_array_size], ("pmap_promote_pde: page table page is out of range")); KASSERT(mpte->pindex == pmap_pde_pindex(va), ("pmap_promote_pde: page table page's pindex is wrong")); if (pmap_insert_pt_page(pmap, mpte)) { atomic_add_long(&pmap_pde_p_failures, 1); CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx in pmap %p", va, pmap); return; } /* * Promote the pv entries. */ if ((newpde & PG_MANAGED) != 0) pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME, lockp); /* * Propagate the PAT index to its proper position. */ newpde = pmap_swap_pat(pmap, newpde); /* * Map the superpage. */ if (workaround_erratum383) pmap_update_pde(pmap, va, pde, PG_PS | newpde); else pde_store(pde, PG_PROMOTED | PG_PS | newpde); atomic_add_long(&pmap_pde_promotions, 1); CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#lx" " in pmap %p", va, pmap); } /* * Insert the given physical page (p) at * the specified virtual address (v) in the * target physical map with the protection requested. * * If specified, the page will be wired down, meaning * that the related pte can not be reclaimed. * * NB: This is the only routine which MAY NOT lazy-evaluate * or lose information. That is, this routine must actually * insert this page into the given map NOW. * * When destroying both a page table and PV entry, this function * performs the TLB invalidation before releasing the PV list * lock, so we do not need pmap_delayed_invl_page() calls here. 
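 * Returns KERN_SUCCESS on success, or KERN_RESOURCE_SHORTAGE when
 * PMAP_ENTER_NOSLEEP was specified and a needed page table page could
 * not be allocated.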
*/ int pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, u_int flags, int8_t psind __unused) { struct rwlock *lock; pd_entry_t *pde; pt_entry_t *pte, PG_G, PG_A, PG_M, PG_RW, PG_V; pt_entry_t newpte, origpte; pv_entry_t pv; vm_paddr_t opa, pa; vm_page_t mpte, om; + int rv; boolean_t nosleep; PG_A = pmap_accessed_bit(pmap); PG_G = pmap_global_bit(pmap); PG_M = pmap_modified_bit(pmap); PG_V = pmap_valid_bit(pmap); PG_RW = pmap_rw_bit(pmap); va = trunc_page(va); KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig")); KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS, ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)", va)); KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva || va >= kmi.clean_eva, ("pmap_enter: managed mapping within the clean submap")); if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) VM_OBJECT_ASSERT_LOCKED(m->object); pa = VM_PAGE_TO_PHYS(m); newpte = (pt_entry_t)(pa | PG_A | PG_V); if ((flags & VM_PROT_WRITE) != 0) newpte |= PG_M; if ((prot & VM_PROT_WRITE) != 0) newpte |= PG_RW; KASSERT((newpte & (PG_M | PG_RW)) != PG_M, ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't")); if ((prot & VM_PROT_EXECUTE) == 0) newpte |= pg_nx; if ((flags & PMAP_ENTER_WIRED) != 0) newpte |= PG_W; if (va < VM_MAXUSER_ADDRESS) newpte |= PG_U; if (pmap == kernel_pmap) newpte |= PG_G; newpte |= pmap_cache_bits(pmap, m->md.pat_mode, 0); /* * Set modified bit gratuitously for writeable mappings if * the page is unmanaged. We do not want to take a fault * to do the dirty bit accounting for these mappings. */ if ((m->oflags & VPO_UNMANAGED) != 0) { if ((newpte & PG_RW) != 0) newpte |= PG_M; } else newpte |= PG_MANAGED; mpte = NULL; lock = NULL; PMAP_LOCK(pmap); /* * In the case that a page table page is not * resident, we are creating it here. */ retry: pde = pmap_pde(pmap, va); if (pde != NULL && (*pde & PG_V) != 0 && ((*pde & PG_PS) == 0 || pmap_demote_pde_locked(pmap, pde, va, &lock))) { pte = pmap_pde_to_pte(pde, va); if (va < VM_MAXUSER_ADDRESS && mpte == NULL) { mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME); mpte->wire_count++; } } else if (va < VM_MAXUSER_ADDRESS) { /* * Here if the pte page isn't mapped, or if it has been * deallocated. */ nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0; mpte = _pmap_allocpte(pmap, pmap_pde_pindex(va), nosleep ? NULL : &lock); if (mpte == NULL && nosleep) { - if (lock != NULL) - rw_wunlock(lock); - PMAP_UNLOCK(pmap); - return (KERN_RESOURCE_SHORTAGE); + rv = KERN_RESOURCE_SHORTAGE; + goto out; } goto retry; } else panic("pmap_enter: invalid page directory va=%#lx", va); origpte = *pte; /* * Is the specified virtual address already mapped? */ if ((origpte & PG_V) != 0) { /* * Wiring change, just update stats. We don't worry about * wiring PT pages as they remain resident as long as there * are valid mappings in them. Hence, if a user page is wired, * the PT page will be also. */ if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0) pmap->pm_stats.wired_count++; else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0) pmap->pm_stats.wired_count--; /* * Remove the extra PT page reference. */ if (mpte != NULL) { mpte->wire_count--; KASSERT(mpte->wire_count > 0, ("pmap_enter: missing reference to page table page," " va: 0x%lx", va)); } /* * Has the physical page changed? */ opa = origpte & PG_FRAME; if (opa == pa) { /* * No, might be a protection or wiring change. 
*/ if ((origpte & PG_MANAGED) != 0 && (newpte & PG_RW) != 0) vm_page_aflag_set(m, PGA_WRITEABLE); if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0) goto unchanged; goto validate; } } else { /* * Increment the counters. */ if ((newpte & PG_W) != 0) pmap->pm_stats.wired_count++; pmap_resident_count_inc(pmap, 1); } /* * Enter on the PV list if part of our managed memory. */ if ((newpte & PG_MANAGED) != 0) { pv = get_pv_entry(pmap, &lock); pv->pv_va = va; CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; if ((newpte & PG_RW) != 0) vm_page_aflag_set(m, PGA_WRITEABLE); } /* * Update the PTE. */ if ((origpte & PG_V) != 0) { validate: origpte = pte_load_store(pte, newpte); opa = origpte & PG_FRAME; if (opa != pa) { if ((origpte & PG_MANAGED) != 0) { om = PHYS_TO_VM_PAGE(opa); if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) vm_page_dirty(om); if ((origpte & PG_A) != 0) vm_page_aflag_set(om, PGA_REFERENCED); CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa); pmap_pvh_free(&om->md, pmap, va); if ((om->aflags & PGA_WRITEABLE) != 0 && TAILQ_EMPTY(&om->md.pv_list) && ((om->flags & PG_FICTITIOUS) != 0 || TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))) vm_page_aflag_clear(om, PGA_WRITEABLE); } } else if ((newpte & PG_M) == 0 && (origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { if ((origpte & PG_MANAGED) != 0) vm_page_dirty(m); /* * Although the PTE may still have PG_RW set, TLB * invalidation may nonetheless be required because * the PTE no longer has PG_M set. */ } else if ((origpte & PG_NX) != 0 || (newpte & PG_NX) == 0) { /* * This PTE change does not require TLB invalidation. */ goto unchanged; } if ((origpte & PG_A) != 0) pmap_invalidate_page(pmap, va); } else pte_store(pte, newpte); unchanged: /* * If both the page table page and the reservation are fully * populated, then attempt promotion. */ if ((mpte == NULL || mpte->wire_count == NPTEPG) && pmap_ps_enabled(pmap) && (m->flags & PG_FICTITIOUS) == 0 && vm_reserv_level_iffullpop(m) == 0) pmap_promote_pde(pmap, pde, va, &lock); + rv = KERN_SUCCESS; +out: if (lock != NULL) rw_wunlock(lock); PMAP_UNLOCK(pmap); - return (KERN_SUCCESS); + return (rv); } /* * Tries to create a 2MB page mapping. Returns TRUE if successful and FALSE * otherwise. Fails if (1) a page table page cannot be allocated without * blocking, (2) a mapping already exists at the specified virtual address, or * (3) a pv entry cannot be allocated without reclaiming another pv entry. */ static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, struct rwlock **lockp) { pd_entry_t *pde, newpde; pt_entry_t PG_V; vm_page_t mpde; struct spglist free; PG_V = pmap_valid_bit(pmap); PMAP_LOCK_ASSERT(pmap, MA_OWNED); if ((mpde = pmap_allocpde(pmap, va, NULL)) == NULL) { CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" " in pmap %p", va, pmap); return (FALSE); } pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpde)); pde = &pde[pmap_pde_index(va)]; if ((*pde & PG_V) != 0) { KASSERT(mpde->wire_count > 1, ("pmap_enter_pde: mpde's wire count is too low")); mpde->wire_count--; CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" " in pmap %p", va, pmap); return (FALSE); } newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 1) | PG_PS | PG_V; if ((m->oflags & VPO_UNMANAGED) == 0) { newpde |= PG_MANAGED; /* * Abort this mapping if its PV entry could not be created. 
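 * pmap_pv_insert_pde() refuses to reclaim another pv entry, so a
 * failure here is exactly case (3) in the function header above.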
*/ if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m), lockp)) { SLIST_INIT(&free); if (pmap_unwire_ptp(pmap, va, mpde, &free)) { /* * Although "va" is not mapped, paging- * structure caches could nonetheless have * entries that refer to the freed page table * pages. Invalidate those entries. */ pmap_invalidate_page(pmap, va); pmap_free_zero_pages(&free); } CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" " in pmap %p", va, pmap); return (FALSE); } } if ((prot & VM_PROT_EXECUTE) == 0) newpde |= pg_nx; if (va < VM_MAXUSER_ADDRESS) newpde |= PG_U; /* * Increment counters. */ pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE); /* * Map the superpage. (This is not a promoted mapping; there will not * be any lingering 4KB page mappings in the TLB.) */ pde_store(pde, newpde); atomic_add_long(&pmap_pde_mappings, 1); CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx" " in pmap %p", va, pmap); return (TRUE); } /* * Maps a sequence of resident pages belonging to the same object. * The sequence begins with the given page m_start. This page is * mapped at the given virtual address start. Each subsequent page is * mapped at a virtual address that is offset from start by the same * amount as the page is offset from m_start within the object. The * last page in the sequence is the page with the largest offset from * m_start that can be mapped at a virtual address less than the given * virtual address end. Not every virtual page between start and end * is mapped; only those for which a resident page exists with the * corresponding offset from m_start are mapped. */ void pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_page_t m_start, vm_prot_t prot) { struct rwlock *lock; vm_offset_t va; vm_page_t m, mpte; vm_pindex_t diff, psize; VM_OBJECT_ASSERT_LOCKED(m_start->object); psize = atop(end - start); mpte = NULL; m = m_start; lock = NULL; PMAP_LOCK(pmap); while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { va = start + ptoa(diff); if ((va & PDRMASK) == 0 && va + NBPDR <= end && m->psind == 1 && pmap_ps_enabled(pmap) && pmap_enter_pde(pmap, va, m, prot, &lock)) m = &m[NBPDR / PAGE_SIZE - 1]; else mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte, &lock); m = TAILQ_NEXT(m, listq); } if (lock != NULL) rw_wunlock(lock); PMAP_UNLOCK(pmap); } /* * this code makes some *MAJOR* assumptions: * 1. Current pmap & pmap exists. * 2. Not wired. * 3. Read access. * 4. No page table pages. * but is *MUCH* faster than pmap_enter... */ void pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) { struct rwlock *lock; lock = NULL; PMAP_LOCK(pmap); (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock); if (lock != NULL) rw_wunlock(lock); PMAP_UNLOCK(pmap); } static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp) { struct spglist free; pt_entry_t *pte, PG_V; vm_paddr_t pa; KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || (m->oflags & VPO_UNMANAGED) != 0, ("pmap_enter_quick_locked: managed mapping within the clean submap")); PG_V = pmap_valid_bit(pmap); PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* * In the case that a page table page is not * resident, we are creating it here. 
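 * Only user addresses can require a new page table page; the kernel's
 * page tables are preallocated, so kernel addresses fall through to
 * vtopte() below.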
*/ if (va < VM_MAXUSER_ADDRESS) { vm_pindex_t ptepindex; pd_entry_t *ptepa; /* * Calculate pagetable page index */ ptepindex = pmap_pde_pindex(va); if (mpte && (mpte->pindex == ptepindex)) { mpte->wire_count++; } else { /* * Get the page directory entry */ ptepa = pmap_pde(pmap, va); /* * If the page table page is mapped, we just increment * the hold count, and activate it. Otherwise, we * attempt to allocate a page table page. If this * attempt fails, we don't retry. Instead, we give up. */ if (ptepa && (*ptepa & PG_V) != 0) { if (*ptepa & PG_PS) return (NULL); mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME); mpte->wire_count++; } else { /* * Pass NULL instead of the PV list lock * pointer, because we don't intend to sleep. */ mpte = _pmap_allocpte(pmap, ptepindex, NULL); if (mpte == NULL) return (mpte); } } pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte)); pte = &pte[pmap_pte_index(va)]; } else { mpte = NULL; pte = vtopte(va); } if (*pte) { if (mpte != NULL) { mpte->wire_count--; mpte = NULL; } return (mpte); } /* * Enter on the PV list if part of our managed memory. */ if ((m->oflags & VPO_UNMANAGED) == 0 && !pmap_try_insert_pv_entry(pmap, va, m, lockp)) { if (mpte != NULL) { SLIST_INIT(&free); if (pmap_unwire_ptp(pmap, va, mpte, &free)) { /* * Although "va" is not mapped, paging- * structure caches could nonetheless have * entries that refer to the freed page table * pages. Invalidate those entries. */ pmap_invalidate_page(pmap, va); pmap_free_zero_pages(&free); } mpte = NULL; } return (mpte); } /* * Increment counters */ pmap_resident_count_inc(pmap, 1); pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 0); if ((prot & VM_PROT_EXECUTE) == 0) pa |= pg_nx; /* * Now validate mapping with RO protection */ if ((m->oflags & VPO_UNMANAGED) != 0) pte_store(pte, pa | PG_V | PG_U); else pte_store(pte, pa | PG_V | PG_U | PG_MANAGED); return (mpte); } /* * Make a temporary mapping for a physical address. This is only intended * to be used for panic dumps. */ void * pmap_kenter_temporary(vm_paddr_t pa, int i) { vm_offset_t va; va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); pmap_kenter(va, pa); invlpg(va); return ((void *)crashdumpmap); } /* * This code maps large physical mmap regions into the * processor address space. Note that some shortcuts * are taken, but the code works. */ void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size) { pd_entry_t *pde; pt_entry_t PG_A, PG_M, PG_RW, PG_V; vm_paddr_t pa, ptepa; vm_page_t p, pdpg; int pat_mode; PG_A = pmap_accessed_bit(pmap); PG_M = pmap_modified_bit(pmap); PG_V = pmap_valid_bit(pmap); PG_RW = pmap_rw_bit(pmap); VM_OBJECT_ASSERT_WLOCKED(object); KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, ("pmap_object_init_pt: non-device object")); if ((addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) { if (!pmap_ps_enabled(pmap)) return; if (!vm_object_populate(object, pindex, pindex + atop(size))) return; p = vm_page_lookup(object, pindex); KASSERT(p->valid == VM_PAGE_BITS_ALL, ("pmap_object_init_pt: invalid page %p", p)); pat_mode = p->md.pat_mode; /* * Abort the mapping if the first page is not physically * aligned to a 2MB page boundary. */ ptepa = VM_PAGE_TO_PHYS(p); if (ptepa & (NBPDR - 1)) return; /* * Skip the first page. Abort the mapping if the rest of * the pages are not physically contiguous or have differing * memory attributes. 
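 * The object's page list is kept sorted by pindex, so TAILQ_NEXT()
 * visits the pages in order of their offset within the object.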
*/ p = TAILQ_NEXT(p, listq); for (pa = ptepa + PAGE_SIZE; pa < ptepa + size; pa += PAGE_SIZE) { KASSERT(p->valid == VM_PAGE_BITS_ALL, ("pmap_object_init_pt: invalid page %p", p)); if (pa != VM_PAGE_TO_PHYS(p) || pat_mode != p->md.pat_mode) return; p = TAILQ_NEXT(p, listq); } /* * Map using 2MB pages. Since "ptepa" is 2M aligned and * "size" is a multiple of 2M, adding the PAT setting to "pa" * will not affect the termination of this loop. */ PMAP_LOCK(pmap); for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, 1); pa < ptepa + size; pa += NBPDR) { pdpg = pmap_allocpde(pmap, addr, NULL); if (pdpg == NULL) { /* * The creation of mappings below is only an * optimization. If a page directory page * cannot be allocated without blocking, * continue on to the next mapping rather than * blocking. */ addr += NBPDR; continue; } pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg)); pde = &pde[pmap_pde_index(addr)]; if ((*pde & PG_V) == 0) { pde_store(pde, pa | PG_PS | PG_M | PG_A | PG_U | PG_RW | PG_V); pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE); atomic_add_long(&pmap_pde_mappings, 1); } else { /* Continue on if the PDE is already valid. */ pdpg->wire_count--; KASSERT(pdpg->wire_count > 0, ("pmap_object_init_pt: missing reference " "to page directory page, va: 0x%lx", addr)); } addr += NBPDR; } PMAP_UNLOCK(pmap); } } /* * Clear the wired attribute from the mappings for the specified range of * addresses in the given pmap. Every valid mapping within that range * must have the wired attribute set. In contrast, invalid mappings * cannot have the wired attribute set, so they are ignored. * * The wired attribute of the page table entry is not a hardware * feature, so there is no need to invalidate any TLB entries. * Since pmap_demote_pde() for the wired entry must never fail, * pmap_delayed_invl_started()/finished() calls around the * function are not needed. */ void pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { vm_offset_t va_next; pml4_entry_t *pml4e; pdp_entry_t *pdpe; pd_entry_t *pde; pt_entry_t *pte, PG_V; PG_V = pmap_valid_bit(pmap); PMAP_LOCK(pmap); for (; sva < eva; sva = va_next) { pml4e = pmap_pml4e(pmap, sva); if ((*pml4e & PG_V) == 0) { va_next = (sva + NBPML4) & ~PML4MASK; if (va_next < sva) va_next = eva; continue; } pdpe = pmap_pml4e_to_pdpe(pml4e, sva); if ((*pdpe & PG_V) == 0) { va_next = (sva + NBPDP) & ~PDPMASK; if (va_next < sva) va_next = eva; continue; } va_next = (sva + NBPDR) & ~PDRMASK; if (va_next < sva) va_next = eva; pde = pmap_pdpe_to_pde(pdpe, sva); if ((*pde & PG_V) == 0) continue; if ((*pde & PG_PS) != 0) { if ((*pde & PG_W) == 0) panic("pmap_unwire: pde %#jx is missing PG_W", (uintmax_t)*pde); /* * Are we unwiring the entire large page? If not, * demote the mapping and fall through. */ if (sva + NBPDR == va_next && eva >= va_next) { atomic_clear_long(pde, PG_W); pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE; continue; } else if (!pmap_demote_pde(pmap, pde, sva)) panic("pmap_unwire: demotion failed"); } if (va_next > eva) va_next = eva; for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++, sva += PAGE_SIZE) { if ((*pte & PG_V) == 0) continue; if ((*pte & PG_W) == 0) panic("pmap_unwire: pte %#jx is missing PG_W", (uintmax_t)*pte); /* * PG_W must be cleared atomically. Although the pmap * lock synchronizes access to PG_W, another processor * could be setting PG_M and/or PG_A concurrently. 
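 * A non-atomic read-modify-write here could lose such a concurrent
 * hardware update to PG_M or PG_A.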
*/ atomic_clear_long(pte, PG_W); pmap->pm_stats.wired_count--; } } PMAP_UNLOCK(pmap); } /* * Copy the range specified by src_addr/len * from the source map to the range dst_addr/len * in the destination map. * * This routine is only advisory and need not do anything. */ void pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) { struct rwlock *lock; struct spglist free; vm_offset_t addr; vm_offset_t end_addr = src_addr + len; vm_offset_t va_next; pt_entry_t PG_A, PG_M, PG_V; if (dst_addr != src_addr) return; if (dst_pmap->pm_type != src_pmap->pm_type) return; /* * EPT page table entries that require emulation of A/D bits are * sensitive to clearing the PG_A bit (aka EPT_PG_READ). Although * we clear PG_M (aka EPT_PG_WRITE) concomitantly, the PG_U bit * (aka EPT_PG_EXECUTE) could still be set. Since some EPT * implementations flag an EPT misconfiguration for exec-only * mappings we skip this function entirely for emulated pmaps. */ if (pmap_emulate_ad_bits(dst_pmap)) return; lock = NULL; if (dst_pmap < src_pmap) { PMAP_LOCK(dst_pmap); PMAP_LOCK(src_pmap); } else { PMAP_LOCK(src_pmap); PMAP_LOCK(dst_pmap); } PG_A = pmap_accessed_bit(dst_pmap); PG_M = pmap_modified_bit(dst_pmap); PG_V = pmap_valid_bit(dst_pmap); for (addr = src_addr; addr < end_addr; addr = va_next) { pt_entry_t *src_pte, *dst_pte; vm_page_t dstmpde, dstmpte, srcmpte; pml4_entry_t *pml4e; pdp_entry_t *pdpe; pd_entry_t srcptepaddr, *pde; KASSERT(addr < UPT_MIN_ADDRESS, ("pmap_copy: invalid to pmap_copy page tables")); pml4e = pmap_pml4e(src_pmap, addr); if ((*pml4e & PG_V) == 0) { va_next = (addr + NBPML4) & ~PML4MASK; if (va_next < addr) va_next = end_addr; continue; } pdpe = pmap_pml4e_to_pdpe(pml4e, addr); if ((*pdpe & PG_V) == 0) { va_next = (addr + NBPDP) & ~PDPMASK; if (va_next < addr) va_next = end_addr; continue; } va_next = (addr + NBPDR) & ~PDRMASK; if (va_next < addr) va_next = end_addr; pde = pmap_pdpe_to_pde(pdpe, addr); srcptepaddr = *pde; if (srcptepaddr == 0) continue; if (srcptepaddr & PG_PS) { if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr) continue; dstmpde = pmap_allocpde(dst_pmap, addr, NULL); if (dstmpde == NULL) break; pde = (pd_entry_t *) PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpde)); pde = &pde[pmap_pde_index(addr)]; if (*pde == 0 && ((srcptepaddr & PG_MANAGED) == 0 || pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr & PG_PS_FRAME, &lock))) { *pde = srcptepaddr & ~PG_W; pmap_resident_count_inc(dst_pmap, NBPDR / PAGE_SIZE); atomic_add_long(&pmap_pde_mappings, 1); } else dstmpde->wire_count--; continue; } srcptepaddr &= PG_FRAME; srcmpte = PHYS_TO_VM_PAGE(srcptepaddr); KASSERT(srcmpte->wire_count > 0, ("pmap_copy: source page table page is unused")); if (va_next > end_addr) va_next = end_addr; src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr); src_pte = &src_pte[pmap_pte_index(addr)]; dstmpte = NULL; while (addr < va_next) { pt_entry_t ptetemp; ptetemp = *src_pte; /* * we only virtual copy managed pages */ if ((ptetemp & PG_MANAGED) != 0) { if (dstmpte != NULL && dstmpte->pindex == pmap_pde_pindex(addr)) dstmpte->wire_count++; else if ((dstmpte = pmap_allocpte(dst_pmap, addr, NULL)) == NULL) goto out; dst_pte = (pt_entry_t *) PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte)); dst_pte = &dst_pte[pmap_pte_index(addr)]; if (*dst_pte == 0 && pmap_try_insert_pv_entry(dst_pmap, addr, PHYS_TO_VM_PAGE(ptetemp & PG_FRAME), &lock)) { /* * Clear the wired, modified, and * accessed (referenced) bits * during the copy. 
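 * PG_W is cleared because the destination mapping is not wired, and
 * PG_M and PG_A are cleared so that the destination accumulates its
 * own modified and referenced history.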
*/
                *dst_pte = ptetemp & ~(PG_W | PG_M | PG_A);
                pmap_resident_count_inc(dst_pmap, 1);
            } else {
                SLIST_INIT(&free);
                if (pmap_unwire_ptp(dst_pmap, addr, dstmpte,
                    &free)) {
                    /*
                     * Although "addr" is not mapped,
                     * paging-structure caches could
                     * nonetheless have entries that refer
                     * to the freed page table pages.
                     * Invalidate those entries.
                     */
                    pmap_invalidate_page(dst_pmap, addr);
                    pmap_free_zero_pages(&free);
                }
                goto out;
            }
            if (dstmpte->wire_count >= srcmpte->wire_count)
                break;
        }
        addr += PAGE_SIZE;
        src_pte++;
    }
    }
out:
    if (lock != NULL)
        rw_wunlock(lock);
    PMAP_UNLOCK(src_pmap);
    PMAP_UNLOCK(dst_pmap);
}

/*
 * Zero the specified hardware page.
 */
void
pmap_zero_page(vm_page_t m)
{
    vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));

    pagezero((void *)va);
}

/*
 * Zero an area within a single hardware page.  off and size must not
 * cover an area beyond a single hardware page.
 */
void
pmap_zero_page_area(vm_page_t m, int off, int size)
{
    vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));

    if (off == 0 && size == PAGE_SIZE)
        pagezero((void *)va);
    else
        bzero((char *)va + off, size);
}

/*
 * Copy 1 specified hardware page to another.
 */
void
pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
{
    vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
    vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));

    pagecopy((void *)src, (void *)dst);
}

int unmapped_buf_allowed = 1;

void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{
    void *a_cp, *b_cp;
    vm_page_t pages[2];
    vm_offset_t vaddr[2], a_pg_offset, b_pg_offset;
    int cnt;
    boolean_t mapped;

    while (xfersize > 0) {
        a_pg_offset = a_offset & PAGE_MASK;
        pages[0] = ma[a_offset >> PAGE_SHIFT];
        b_pg_offset = b_offset & PAGE_MASK;
        pages[1] = mb[b_offset >> PAGE_SHIFT];
        cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
        cnt = min(cnt, PAGE_SIZE - b_pg_offset);
        mapped = pmap_map_io_transient(pages, vaddr, 2, FALSE);
        a_cp = (char *)vaddr[0] + a_pg_offset;
        b_cp = (char *)vaddr[1] + b_pg_offset;
        bcopy(a_cp, b_cp, cnt);
        if (__predict_false(mapped))
            pmap_unmap_io_transient(pages, vaddr, 2, FALSE);
        a_offset += cnt;
        b_offset += cnt;
        xfersize -= cnt;
    }
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
    struct md_page *pvh;
    struct rwlock *lock;
    pv_entry_t pv;
    int loops = 0;
    boolean_t rv;

    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
        ("pmap_page_exists_quick: page %p is not managed", m));
    rv = FALSE;
    lock = VM_PAGE_TO_PV_LIST_LOCK(m);
    rw_rlock(lock);
    TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
        if (PV_PMAP(pv) == pmap) {
            rv = TRUE;
            break;
        }
        loops++;
        if (loops >= 16)
            break;
    }
    if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
        pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
        TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
            if (PV_PMAP(pv) == pmap) {
                rv = TRUE;
                break;
            }
            loops++;
            if (loops >= 16)
                break;
        }
    }
    rw_runlock(lock);
    return (rv);
}

/*
 * pmap_page_wired_mappings:
 *
 * Return the number of managed mappings to the given physical page
 * that are wired.
*/ int pmap_page_wired_mappings(vm_page_t m) { struct rwlock *lock; struct md_page *pvh; pmap_t pmap; pt_entry_t *pte; pv_entry_t pv; int count, md_gen, pvh_gen; if ((m->oflags & VPO_UNMANAGED) != 0) return (0); lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_rlock(lock); restart: count = 0; TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { md_gen = m->md.pv_gen; rw_runlock(lock); PMAP_LOCK(pmap); rw_rlock(lock); if (md_gen != m->md.pv_gen) { PMAP_UNLOCK(pmap); goto restart; } } pte = pmap_pte(pmap, pv->pv_va); if ((*pte & PG_W) != 0) count++; PMAP_UNLOCK(pmap); } if ((m->flags & PG_FICTITIOUS) == 0) { pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { md_gen = m->md.pv_gen; pvh_gen = pvh->pv_gen; rw_runlock(lock); PMAP_LOCK(pmap); rw_rlock(lock); if (md_gen != m->md.pv_gen || pvh_gen != pvh->pv_gen) { PMAP_UNLOCK(pmap); goto restart; } } pte = pmap_pde(pmap, pv->pv_va); if ((*pte & PG_W) != 0) count++; PMAP_UNLOCK(pmap); } } rw_runlock(lock); return (count); } /* * Returns TRUE if the given page is mapped individually or as part of * a 2mpage. Otherwise, returns FALSE. */ boolean_t pmap_page_is_mapped(vm_page_t m) { struct rwlock *lock; boolean_t rv; if ((m->oflags & VPO_UNMANAGED) != 0) return (FALSE); lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_rlock(lock); rv = !TAILQ_EMPTY(&m->md.pv_list) || ((m->flags & PG_FICTITIOUS) == 0 && !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list)); rw_runlock(lock); return (rv); } /* * Destroy all managed, non-wired mappings in the given user-space * pmap. This pmap cannot be active on any processor besides the * caller. * * This function cannot be applied to the kernel pmap. Moreover, it * is not intended for general use. It is only to be used during * process termination. Consequently, it can be implemented in ways * that make it faster than pmap_remove(). First, it can more quickly * destroy mappings by iterating over the pmap's collection of PV * entries, rather than searching the page table. Second, it doesn't * have to test and clear the page table entries atomically, because * no processor is currently accessing the user address space. In * particular, a page table entry's dirty bit won't change state once * this function starts. */ void pmap_remove_pages(pmap_t pmap) { pd_entry_t ptepde; pt_entry_t *pte, tpte; pt_entry_t PG_M, PG_RW, PG_V; struct spglist free; vm_page_t m, mpte, mt; pv_entry_t pv; struct md_page *pvh; struct pv_chunk *pc, *npc; struct rwlock *lock; int64_t bit; uint64_t inuse, bitmask; int allfree, field, freed, idx; boolean_t superpage; vm_paddr_t pa; /* * Assert that the given pmap is only active on the current * CPU. Unfortunately, we cannot block another CPU from * activating the pmap while this function is executing. 
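 * The INVARIANTS block below therefore only checks a snapshot of
 * pm_active, taken with preemption disabled so that the current CPU's
 * bit cannot change underneath the check.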
*/ KASSERT(pmap == PCPU_GET(curpmap), ("non-current pmap %p", pmap)); #ifdef INVARIANTS { cpuset_t other_cpus; other_cpus = all_cpus; critical_enter(); CPU_CLR(PCPU_GET(cpuid), &other_cpus); CPU_AND(&other_cpus, &pmap->pm_active); critical_exit(); KASSERT(CPU_EMPTY(&other_cpus), ("pmap active %p", pmap)); } #endif lock = NULL; PG_M = pmap_modified_bit(pmap); PG_V = pmap_valid_bit(pmap); PG_RW = pmap_rw_bit(pmap); SLIST_INIT(&free); PMAP_LOCK(pmap); TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { allfree = 1; freed = 0; for (field = 0; field < _NPCM; field++) { inuse = ~pc->pc_map[field] & pc_freemask[field]; while (inuse != 0) { bit = bsfq(inuse); bitmask = 1UL << bit; idx = field * 64 + bit; pv = &pc->pc_pventry[idx]; inuse &= ~bitmask; pte = pmap_pdpe(pmap, pv->pv_va); ptepde = *pte; pte = pmap_pdpe_to_pde(pte, pv->pv_va); tpte = *pte; if ((tpte & (PG_PS | PG_V)) == PG_V) { superpage = FALSE; ptepde = tpte; pte = (pt_entry_t *)PHYS_TO_DMAP(tpte & PG_FRAME); pte = &pte[pmap_pte_index(pv->pv_va)]; tpte = *pte; } else { /* * Keep track whether 'tpte' is a * superpage explicitly instead of * relying on PG_PS being set. * * This is because PG_PS is numerically * identical to PG_PTE_PAT and thus a * regular page could be mistaken for * a superpage. */ superpage = TRUE; } if ((tpte & PG_V) == 0) { panic("bad pte va %lx pte %lx", pv->pv_va, tpte); } /* * We cannot remove wired pages from a process' mapping at this time */ if (tpte & PG_W) { allfree = 0; continue; } if (superpage) pa = tpte & PG_PS_FRAME; else pa = tpte & PG_FRAME; m = PHYS_TO_VM_PAGE(pa); KASSERT(m->phys_addr == pa, ("vm_page_t %p phys_addr mismatch %016jx %016jx", m, (uintmax_t)m->phys_addr, (uintmax_t)tpte)); KASSERT((m->flags & PG_FICTITIOUS) != 0 || m < &vm_page_array[vm_page_array_size], ("pmap_remove_pages: bad tpte %#jx", (uintmax_t)tpte)); pte_clear(pte); /* * Update the vm_page_t clean/reference bits. 
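 * A dirty superpage dirties all 512 constituent 4KB pages.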
*/ if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { if (superpage) { for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) vm_page_dirty(mt); } else vm_page_dirty(m); } CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m); /* Mark free */ pc->pc_map[field] |= bitmask; if (superpage) { pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE); pvh = pa_to_pvh(tpte & PG_PS_FRAME); TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); pvh->pv_gen++; if (TAILQ_EMPTY(&pvh->pv_list)) { for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) if ((mt->aflags & PGA_WRITEABLE) != 0 && TAILQ_EMPTY(&mt->md.pv_list)) vm_page_aflag_clear(mt, PGA_WRITEABLE); } mpte = pmap_remove_pt_page(pmap, pv->pv_va); if (mpte != NULL) { pmap_resident_count_dec(pmap, 1); KASSERT(mpte->wire_count == NPTEPG, ("pmap_remove_pages: pte page wire count error")); mpte->wire_count = 0; pmap_add_delayed_free_list(mpte, &free, FALSE); atomic_subtract_int(&vm_cnt.v_wire_count, 1); } } else { pmap_resident_count_dec(pmap, 1); TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; if ((m->aflags & PGA_WRITEABLE) != 0 && TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); if (TAILQ_EMPTY(&pvh->pv_list)) vm_page_aflag_clear(m, PGA_WRITEABLE); } } pmap_unuse_pt(pmap, pv->pv_va, ptepde, &free); freed++; } } PV_STAT(atomic_add_long(&pv_entry_frees, freed)); PV_STAT(atomic_add_int(&pv_entry_spare, freed)); PV_STAT(atomic_subtract_long(&pv_entry_count, freed)); if (allfree) { TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); free_pv_chunk(pc); } } if (lock != NULL) rw_wunlock(lock); pmap_invalidate_all(pmap); PMAP_UNLOCK(pmap); pmap_free_zero_pages(&free); } static boolean_t pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified) { struct rwlock *lock; pv_entry_t pv; struct md_page *pvh; pt_entry_t *pte, mask; pt_entry_t PG_A, PG_M, PG_RW, PG_V; pmap_t pmap; int md_gen, pvh_gen; boolean_t rv; rv = FALSE; lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_rlock(lock); restart: TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { md_gen = m->md.pv_gen; rw_runlock(lock); PMAP_LOCK(pmap); rw_rlock(lock); if (md_gen != m->md.pv_gen) { PMAP_UNLOCK(pmap); goto restart; } } pte = pmap_pte(pmap, pv->pv_va); mask = 0; if (modified) { PG_M = pmap_modified_bit(pmap); PG_RW = pmap_rw_bit(pmap); mask |= PG_RW | PG_M; } if (accessed) { PG_A = pmap_accessed_bit(pmap); PG_V = pmap_valid_bit(pmap); mask |= PG_V | PG_A; } rv = (*pte & mask) == mask; PMAP_UNLOCK(pmap); if (rv) goto out; } if ((m->flags & PG_FICTITIOUS) == 0) { pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { md_gen = m->md.pv_gen; pvh_gen = pvh->pv_gen; rw_runlock(lock); PMAP_LOCK(pmap); rw_rlock(lock); if (md_gen != m->md.pv_gen || pvh_gen != pvh->pv_gen) { PMAP_UNLOCK(pmap); goto restart; } } pte = pmap_pde(pmap, pv->pv_va); mask = 0; if (modified) { PG_M = pmap_modified_bit(pmap); PG_RW = pmap_rw_bit(pmap); mask |= PG_RW | PG_M; } if (accessed) { PG_A = pmap_accessed_bit(pmap); PG_V = pmap_valid_bit(pmap); mask |= PG_V | PG_A; } rv = (*pte & mask) == mask; PMAP_UNLOCK(pmap); if (rv) goto out; } } out: rw_runlock(lock); return (rv); } /* * pmap_is_modified: * * Return whether or not the specified physical page was modified * in any physical maps. 
*/ boolean_t pmap_is_modified(vm_page_t m) { KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_is_modified: page %p is not managed", m)); /* * If the page is not exclusive busied, then PGA_WRITEABLE cannot be * concurrently set while the object is locked. Thus, if PGA_WRITEABLE * is clear, no PTEs can have PG_M set. */ VM_OBJECT_ASSERT_WLOCKED(m->object); if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) return (FALSE); return (pmap_page_test_mappings(m, FALSE, TRUE)); } /* * pmap_is_prefaultable: * * Return whether or not the specified virtual address is eligible * for prefault. */ boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) { pd_entry_t *pde; pt_entry_t *pte, PG_V; boolean_t rv; PG_V = pmap_valid_bit(pmap); rv = FALSE; PMAP_LOCK(pmap); pde = pmap_pde(pmap, addr); if (pde != NULL && (*pde & (PG_PS | PG_V)) == PG_V) { pte = pmap_pde_to_pte(pde, addr); rv = (*pte & PG_V) == 0; } PMAP_UNLOCK(pmap); return (rv); } /* * pmap_is_referenced: * * Return whether or not the specified physical page was referenced * in any physical maps. */ boolean_t pmap_is_referenced(vm_page_t m) { KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_is_referenced: page %p is not managed", m)); return (pmap_page_test_mappings(m, TRUE, FALSE)); } /* * Clear the write and modified bits in each of the given page's mappings. */ void pmap_remove_write(vm_page_t m) { struct md_page *pvh; pmap_t pmap; struct rwlock *lock; pv_entry_t next_pv, pv; pd_entry_t *pde; pt_entry_t oldpte, *pte, PG_M, PG_RW; vm_offset_t va; int pvh_gen, md_gen; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_remove_write: page %p is not managed", m)); /* * If the page is not exclusive busied, then PGA_WRITEABLE cannot be * set by another thread while the object is locked. Thus, * if PGA_WRITEABLE is clear, no page table entries need updating. */ VM_OBJECT_ASSERT_WLOCKED(m->object); if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) return; lock = VM_PAGE_TO_PV_LIST_LOCK(m); pvh = (m->flags & PG_FICTITIOUS) != 0 ? 
    &pv_dummy : pa_to_pvh(VM_PAGE_TO_PHYS(m));
retry_pv_loop:
    rw_wlock(lock);
    TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
        pmap = PV_PMAP(pv);
        if (!PMAP_TRYLOCK(pmap)) {
            pvh_gen = pvh->pv_gen;
            rw_wunlock(lock);
            PMAP_LOCK(pmap);
            rw_wlock(lock);
            if (pvh_gen != pvh->pv_gen) {
                PMAP_UNLOCK(pmap);
                rw_wunlock(lock);
                goto retry_pv_loop;
            }
        }
        PG_RW = pmap_rw_bit(pmap);
        va = pv->pv_va;
        pde = pmap_pde(pmap, va);
        if ((*pde & PG_RW) != 0)
            (void)pmap_demote_pde_locked(pmap, pde, va, &lock);
        KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
            ("inconsistent pv lock %p %p for page %p",
            lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
        PMAP_UNLOCK(pmap);
    }
    TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
        pmap = PV_PMAP(pv);
        if (!PMAP_TRYLOCK(pmap)) {
            pvh_gen = pvh->pv_gen;
            md_gen = m->md.pv_gen;
            rw_wunlock(lock);
            PMAP_LOCK(pmap);
            rw_wlock(lock);
            if (pvh_gen != pvh->pv_gen ||
                md_gen != m->md.pv_gen) {
                PMAP_UNLOCK(pmap);
                rw_wunlock(lock);
                goto retry_pv_loop;
            }
        }
        PG_M = pmap_modified_bit(pmap);
        PG_RW = pmap_rw_bit(pmap);
        pde = pmap_pde(pmap, pv->pv_va);
        KASSERT((*pde & PG_PS) == 0,
            ("pmap_remove_write: found a 2mpage in page %p's pv list",
            m));
        pte = pmap_pde_to_pte(pde, pv->pv_va);
retry:
        oldpte = *pte;
        if (oldpte & PG_RW) {
            if (!atomic_cmpset_long(pte, oldpte, oldpte &
                ~(PG_RW | PG_M)))
                goto retry;
            if ((oldpte & PG_M) != 0)
                vm_page_dirty(m);
            pmap_invalidate_page(pmap, pv->pv_va);
        }
        PMAP_UNLOCK(pmap);
    }
    rw_wunlock(lock);
    vm_page_aflag_clear(m, PGA_WRITEABLE);
    pmap_delayed_invl_wait(m);
}

static __inline boolean_t
safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
{

    if (!pmap_emulate_ad_bits(pmap))
        return (TRUE);

    KASSERT(pmap->pm_type == PT_EPT, ("invalid pm_type %d", pmap->pm_type));

    /*
     * XWR = 010 or 110 will cause an unconditional EPT misconfiguration,
     * so we don't let the referenced (aka EPT_PG_READ) bit be cleared
     * if the EPT_PG_WRITE bit is set.
     */
    if ((pte & EPT_PG_WRITE) != 0)
        return (FALSE);

    /*
     * XWR = 100 is allowed only if PMAP_SUPPORTS_EXEC_ONLY is set.
     */
    if ((pte & EPT_PG_EXECUTE) == 0 ||
        ((pmap->pm_flags & PMAP_SUPPORTS_EXEC_ONLY) != 0))
        return (TRUE);
    else
        return (FALSE);
}

/*
 * pmap_ts_referenced:
 *
 * Return a count of reference bits for a page, clearing those bits.
 * It is not necessary for every reference bit to be cleared, but it
 * is necessary that 0 only be returned when there are truly no
 * reference bits set.
 *
 * As an optimization, update the page's dirty field if a modified bit is
 * found while counting reference bits.  This opportunistic update can be
 * performed at low cost and can eliminate the need for some future calls
 * to pmap_is_modified().  However, since this function stops after
 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
 * dirty pages.  Those dirty pages will only be detected by a future call
 * to pmap_is_modified().
 *
 * A DI block is not needed within this function, because
 * invalidations are performed before the PV list lock is
 * released.
 */
int
pmap_ts_referenced(vm_page_t m)
{
    struct md_page *pvh;
    pv_entry_t pv, pvf;
    pmap_t pmap;
    struct rwlock *lock;
    pd_entry_t oldpde, *pde;
    pt_entry_t *pte, PG_A, PG_M, PG_RW;
    vm_offset_t va;
    vm_paddr_t pa;
    int cleared, md_gen, not_cleared, pvh_gen;
    struct spglist free;
    boolean_t demoted;

    KASSERT((m->oflags & VPO_UNMANAGED) == 0,
        ("pmap_ts_referenced: page %p is not managed", m));
    SLIST_INIT(&free);
    cleared = 0;
    pa = VM_PAGE_TO_PHYS(m);
    lock = PHYS_TO_PV_LIST_LOCK(pa);
    pvh = (m->flags & PG_FICTITIOUS) != 0 ?
&pv_dummy : pa_to_pvh(pa); rw_wlock(lock); retry: not_cleared = 0; if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL) goto small_mappings; pv = pvf; do { if (pvf == NULL) pvf = pv; pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { pvh_gen = pvh->pv_gen; rw_wunlock(lock); PMAP_LOCK(pmap); rw_wlock(lock); if (pvh_gen != pvh->pv_gen) { PMAP_UNLOCK(pmap); goto retry; } } PG_A = pmap_accessed_bit(pmap); PG_M = pmap_modified_bit(pmap); PG_RW = pmap_rw_bit(pmap); va = pv->pv_va; pde = pmap_pde(pmap, pv->pv_va); oldpde = *pde; if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) { /* * Although "oldpde" is mapping a 2MB page, because * this function is called at a 4KB page granularity, * we only update the 4KB page under test. */ vm_page_dirty(m); } if ((oldpde & PG_A) != 0) { /* * Since this reference bit is shared by 512 4KB * pages, it should not be cleared every time it is * tested. Apply a simple "hash" function on the * physical page number, the virtual superpage number, * and the pmap address to select one 4KB page out of * the 512 on which testing the reference bit will * result in clearing that reference bit. This * function is designed to avoid the selection of the * same 4KB page for every 2MB page mapping. * * On demotion, a mapping that hasn't been referenced * is simply destroyed. To avoid the possibility of a * subsequent page fault on a demoted wired mapping, * always leave its reference bit set. Moreover, * since the superpage is wired, the current state of * its reference bit won't affect page replacement. */ if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PDRSHIFT) ^ (uintptr_t)pmap) & (NPTEPG - 1)) == 0 && (oldpde & PG_W) == 0) { if (safe_to_clear_referenced(pmap, oldpde)) { atomic_clear_long(pde, PG_A); pmap_invalidate_page(pmap, pv->pv_va); demoted = FALSE; } else if (pmap_demote_pde_locked(pmap, pde, pv->pv_va, &lock)) { /* * Remove the mapping to a single page * so that a subsequent access may * repromote. Since the underlying * page table page is fully populated, * this removal never frees a page * table page. */ demoted = TRUE; va += VM_PAGE_TO_PHYS(m) - (oldpde & PG_PS_FRAME); pte = pmap_pde_to_pte(pde, va); pmap_remove_pte(pmap, pte, va, *pde, NULL, &lock); pmap_invalidate_page(pmap, va); } else demoted = TRUE; if (demoted) { /* * The superpage mapping was removed * entirely and therefore 'pv' is no * longer valid. */ if (pvf == pv) pvf = NULL; pv = NULL; } cleared++; KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m), ("inconsistent pv lock %p %p for page %p", lock, VM_PAGE_TO_PV_LIST_LOCK(m), m)); } else not_cleared++; } PMAP_UNLOCK(pmap); /* Rotate the PV list if it has more than one entry. 
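 * Moving the just-examined entry to the tail means that the next scan
 * starts with a different mapping, spreading the reference-bit sampling
 * across the pmaps that share this page.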
*/ if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) { TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); pvh->pv_gen++; } if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX) goto out; } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf); small_mappings: if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL) goto out; pv = pvf; do { if (pvf == NULL) pvf = pv; pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { pvh_gen = pvh->pv_gen; md_gen = m->md.pv_gen; rw_wunlock(lock); PMAP_LOCK(pmap); rw_wlock(lock); if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { PMAP_UNLOCK(pmap); goto retry; } } PG_A = pmap_accessed_bit(pmap); PG_M = pmap_modified_bit(pmap); PG_RW = pmap_rw_bit(pmap); pde = pmap_pde(pmap, pv->pv_va); KASSERT((*pde & PG_PS) == 0, ("pmap_ts_referenced: found a 2mpage in page %p's pv list", m)); pte = pmap_pde_to_pte(pde, pv->pv_va); if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) vm_page_dirty(m); if ((*pte & PG_A) != 0) { if (safe_to_clear_referenced(pmap, *pte)) { atomic_clear_long(pte, PG_A); pmap_invalidate_page(pmap, pv->pv_va); cleared++; } else if ((*pte & PG_W) == 0) { /* * Wired pages cannot be paged out so * doing accessed bit emulation for * them is wasted effort. We do the * hard work for unwired pages only. */ pmap_remove_pte(pmap, pte, pv->pv_va, *pde, &free, &lock); pmap_invalidate_page(pmap, pv->pv_va); cleared++; if (pvf == pv) pvf = NULL; pv = NULL; KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m), ("inconsistent pv lock %p %p for page %p", lock, VM_PAGE_TO_PV_LIST_LOCK(m), m)); } else not_cleared++; } PMAP_UNLOCK(pmap); /* Rotate the PV list if it has more than one entry. */ if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) { TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; } } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared + not_cleared < PMAP_TS_REFERENCED_MAX); out: rw_wunlock(lock); pmap_free_zero_pages(&free); return (cleared + not_cleared); } /* * Apply the given advice to the specified range of addresses within the * given pmap. Depending on the advice, clear the referenced and/or * modified flags in each mapping and set the mapped page's dirty field. */ void pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice) { struct rwlock *lock; pml4_entry_t *pml4e; pdp_entry_t *pdpe; pd_entry_t oldpde, *pde; pt_entry_t *pte, PG_A, PG_G, PG_M, PG_RW, PG_V; vm_offset_t va, va_next; vm_page_t m; boolean_t anychanged; if (advice != MADV_DONTNEED && advice != MADV_FREE) return; /* * A/D bit emulation requires an alternate code path when clearing * the modified and accessed bits below. Since this function is * advisory in nature we skip it entirely for pmaps that require * A/D bit emulation. 
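 * Skipping is always safe here: madvise(2) requests are hints, so
 * ignoring one only forgoes an optimization; it never affects
 * correctness.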
*/ if (pmap_emulate_ad_bits(pmap)) return; PG_A = pmap_accessed_bit(pmap); PG_G = pmap_global_bit(pmap); PG_M = pmap_modified_bit(pmap); PG_V = pmap_valid_bit(pmap); PG_RW = pmap_rw_bit(pmap); anychanged = FALSE; pmap_delayed_invl_started(); PMAP_LOCK(pmap); for (; sva < eva; sva = va_next) { pml4e = pmap_pml4e(pmap, sva); if ((*pml4e & PG_V) == 0) { va_next = (sva + NBPML4) & ~PML4MASK; if (va_next < sva) va_next = eva; continue; } pdpe = pmap_pml4e_to_pdpe(pml4e, sva); if ((*pdpe & PG_V) == 0) { va_next = (sva + NBPDP) & ~PDPMASK; if (va_next < sva) va_next = eva; continue; } va_next = (sva + NBPDR) & ~PDRMASK; if (va_next < sva) va_next = eva; pde = pmap_pdpe_to_pde(pdpe, sva); oldpde = *pde; if ((oldpde & PG_V) == 0) continue; else if ((oldpde & PG_PS) != 0) { if ((oldpde & PG_MANAGED) == 0) continue; lock = NULL; if (!pmap_demote_pde_locked(pmap, pde, sva, &lock)) { if (lock != NULL) rw_wunlock(lock); /* * The large page mapping was destroyed. */ continue; } /* * Unless the page mappings are wired, remove the * mapping to a single page so that a subsequent * access may repromote. Since the underlying page * table page is fully populated, this removal never * frees a page table page. */ if ((oldpde & PG_W) == 0) { pte = pmap_pde_to_pte(pde, sva); KASSERT((*pte & PG_V) != 0, ("pmap_advise: invalid PTE")); pmap_remove_pte(pmap, pte, sva, *pde, NULL, &lock); anychanged = TRUE; } if (lock != NULL) rw_wunlock(lock); } if (va_next > eva) va_next = eva; va = va_next; for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++, sva += PAGE_SIZE) { if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V)) goto maybe_invlrng; else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { if (advice == MADV_DONTNEED) { /* * Future calls to pmap_is_modified() * can be avoided by making the page * dirty now. */ m = PHYS_TO_VM_PAGE(*pte & PG_FRAME); vm_page_dirty(m); } atomic_clear_long(pte, PG_M | PG_A); } else if ((*pte & PG_A) != 0) atomic_clear_long(pte, PG_A); else goto maybe_invlrng; if ((*pte & PG_G) != 0) { if (va == va_next) va = sva; } else anychanged = TRUE; continue; maybe_invlrng: if (va != va_next) { pmap_invalidate_range(pmap, va, sva); va = va_next; } } if (va != va_next) pmap_invalidate_range(pmap, va, sva); } if (anychanged) pmap_invalidate_all(pmap); PMAP_UNLOCK(pmap); pmap_delayed_invl_finished(); } /* * Clear the modify bits on the specified physical page. */ void pmap_clear_modify(vm_page_t m) { struct md_page *pvh; pmap_t pmap; pv_entry_t next_pv, pv; pd_entry_t oldpde, *pde; pt_entry_t oldpte, *pte, PG_M, PG_RW, PG_V; struct rwlock *lock; vm_offset_t va; int md_gen, pvh_gen; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_clear_modify: page %p is not managed", m)); VM_OBJECT_ASSERT_WLOCKED(m->object); KASSERT(!vm_page_xbusied(m), ("pmap_clear_modify: page %p is exclusive busied", m)); /* * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set. * If the object containing the page is locked and the page is not * exclusive busied, then PGA_WRITEABLE cannot be concurrently set. */ if ((m->aflags & PGA_WRITEABLE) == 0) return; pvh = (m->flags & PG_FICTITIOUS) != 0 ? 
&pv_dummy : pa_to_pvh(VM_PAGE_TO_PHYS(m)); lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_wlock(lock); restart: TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { pvh_gen = pvh->pv_gen; rw_wunlock(lock); PMAP_LOCK(pmap); rw_wlock(lock); if (pvh_gen != pvh->pv_gen) { PMAP_UNLOCK(pmap); goto restart; } } PG_M = pmap_modified_bit(pmap); PG_V = pmap_valid_bit(pmap); PG_RW = pmap_rw_bit(pmap); va = pv->pv_va; pde = pmap_pde(pmap, va); oldpde = *pde; if ((oldpde & PG_RW) != 0) { if (pmap_demote_pde_locked(pmap, pde, va, &lock)) { if ((oldpde & PG_W) == 0) { /* * Write protect the mapping to a * single page so that a subsequent * write access may repromote. */ va += VM_PAGE_TO_PHYS(m) - (oldpde & PG_PS_FRAME); pte = pmap_pde_to_pte(pde, va); oldpte = *pte; if ((oldpte & PG_V) != 0) { while (!atomic_cmpset_long(pte, oldpte, oldpte & ~(PG_M | PG_RW))) oldpte = *pte; vm_page_dirty(m); pmap_invalidate_page(pmap, va); } } } } PMAP_UNLOCK(pmap); } TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { md_gen = m->md.pv_gen; pvh_gen = pvh->pv_gen; rw_wunlock(lock); PMAP_LOCK(pmap); rw_wlock(lock); if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { PMAP_UNLOCK(pmap); goto restart; } } PG_M = pmap_modified_bit(pmap); PG_RW = pmap_rw_bit(pmap); pde = pmap_pde(pmap, pv->pv_va); KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found" " a 2mpage in page %p's pv list", m)); pte = pmap_pde_to_pte(pde, pv->pv_va); if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { atomic_clear_long(pte, PG_M); pmap_invalidate_page(pmap, pv->pv_va); } PMAP_UNLOCK(pmap); } rw_wunlock(lock); } /* * Miscellaneous support routines follow */ /* Adjust the cache mode for a 4KB page mapped via a PTE. */ static __inline void pmap_pte_attr(pt_entry_t *pte, int cache_bits, int mask) { u_int opte, npte; /* * The cache mode bits are all in the low 32-bits of the * PTE, so we can just spin on updating the low 32-bits. */ do { opte = *(u_int *)pte; npte = opte & ~mask; npte |= cache_bits; } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte)); } /* Adjust the cache mode for a 2MB page mapped via a PDE. */ static __inline void pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask) { u_int opde, npde; /* * The cache mode bits are all in the low 32-bits of the * PDE, so we can just spin on updating the low 32-bits. */ do { opde = *(u_int *)pde; npde = opde & ~mask; npde |= cache_bits; } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde)); } /* * Map a set of physical memory pages into the kernel virtual * address space. Return a pointer to where it is mapped. This * routine is intended to be used for mapping device memory, * NOT real memory. */ void * pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode) { struct pmap_preinit_mapping *ppim; vm_offset_t va, offset; vm_size_t tmpsize; int i; offset = pa & PAGE_MASK; size = round_page(offset + size); pa = trunc_page(pa); if (!pmap_initialized) { va = 0; for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { ppim = pmap_preinit_mapping + i; if (ppim->va == 0) { ppim->pa = pa; ppim->sz = size; ppim->mode = mode; ppim->va = virtual_avail; virtual_avail += size; va = ppim->va; break; } } if (va == 0) panic("%s: too many preinit mappings", __func__); } else { /* * If we have a preinit mapping, re-use it. 
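 * A request is satisfied in one of three ways, tried in order:
 * re-use an exactly matching (pa, size, mode) preinit mapping,
 * serve the range out of the direct map when it lies below
 * dmaplimit and pmap_change_attr() succeeds, or fall back to
 * fresh KVA from kva_alloc().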
*/ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { ppim = pmap_preinit_mapping + i; if (ppim->pa == pa && ppim->sz == size && ppim->mode == mode) return ((void *)(ppim->va + offset)); } /* * If the specified range of physical addresses fits within * the direct map window, use the direct map. */ if (pa < dmaplimit && pa + size < dmaplimit) { va = PHYS_TO_DMAP(pa); if (!pmap_change_attr(va, size, mode)) return ((void *)(va + offset)); } va = kva_alloc(size); if (va == 0) panic("%s: Couldn't allocate KVA", __func__); } for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE) pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode); pmap_invalidate_range(kernel_pmap, va, va + tmpsize); pmap_invalidate_cache_range(va, va + tmpsize, FALSE); return ((void *)(va + offset)); } void * pmap_mapdev(vm_paddr_t pa, vm_size_t size) { return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE)); } void * pmap_mapbios(vm_paddr_t pa, vm_size_t size) { return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK)); } void pmap_unmapdev(vm_offset_t va, vm_size_t size) { struct pmap_preinit_mapping *ppim; vm_offset_t offset; int i; /* If we gave a direct map region in pmap_mapdev, do nothing */ if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) return; offset = va & PAGE_MASK; size = round_page(offset + size); va = trunc_page(va); for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { ppim = pmap_preinit_mapping + i; if (ppim->va == va && ppim->sz == size) { if (pmap_initialized) return; ppim->pa = 0; ppim->va = 0; ppim->sz = 0; ppim->mode = 0; if (va + size == virtual_avail) virtual_avail = va; return; } } if (pmap_initialized) kva_free(va, size); } /* * Tries to demote a 1GB page mapping. */ static boolean_t pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va) { pdp_entry_t newpdpe, oldpdpe; pd_entry_t *firstpde, newpde, *pde; pt_entry_t PG_A, PG_M, PG_RW, PG_V; vm_paddr_t mpdepa; vm_page_t mpde; PG_A = pmap_accessed_bit(pmap); PG_M = pmap_modified_bit(pmap); PG_V = pmap_valid_bit(pmap); PG_RW = pmap_rw_bit(pmap); PMAP_LOCK_ASSERT(pmap, MA_OWNED); oldpdpe = *pdpe; KASSERT((oldpdpe & (PG_PS | PG_V)) == (PG_PS | PG_V), ("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V")); if ((mpde = vm_page_alloc(NULL, va >> PDPSHIFT, VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx" " in pmap %p", va, pmap); return (FALSE); } mpdepa = VM_PAGE_TO_PHYS(mpde); firstpde = (pd_entry_t *)PHYS_TO_DMAP(mpdepa); newpdpe = mpdepa | PG_M | PG_A | (oldpdpe & PG_U) | PG_RW | PG_V; KASSERT((oldpdpe & PG_A) != 0, ("pmap_demote_pdpe: oldpdpe is missing PG_A")); KASSERT((oldpdpe & (PG_M | PG_RW)) != PG_RW, ("pmap_demote_pdpe: oldpdpe is missing PG_M")); newpde = oldpdpe; /* * Initialize the page directory page. */ for (pde = firstpde; pde < firstpde + NPDEPG; pde++) { *pde = newpde; newpde += NBPDR; } /* * Demote the mapping. */ *pdpe = newpdpe; /* * Invalidate a stale recursive mapping of the page directory page. */ pmap_invalidate_page(pmap, (vm_offset_t)vtopde(va)); pmap_pdpe_demotions++; CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx" " in pmap %p", va, pmap); return (TRUE); } /* * Sets the memory attribute for the specified page. */ void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) { m->md.pat_mode = ma; /* * If "m" is a normal page, update its direct mapping. This update * can be relied upon to perform any cache operations that are * required for data coherence. 
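 * Fictitious pages have no direct map entry to update.  A failure
 * is treated as fatal because leaving the direct map with a
 * conflicting memory type would create exactly the undefined
 * aliasing situation described at pmap_change_attr() below.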
*/ if ((m->flags & PG_FICTITIOUS) == 0 && pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE, m->md.pat_mode)) panic("memory attribute change on the direct map failed"); } /* * Changes the specified virtual address range's memory type to that given by * the parameter "mode". The specified virtual address range must be * completely contained within either the direct map or the kernel map. If * the virtual address range is contained within the kernel map, then the * memory type for each of the corresponding ranges of the direct map is also * changed. (The corresponding ranges of the direct map are those ranges that * map the same physical pages as the specified virtual address range.) These * changes to the direct map are necessary because Intel describes the * behavior of their processors as "undefined" if two or more mappings to the * same physical page have different memory types. * * Returns zero if the change completed successfully, and either EINVAL or * ENOMEM if the change failed. Specifically, EINVAL is returned if some part * of the virtual address range was not mapped, and ENOMEM is returned if * there was insufficient memory available to complete the change. In the * latter case, the memory type may have been changed on some part of the * virtual address range or the direct map. */ int pmap_change_attr(vm_offset_t va, vm_size_t size, int mode) { int error; PMAP_LOCK(kernel_pmap); error = pmap_change_attr_locked(va, size, mode); PMAP_UNLOCK(kernel_pmap); return (error); } static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode) { vm_offset_t base, offset, tmpva; vm_paddr_t pa_start, pa_end, pa_end1; pdp_entry_t *pdpe; pd_entry_t *pde; pt_entry_t *pte; int cache_bits_pte, cache_bits_pde, error; boolean_t changed; PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED); base = trunc_page(va); offset = va & PAGE_MASK; size = round_page(offset + size); /* * Only supported on kernel virtual addresses, including the direct * map but excluding the recursive map. */ if (base < DMAP_MIN_ADDRESS) return (EINVAL); cache_bits_pde = pmap_cache_bits(kernel_pmap, mode, 1); cache_bits_pte = pmap_cache_bits(kernel_pmap, mode, 0); changed = FALSE; /* * Pages that aren't mapped aren't supported. Also break down 2MB pages * into 4KB pages if required. */ for (tmpva = base; tmpva < base + size; ) { pdpe = pmap_pdpe(kernel_pmap, tmpva); if (pdpe == NULL || *pdpe == 0) return (EINVAL); if (*pdpe & PG_PS) { /* * If the current 1GB page already has the required * memory type, then we need not demote this page. Just * increment tmpva to the next 1GB page frame. */ if ((*pdpe & X86_PG_PDE_CACHE) == cache_bits_pde) { tmpva = trunc_1gpage(tmpva) + NBPDP; continue; } /* * If the current offset aligns with a 1GB page frame * and there is at least 1GB left within the range, then * we need not break down this page into 2MB pages. */ if ((tmpva & PDPMASK) == 0 && tmpva + PDPMASK < base + size) { tmpva += NBPDP; continue; } if (!pmap_demote_pdpe(kernel_pmap, pdpe, tmpva)) return (ENOMEM); } pde = pmap_pdpe_to_pde(pdpe, tmpva); if (*pde == 0) return (EINVAL); if (*pde & PG_PS) { /* * If the current 2MB page already has the required * memory type, then we need not demote this page. Just * increment tmpva to the next 2MB page frame. */ if ((*pde & X86_PG_PDE_CACHE) == cache_bits_pde) { tmpva = trunc_2mpage(tmpva) + NBPDR; continue; } /* * If the current offset aligns with a 2MB page frame * and there is at least 2MB left within the range, then * we need not break down this page into 4KB pages. 
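 * As a worked example (addresses are illustrative only): with
 * NBPDR = 2MB and PDRMASK = 0x1fffff, a tmpva of
 * 0xffffff8000200000 in a range that still has 4MB to go passes
 * both tests, so the loop advances a full 2MB page with no
 * demotion.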
*/ if ((tmpva & PDRMASK) == 0 && tmpva + PDRMASK < base + size) { tmpva += NBPDR; continue; } if (!pmap_demote_pde(kernel_pmap, pde, tmpva)) return (ENOMEM); } pte = pmap_pde_to_pte(pde, tmpva); if (*pte == 0) return (EINVAL); tmpva += PAGE_SIZE; } error = 0; /* * Ok, all the pages exist, so run through them updating their * cache mode if required. */ pa_start = pa_end = 0; for (tmpva = base; tmpva < base + size; ) { pdpe = pmap_pdpe(kernel_pmap, tmpva); if (*pdpe & PG_PS) { if ((*pdpe & X86_PG_PDE_CACHE) != cache_bits_pde) { pmap_pde_attr(pdpe, cache_bits_pde, X86_PG_PDE_CACHE); changed = TRUE; } if (tmpva >= VM_MIN_KERNEL_ADDRESS && (*pdpe & PG_PS_FRAME) < dmaplimit) { if (pa_start == pa_end) { /* Start physical address run. */ pa_start = *pdpe & PG_PS_FRAME; pa_end = pa_start + NBPDP; } else if (pa_end == (*pdpe & PG_PS_FRAME)) pa_end += NBPDP; else { /* Run ended, update direct map. */ error = pmap_change_attr_locked( PHYS_TO_DMAP(pa_start), pa_end - pa_start, mode); if (error != 0) break; /* Start physical address run. */ pa_start = *pdpe & PG_PS_FRAME; pa_end = pa_start + NBPDP; } } tmpva = trunc_1gpage(tmpva) + NBPDP; continue; } pde = pmap_pdpe_to_pde(pdpe, tmpva); if (*pde & PG_PS) { if ((*pde & X86_PG_PDE_CACHE) != cache_bits_pde) { pmap_pde_attr(pde, cache_bits_pde, X86_PG_PDE_CACHE); changed = TRUE; } if (tmpva >= VM_MIN_KERNEL_ADDRESS && (*pde & PG_PS_FRAME) < dmaplimit) { if (pa_start == pa_end) { /* Start physical address run. */ pa_start = *pde & PG_PS_FRAME; pa_end = pa_start + NBPDR; } else if (pa_end == (*pde & PG_PS_FRAME)) pa_end += NBPDR; else { /* Run ended, update direct map. */ error = pmap_change_attr_locked( PHYS_TO_DMAP(pa_start), pa_end - pa_start, mode); if (error != 0) break; /* Start physical address run. */ pa_start = *pde & PG_PS_FRAME; pa_end = pa_start + NBPDR; } } tmpva = trunc_2mpage(tmpva) + NBPDR; } else { pte = pmap_pde_to_pte(pde, tmpva); if ((*pte & X86_PG_PTE_CACHE) != cache_bits_pte) { pmap_pte_attr(pte, cache_bits_pte, X86_PG_PTE_CACHE); changed = TRUE; } if (tmpva >= VM_MIN_KERNEL_ADDRESS && (*pte & PG_FRAME) < dmaplimit) { if (pa_start == pa_end) { /* Start physical address run. */ pa_start = *pte & PG_FRAME; pa_end = pa_start + PAGE_SIZE; } else if (pa_end == (*pte & PG_FRAME)) pa_end += PAGE_SIZE; else { /* Run ended, update direct map. */ error = pmap_change_attr_locked( PHYS_TO_DMAP(pa_start), pa_end - pa_start, mode); if (error != 0) break; /* Start physical address run. */ pa_start = *pte & PG_FRAME; pa_end = pa_start + PAGE_SIZE; } } tmpva += PAGE_SIZE; } } if (error == 0 && pa_start != pa_end && pa_start < dmaplimit) { pa_end1 = MIN(pa_end, dmaplimit); if (pa_start != pa_end1) error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start), pa_end1 - pa_start, mode); } /* * Flush CPU caches if required to make sure any data isn't cached that * shouldn't be, etc. */ if (changed) { pmap_invalidate_range(kernel_pmap, base, tmpva); pmap_invalidate_cache_range(base, tmpva, FALSE); } return (error); } /* * Demotes any mapping within the direct map region that covers more than the * specified range of physical addresses. This range's size must be a power * of two and its starting address must be a multiple of its size. Since the * demotion does not change any attributes of the mapping, a TLB invalidation * is not mandatory. The caller may, however, request a TLB invalidation. 
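 * For instance, a hypothetical caller that wants 4KB granularity
 * for a single direct-map page could use
 *
 *	pmap_demote_DMAP(pa, PAGE_SIZE, TRUE);
 *
 * since a page-aligned, page-sized range trivially meets the
 * power-of-two and alignment requirements.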
*/ void pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate) { pdp_entry_t *pdpe; pd_entry_t *pde; vm_offset_t va; boolean_t changed; if (len == 0) return; KASSERT(powerof2(len), ("pmap_demote_DMAP: len is not a power of 2")); KASSERT((base & (len - 1)) == 0, ("pmap_demote_DMAP: base is not a multiple of len")); if (len < NBPDP && base < dmaplimit) { va = PHYS_TO_DMAP(base); changed = FALSE; PMAP_LOCK(kernel_pmap); pdpe = pmap_pdpe(kernel_pmap, va); if ((*pdpe & X86_PG_V) == 0) panic("pmap_demote_DMAP: invalid PDPE"); if ((*pdpe & PG_PS) != 0) { if (!pmap_demote_pdpe(kernel_pmap, pdpe, va)) panic("pmap_demote_DMAP: PDPE failed"); changed = TRUE; } if (len < NBPDR) { pde = pmap_pdpe_to_pde(pdpe, va); if ((*pde & X86_PG_V) == 0) panic("pmap_demote_DMAP: invalid PDE"); if ((*pde & PG_PS) != 0) { if (!pmap_demote_pde(kernel_pmap, pde, va)) panic("pmap_demote_DMAP: PDE failed"); changed = TRUE; } } if (changed && invalidate) pmap_invalidate_page(kernel_pmap, va); PMAP_UNLOCK(kernel_pmap); } } /* * perform the pmap work for mincore */ int pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa) { pd_entry_t *pdep; pt_entry_t pte, PG_A, PG_M, PG_RW, PG_V; vm_paddr_t pa; int val; PG_A = pmap_accessed_bit(pmap); PG_M = pmap_modified_bit(pmap); PG_V = pmap_valid_bit(pmap); PG_RW = pmap_rw_bit(pmap); PMAP_LOCK(pmap); retry: pdep = pmap_pde(pmap, addr); if (pdep != NULL && (*pdep & PG_V)) { if (*pdep & PG_PS) { pte = *pdep; /* Compute the physical address of the 4KB page. */ pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) & PG_FRAME; val = MINCORE_SUPER; } else { pte = *pmap_pde_to_pte(pdep, addr); pa = pte & PG_FRAME; val = 0; } } else { pte = 0; pa = 0; val = 0; } if ((pte & PG_V) != 0) { val |= MINCORE_INCORE; if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; if ((pte & PG_A) != 0) val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; } if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) { /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. 
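 * vm_page_pa_tryrelock() may have to drop the pmap lock in order
 * to take the page lock in the proper order; if it reports having
 * done so, the PTE examined above may be stale, hence the jump
 * back to "retry".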
*/ if (vm_page_pa_tryrelock(pmap, pa, locked_pa)) goto retry; } else PA_UNLOCK_COND(*locked_pa); PMAP_UNLOCK(pmap); return (val); } static uint64_t pmap_pcid_alloc(pmap_t pmap, u_int cpuid) { uint32_t gen, new_gen, pcid_next; CRITICAL_ASSERT(curthread); gen = PCPU_GET(pcid_gen); if (pmap->pm_pcids[cpuid].pm_pcid == PMAP_PCID_KERN || pmap->pm_pcids[cpuid].pm_gen == gen) return (CR3_PCID_SAVE); pcid_next = PCPU_GET(pcid_next); KASSERT(pcid_next <= PMAP_PCID_OVERMAX, ("cpu %d pcid_next %#x", cpuid, pcid_next)); if (pcid_next == PMAP_PCID_OVERMAX) { new_gen = gen + 1; if (new_gen == 0) new_gen = 1; PCPU_SET(pcid_gen, new_gen); pcid_next = PMAP_PCID_KERN + 1; } else { new_gen = gen; } pmap->pm_pcids[cpuid].pm_pcid = pcid_next; pmap->pm_pcids[cpuid].pm_gen = new_gen; PCPU_SET(pcid_next, pcid_next + 1); return (0); } void pmap_activate_sw(struct thread *td) { pmap_t oldpmap, pmap; uint64_t cached, cr3; register_t rflags; u_int cpuid; oldpmap = PCPU_GET(curpmap); pmap = vmspace_pmap(td->td_proc->p_vmspace); if (oldpmap == pmap) return; cpuid = PCPU_GET(cpuid); #ifdef SMP CPU_SET_ATOMIC(cpuid, &pmap->pm_active); #else CPU_SET(cpuid, &pmap->pm_active); #endif cr3 = rcr3(); if (pmap_pcid_enabled) { cached = pmap_pcid_alloc(pmap, cpuid); KASSERT(pmap->pm_pcids[cpuid].pm_pcid >= 0 && pmap->pm_pcids[cpuid].pm_pcid < PMAP_PCID_OVERMAX, ("pmap %p cpu %d pcid %#x", pmap, cpuid, pmap->pm_pcids[cpuid].pm_pcid)); KASSERT(pmap->pm_pcids[cpuid].pm_pcid != PMAP_PCID_KERN || pmap == kernel_pmap, ("non-kernel pmap thread %p pmap %p cpu %d pcid %#x", td, pmap, cpuid, pmap->pm_pcids[cpuid].pm_pcid)); /* * If the INVPCID instruction is not available, * invltlb_pcid_handler() is used to handle the * invalidate_all IPI, which checks for curpmap == * smp_tlb_pmap. The sequence of operations below has a * window where %CR3 is loaded with the new pmap's * PML4 address, but the curpmap value is not yet updated. * This causes the invltlb IPI handler, called between the * updates, to execute as a NOP, which leaves stale TLB * entries. * * Note that the most typical use of * pmap_activate_sw(), from the context switch, is * immune to this race, because interrupts are * disabled (while the thread lock is owned), and the IPI * happens after curpmap is updated. Protect other * callers in a similar way, by disabling interrupts * around the %cr3 register reload and curpmap * assignment. */ if (!invpcid_works) rflags = intr_disable(); if (!cached || (cr3 & ~CR3_PCID_MASK) != pmap->pm_cr3) { load_cr3(pmap->pm_cr3 | pmap->pm_pcids[cpuid].pm_pcid | cached); if (cached) PCPU_INC(pm_save_cnt); } PCPU_SET(curpmap, pmap); if (!invpcid_works) intr_restore(rflags); } else if (cr3 != pmap->pm_cr3) { load_cr3(pmap->pm_cr3); PCPU_SET(curpmap, pmap); } #ifdef SMP CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active); #else CPU_CLR(cpuid, &oldpmap->pm_active); #endif } void pmap_activate(struct thread *td) { critical_enter(); pmap_activate_sw(td); critical_exit(); } void pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) { } /* * Increase the starting virtual address of the given mapping if a * different alignment might result in more superpage mappings.
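 * As a worked example (values are illustrative only): if the
 * object's offset yields superpage_offset = 0x40000 and the
 * caller proposed *addr = 0x20000000, the address is advanced to
 * 0x20040000 so that the virtual address and the object offset
 * agree modulo NBPDR, making 2MB promotions possible.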
*/ void pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size) { vm_offset_t superpage_offset; if (size < NBPDR) return; if (object != NULL && (object->flags & OBJ_COLORED) != 0) offset += ptoa(object->pg_color); superpage_offset = offset & PDRMASK; if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR || (*addr & PDRMASK) == superpage_offset) return; if ((*addr & PDRMASK) < superpage_offset) *addr = (*addr & ~PDRMASK) + superpage_offset; else *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset; } #ifdef INVARIANTS static unsigned long num_dirty_emulations; SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_dirty_emulations, CTLFLAG_RW, &num_dirty_emulations, 0, NULL); static unsigned long num_accessed_emulations; SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_accessed_emulations, CTLFLAG_RW, &num_accessed_emulations, 0, NULL); static unsigned long num_superpage_accessed_emulations; SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_superpage_accessed_emulations, CTLFLAG_RW, &num_superpage_accessed_emulations, 0, NULL); static unsigned long ad_emulation_superpage_promotions; SYSCTL_ULONG(_vm_pmap, OID_AUTO, ad_emulation_superpage_promotions, CTLFLAG_RW, &ad_emulation_superpage_promotions, 0, NULL); #endif /* INVARIANTS */ int pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype) { int rv; struct rwlock *lock; vm_page_t m, mpte; pd_entry_t *pde; pt_entry_t *pte, PG_A, PG_M, PG_RW, PG_V; KASSERT(ftype == VM_PROT_READ || ftype == VM_PROT_WRITE, ("pmap_emulate_accessed_dirty: invalid fault type %d", ftype)); if (!pmap_emulate_ad_bits(pmap)) return (-1); PG_A = pmap_accessed_bit(pmap); PG_M = pmap_modified_bit(pmap); PG_V = pmap_valid_bit(pmap); PG_RW = pmap_rw_bit(pmap); rv = -1; lock = NULL; PMAP_LOCK(pmap); pde = pmap_pde(pmap, va); if (pde == NULL || (*pde & PG_V) == 0) goto done; if ((*pde & PG_PS) != 0) { if (ftype == VM_PROT_READ) { #ifdef INVARIANTS atomic_add_long(&num_superpage_accessed_emulations, 1); #endif *pde |= PG_A; rv = 0; } goto done; } pte = pmap_pde_to_pte(pde, va); if ((*pte & PG_V) == 0) goto done; if (ftype == VM_PROT_WRITE) { if ((*pte & PG_RW) == 0) goto done; /* * Set the modified and accessed bits simultaneously. * * Intel EPT PTEs that do software emulation of A/D bits map * PG_A and PG_M to EPT_PG_READ and EPT_PG_WRITE respectively. * An EPT misconfiguration is triggered if the PTE is writable * but not readable (WR=10). This is avoided by setting PG_A * and PG_M simultaneously. 
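 * In (R, W) terms the reachable EPT permission states are:
 *
 *	R=0 W=0		no access
 *	R=1 W=0		accessed (PG_A) only
 *	R=1 W=1		accessed and dirty (PG_A | PG_M)
 *	R=0 W=1		WR=10, EPT misconfiguration
 *
 * Writing PG_M | PG_A with a single store never exposes the last,
 * invalid combination.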
*/ *pte |= PG_M | PG_A; } else { *pte |= PG_A; } /* try to promote the mapping */ if (va < VM_MAXUSER_ADDRESS) mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME); else mpte = NULL; m = PHYS_TO_VM_PAGE(*pte & PG_FRAME); if ((mpte == NULL || mpte->wire_count == NPTEPG) && pmap_ps_enabled(pmap) && (m->flags & PG_FICTITIOUS) == 0 && vm_reserv_level_iffullpop(m) == 0) { pmap_promote_pde(pmap, pde, va, &lock); #ifdef INVARIANTS atomic_add_long(&ad_emulation_superpage_promotions, 1); #endif } #ifdef INVARIANTS if (ftype == VM_PROT_WRITE) atomic_add_long(&num_dirty_emulations, 1); else atomic_add_long(&num_accessed_emulations, 1); #endif rv = 0; /* success */ done: if (lock != NULL) rw_wunlock(lock); PMAP_UNLOCK(pmap); return (rv); } void pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num) { pml4_entry_t *pml4; pdp_entry_t *pdp; pd_entry_t *pde; pt_entry_t *pte, PG_V; int idx; idx = 0; PG_V = pmap_valid_bit(pmap); PMAP_LOCK(pmap); pml4 = pmap_pml4e(pmap, va); ptr[idx++] = *pml4; if ((*pml4 & PG_V) == 0) goto done; pdp = pmap_pml4e_to_pdpe(pml4, va); ptr[idx++] = *pdp; if ((*pdp & PG_V) == 0 || (*pdp & PG_PS) != 0) goto done; pde = pmap_pdpe_to_pde(pdp, va); ptr[idx++] = *pde; if ((*pde & PG_V) == 0 || (*pde & PG_PS) != 0) goto done; pte = pmap_pde_to_pte(pde, va); ptr[idx++] = *pte; done: PMAP_UNLOCK(pmap); *num = idx; } /** * Get the kernel virtual address of a set of physical pages. If there are * physical addresses not covered by the DMAP perform a transient mapping * that will be removed when calling pmap_unmap_io_transient. * * \param page The pages the caller wishes to obtain the virtual * address on the kernel memory map. * \param vaddr On return contains the kernel virtual memory address * of the pages passed in the page parameter. * \param count Number of pages passed in. * \param can_fault TRUE if the thread using the mapped pages can take * page faults, FALSE otherwise. * * \returns TRUE if the caller must call pmap_unmap_io_transient when * finished or FALSE otherwise. * */ boolean_t pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count, boolean_t can_fault) { vm_paddr_t paddr; boolean_t needs_mapping; pt_entry_t *pte; int cache_bits, error, i; /* * Allocate any KVA space that we need, this is done in a separate * loop to prevent calling vmem_alloc while pinned. */ needs_mapping = FALSE; for (i = 0; i < count; i++) { paddr = VM_PAGE_TO_PHYS(page[i]); if (__predict_false(paddr >= dmaplimit)) { error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK, &vaddr[i]); KASSERT(error == 0, ("vmem_alloc failed: %d", error)); needs_mapping = TRUE; } else { vaddr[i] = PHYS_TO_DMAP(paddr); } } /* Exit early if everything is covered by the DMAP */ if (!needs_mapping) return (FALSE); /* * NB: The sequence of updating a page table followed by accesses * to the corresponding pages used in the !DMAP case is subject to * the situation described in the "AMD64 Architecture Programmer's * Manual Volume 2: System Programming" rev. 3.23, "7.3.1 Special * Coherency Considerations". Therefore, issuing the INVLPG right * after modifying the PTE bits is crucial. */ if (!can_fault) sched_pin(); for (i = 0; i < count; i++) { paddr = VM_PAGE_TO_PHYS(page[i]); if (paddr >= dmaplimit) { if (can_fault) { /* * Slow path, since we can get page faults * while mappings are active don't pin the * thread to the CPU and instead add a global * mapping visible to all CPUs. 
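 * A global mapping (pmap_qenter() below) is needed because a
 * faulting thread may migrate to another CPU while the mapping is
 * live; the pinned pte_store()/invlpg() path in the else branch
 * only invalidates the local TLB and would leave stale entries
 * elsewhere.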
*/ pmap_qenter(vaddr[i], &page[i], 1); } else { pte = vtopte(vaddr[i]); cache_bits = pmap_cache_bits(kernel_pmap, page[i]->md.pat_mode, 0); pte_store(pte, paddr | X86_PG_RW | X86_PG_V | cache_bits); invlpg(vaddr[i]); } } } return (needs_mapping); } void pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count, boolean_t can_fault) { vm_paddr_t paddr; int i; if (!can_fault) sched_unpin(); for (i = 0; i < count; i++) { paddr = VM_PAGE_TO_PHYS(page[i]); if (paddr >= dmaplimit) { if (can_fault) pmap_qremove(vaddr[i], 1); vmem_free(kernel_arena, vaddr[i], PAGE_SIZE); } } } vm_offset_t pmap_quick_enter_page(vm_page_t m) { vm_paddr_t paddr; paddr = VM_PAGE_TO_PHYS(m); if (paddr < dmaplimit) return (PHYS_TO_DMAP(paddr)); mtx_lock_spin(&qframe_mtx); KASSERT(*vtopte(qframe) == 0, ("qframe busy")); pte_store(vtopte(qframe), paddr | X86_PG_RW | X86_PG_V | X86_PG_A | X86_PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0)); return (qframe); } void pmap_quick_remove_page(vm_offset_t addr) { if (addr != qframe) return; pte_store(vtopte(qframe), 0); invlpg(qframe); mtx_unlock_spin(&qframe_mtx); } #include "opt_ddb.h" #ifdef DDB #include #include DB_SHOW_COMMAND(pte, pmap_print_pte) { pmap_t pmap; pml4_entry_t *pml4; pdp_entry_t *pdp; pd_entry_t *pde; pt_entry_t *pte, PG_V; vm_offset_t va; if (!have_addr) { db_printf("show pte addr\n"); return; } va = (vm_offset_t)addr; if (kdb_thread != NULL) pmap = vmspace_pmap(kdb_thread->td_proc->p_vmspace); else pmap = PCPU_GET(curpmap); PG_V = pmap_valid_bit(pmap); pml4 = pmap_pml4e(pmap, va); db_printf("VA %#016lx pml4e %#016lx", va, *pml4); if ((*pml4 & PG_V) == 0) { db_printf("\n"); return; } pdp = pmap_pml4e_to_pdpe(pml4, va); db_printf(" pdpe %#016lx", *pdp); if ((*pdp & PG_V) == 0 || (*pdp & PG_PS) != 0) { db_printf("\n"); return; } pde = pmap_pdpe_to_pde(pdp, va); db_printf(" pde %#016lx", *pde); if ((*pde & PG_V) == 0 || (*pde & PG_PS) != 0) { db_printf("\n"); return; } pte = pmap_pde_to_pte(pde, va); db_printf(" pte %#016lx\n", *pte); } DB_SHOW_COMMAND(phys2dmap, pmap_phys2dmap) { vm_paddr_t a; if (have_addr) { a = (vm_paddr_t)addr; db_printf("0x%jx\n", (uintmax_t)PHYS_TO_DMAP(a)); } else { db_printf("show phys2dmap addr\n"); } } #endif Index: projects/clang500-import/sys/arm/annapurna/alpine/files.alpine =================================================================== --- projects/clang500-import/sys/arm/annapurna/alpine/files.alpine (revision 319548) +++ projects/clang500-import/sys/arm/annapurna/alpine/files.alpine (revision 319549) @@ -1,11 +1,10 @@ # $FreeBSD$ kern/kern_clocksource.c standard arm/versatile/sp804.c standard -arm/versatile/versatile_timer.c standard dev/uart/uart_dev_ns8250.c optional uart arm/annapurna/alpine/common.c standard arm/annapurna/alpine/alpine_machdep.c standard arm/annapurna/alpine/alpine_machdep_mp.c optional smp Index: projects/clang500-import/sys/arm/arm/machdep.c =================================================================== --- projects/clang500-import/sys/arm/arm/machdep.c (revision 319548) +++ projects/clang500-import/sys/arm/arm/machdep.c (revision 319549) @@ -1,1207 +1,1217 @@ /* $NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $ */ /*- * Copyright (c) 2004 Olivier Houchard * Copyright (c) 1994-1998 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. 
* * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe * for the NetBSD Project. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Machine dependent functions for kernel setup * * Created : 17/09/94 * Updated : 18/04/01 updated for new wscons */ #include "opt_compat.h" #include "opt_ddb.h" #include "opt_kstack_pages.h" #include "opt_platform.h" #include "opt_sched.h" #include "opt_timer.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #endif #ifdef DEBUG #define debugf(fmt, args...) printf(fmt, ##args) #else #define debugf(fmt, args...) #endif #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) || \ defined(COMPAT_FREEBSD9) #error FreeBSD/arm doesn't provide compatibility with releases prior to 10 #endif +#if __ARM_ARCH >= 6 && !defined(INTRNG) +#error armv6 requires INTRNG +#endif + struct pcpu __pcpu[MAXCPU]; struct pcpu *pcpup = &__pcpu[0]; static struct trapframe proc0_tf; uint32_t cpu_reset_address = 0; int cold = 1; vm_offset_t vector_page; int (*_arm_memcpy)(void *, void *, int, int) = NULL; int (*_arm_bzero)(void *, int, int) = NULL; int _min_memcpy_size = 0; int _min_bzero_size = 0; extern int *end; #ifdef FDT vm_paddr_t pmap_pa; #if __ARM_ARCH >= 6 vm_offset_t systempage; vm_offset_t irqstack; vm_offset_t undstack; vm_offset_t abtstack; #else /* * This is the number of L2 page tables required for covering max * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf, * stacks etc.), uprounded to be divisible by 4. 
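 * The rounding reflects that four 1KB L2 tables
 * (L2_TABLE_SIZE_REAL) are carved out of each 4KB page; the
 * allocation loop in initarm() below hands them out in groups of
 * PAGE_SIZE / L2_TABLE_SIZE_REAL.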
*/ #define KERNEL_PT_MAX 78 static struct pv_addr kernel_pt_table[KERNEL_PT_MAX]; struct pv_addr systempage; static struct pv_addr msgbufpv; struct pv_addr irqstack; struct pv_addr undstack; struct pv_addr abtstack; static struct pv_addr kernelstack; #endif /* __ARM_ARCH >= 6 */ #endif /* FDT */ #ifdef MULTIDELAY static delay_func *delay_impl; static void *delay_arg; #endif struct kva_md_info kmi; /* * arm32_vector_init: * * Initialize the vector page, and select whether or not to * relocate the vectors. * * NOTE: We expect the vector page to be mapped at its expected * destination. */ extern unsigned int page0[], page0_data[]; void arm_vector_init(vm_offset_t va, int which) { unsigned int *vectors = (int *) va; unsigned int *vectors_data = vectors + (page0_data - page0); int vec; /* * Loop through the vectors we're taking over, and copy the * vector's insn and data word. */ for (vec = 0; vec < ARM_NVEC; vec++) { if ((which & (1 << vec)) == 0) { /* Don't want to take over this vector. */ continue; } vectors[vec] = page0[vec]; vectors_data[vec] = page0_data[vec]; } /* Now sync the vectors. */ icache_sync(va, (ARM_NVEC * 2) * sizeof(u_int)); vector_page = va; #if __ARM_ARCH < 6 if (va == ARM_VECTORS_HIGH) { /* * Enable high vectors in the system control reg (SCTLR). * * Assume the MD caller knows what it's doing here, and really * does want the vector page relocated. * * Note: This has to be done here (and not just in * cpu_setup()) because the vector page needs to be * accessible *before* cpu_startup() is called. * Think ddb(9) ... */ cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC); } #endif } static void cpu_startup(void *dummy) { struct pcb *pcb = thread0.td_pcb; const unsigned int mbyte = 1024 * 1024; #if __ARM_ARCH < 6 && !defined(ARM_CACHE_LOCK_ENABLE) vm_page_t m; #endif identify_arm_cpu(); vm_ksubmap_init(&kmi); /* * Display the RAM layout. */ printf("real memory = %ju (%ju MB)\n", (uintmax_t)arm32_ptob(realmem), (uintmax_t)arm32_ptob(realmem) / mbyte); printf("avail memory = %ju (%ju MB)\n", (uintmax_t)arm32_ptob(vm_cnt.v_free_count), (uintmax_t)arm32_ptob(vm_cnt.v_free_count) / mbyte); if (bootverbose) { arm_physmem_print_tables(); devmap_print_table(); } bufinit(); vm_pager_bufferinit(); pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack + USPACE_SVC_STACK_TOP; pmap_set_pcb_pagedir(kernel_pmap, pcb); #if __ARM_ARCH < 6 vector_page_setprot(VM_PROT_READ); pmap_postinit(); #ifdef ARM_CACHE_LOCK_ENABLE pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS); arm_lock_cache_line(ARM_TP_ADDRESS); #else m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO); pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m)); #endif *(uint32_t *)ARM_RAS_START = 0; *(uint32_t *)ARM_RAS_END = 0xffffffff; #endif } SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); /* * Flush the D-cache for non-DMA I/O so that the I-cache can * be made coherent later. */ void cpu_flush_dcache(void *ptr, size_t len) { dcache_wb_poc((vm_offset_t)ptr, (vm_paddr_t)vtophys(ptr), len); } /* Get current clock frequency for the given cpu id. 
*/ int cpu_est_clockrate(int cpu_id, uint64_t *rate) { return (ENXIO); } void cpu_idle(int busy) { CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu); spinlock_enter(); #ifndef NO_EVENTTIMERS if (!busy) cpu_idleclock(); #endif if (!sched_runnable()) cpu_sleep(0); #ifndef NO_EVENTTIMERS if (!busy) cpu_activeclock(); #endif spinlock_exit(); CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done", busy, curcpu); } int cpu_idle_wakeup(int cpu) { return (0); } +#ifdef NO_EVENTTIMERS /* * Most ARM platforms don't need to do anything special to init their clocks * (they get initialized during normal device attachment), and by not defining a * cpu_initclocks() function they get this generic one. Any platform that needs * to do something special can just provide its own implementation, which will * override this one due to the weak linkage. */ void arm_generic_initclocks(void) { +} +__weak_reference(arm_generic_initclocks, cpu_initclocks); -#ifndef NO_EVENTTIMERS +#else +void +cpu_initclocks(void) +{ + #ifdef SMP if (PCPU_GET(cpuid) == 0) cpu_initclocks_bsp(); else cpu_initclocks_ap(); #else cpu_initclocks_bsp(); #endif -#endif } -__weak_reference(arm_generic_initclocks, cpu_initclocks); +#endif #ifdef MULTIDELAY void arm_set_delay(delay_func *impl, void *arg) { KASSERT(impl != NULL, ("No DELAY implementation")); delay_impl = impl; delay_arg = arg; } void DELAY(int usec) { delay_impl(usec, delay_arg); } #endif void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) { } void spinlock_enter(void) { struct thread *td; register_t cspr; td = curthread; if (td->td_md.md_spinlock_count == 0) { cspr = disable_interrupts(PSR_I | PSR_F); td->td_md.md_spinlock_count = 1; td->td_md.md_saved_cspr = cspr; } else td->td_md.md_spinlock_count++; critical_enter(); } void spinlock_exit(void) { struct thread *td; register_t cspr; td = curthread; critical_exit(); cspr = td->td_md.md_saved_cspr; td->td_md.md_spinlock_count--; if (td->td_md.md_spinlock_count == 0) restore_interrupts(cspr); } /* * Clear registers on exec */ void exec_setregs(struct thread *td, struct image_params *imgp, u_long stack) { struct trapframe *tf = td->td_frame; memset(tf, 0, sizeof(*tf)); tf->tf_usr_sp = stack; tf->tf_usr_lr = imgp->entry_addr; tf->tf_svc_lr = 0x77777777; tf->tf_pc = imgp->entry_addr; tf->tf_spsr = PSR_USR32_MODE; } #ifdef VFP /* * Get machine VFP context. */ static void get_vfpcontext(struct thread *td, mcontext_vfp_t *vfp) { struct pcb *curpcb; curpcb = curthread->td_pcb; critical_enter(); vfp_store(&curpcb->pcb_vfpstate, false); memcpy(vfp->mcv_reg, curpcb->pcb_vfpstate.reg, sizeof(vfp->mcv_reg)); vfp->mcv_fpscr = curpcb->pcb_vfpstate.fpscr; critical_exit(); } /* * Set machine VFP context. */ static void set_vfpcontext(struct thread *td, mcontext_vfp_t *vfp) { struct pcb *curpcb; curpcb = curthread->td_pcb; critical_enter(); vfp_discard(td); memcpy(curpcb->pcb_vfpstate.reg, vfp->mcv_reg, sizeof(curpcb->pcb_vfpstate.reg)); curpcb->pcb_vfpstate.fpscr = vfp->mcv_fpscr; critical_exit(); } #endif /* * Get machine context.
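 * When GET_MC_CLEAR_RET is set, r0 and the PSR carry bit are
 * cleared in the copied-out context, so a context resumed at a
 * syscall boundary observes a successful return (by the
 * FreeBSD/arm convention, a set carry flag signals a syscall
 * error).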
*/ int get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret) { struct trapframe *tf = td->td_frame; __greg_t *gr = mcp->__gregs; if (clear_ret & GET_MC_CLEAR_RET) { gr[_REG_R0] = 0; gr[_REG_CPSR] = tf->tf_spsr & ~PSR_C; } else { gr[_REG_R0] = tf->tf_r0; gr[_REG_CPSR] = tf->tf_spsr; } gr[_REG_R1] = tf->tf_r1; gr[_REG_R2] = tf->tf_r2; gr[_REG_R3] = tf->tf_r3; gr[_REG_R4] = tf->tf_r4; gr[_REG_R5] = tf->tf_r5; gr[_REG_R6] = tf->tf_r6; gr[_REG_R7] = tf->tf_r7; gr[_REG_R8] = tf->tf_r8; gr[_REG_R9] = tf->tf_r9; gr[_REG_R10] = tf->tf_r10; gr[_REG_R11] = tf->tf_r11; gr[_REG_R12] = tf->tf_r12; gr[_REG_SP] = tf->tf_usr_sp; gr[_REG_LR] = tf->tf_usr_lr; gr[_REG_PC] = tf->tf_pc; mcp->mc_vfp_size = 0; mcp->mc_vfp_ptr = NULL; memset(&mcp->mc_spare, 0, sizeof(mcp->mc_spare)); return (0); } /* * Set machine context. * * However, we don't set any but the user modifiable flags, and we won't * touch the cs selector. */ int set_mcontext(struct thread *td, mcontext_t *mcp) { mcontext_vfp_t mc_vfp, *vfp; struct trapframe *tf = td->td_frame; const __greg_t *gr = mcp->__gregs; #ifdef WITNESS if (mcp->mc_vfp_size != 0 && mcp->mc_vfp_size != sizeof(mc_vfp)) { printf("%s: %s: Malformed mc_vfp_size: %d (0x%08X)\n", td->td_proc->p_comm, __func__, mcp->mc_vfp_size, mcp->mc_vfp_size); } else if (mcp->mc_vfp_size != 0 && mcp->mc_vfp_ptr == NULL) { printf("%s: %s: c_vfp_size != 0 but mc_vfp_ptr == NULL\n", td->td_proc->p_comm, __func__); } #endif if (mcp->mc_vfp_size == sizeof(mc_vfp) && mcp->mc_vfp_ptr != NULL) { if (copyin(mcp->mc_vfp_ptr, &mc_vfp, sizeof(mc_vfp)) != 0) return (EFAULT); vfp = &mc_vfp; } else { vfp = NULL; } tf->tf_r0 = gr[_REG_R0]; tf->tf_r1 = gr[_REG_R1]; tf->tf_r2 = gr[_REG_R2]; tf->tf_r3 = gr[_REG_R3]; tf->tf_r4 = gr[_REG_R4]; tf->tf_r5 = gr[_REG_R5]; tf->tf_r6 = gr[_REG_R6]; tf->tf_r7 = gr[_REG_R7]; tf->tf_r8 = gr[_REG_R8]; tf->tf_r9 = gr[_REG_R9]; tf->tf_r10 = gr[_REG_R10]; tf->tf_r11 = gr[_REG_R11]; tf->tf_r12 = gr[_REG_R12]; tf->tf_usr_sp = gr[_REG_SP]; tf->tf_usr_lr = gr[_REG_LR]; tf->tf_pc = gr[_REG_PC]; tf->tf_spsr = gr[_REG_CPSR]; #ifdef VFP if (vfp != NULL) set_vfpcontext(td, vfp); #endif return (0); } void sendsig(catcher, ksi, mask) sig_t catcher; ksiginfo_t *ksi; sigset_t *mask; { struct thread *td; struct proc *p; struct trapframe *tf; struct sigframe *fp, frame; struct sigacts *psp; struct sysentvec *sysent; int onstack; int sig; int code; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; code = ksi->ksi_code; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); tf = td->td_frame; onstack = sigonstack(tf->tf_usr_sp); CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm, catcher, sig); /* Allocate and validate space for the signal handler context. */ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) && SIGISMEMBER(psp->ps_sigonstack, sig)) { fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp + td->td_sigstk.ss_size); #if defined(COMPAT_43) td->td_sigstk.ss_flags |= SS_ONSTACK; #endif } else fp = (struct sigframe *)td->td_frame->tf_usr_sp; /* make room on the stack */ fp--; /* make the stack aligned */ fp = (struct sigframe *)STACKALIGN(fp); /* Populate the siginfo frame. 
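 * The frame is staged in the local "frame" variable and copied to
 * the user stack afterwards, so pointers stored inside it (sf_si,
 * sf_uc and the VFP area) must reference the user copy at "fp",
 * not the kernel-local staging copy.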
*/ get_mcontext(td, &frame.sf_uc.uc_mcontext, 0); #ifdef VFP get_vfpcontext(td, &frame.sf_vfp); frame.sf_uc.uc_mcontext.mc_vfp_size = sizeof(fp->sf_vfp); frame.sf_uc.uc_mcontext.mc_vfp_ptr = &fp->sf_vfp; #else frame.sf_uc.uc_mcontext.mc_vfp_size = 0; frame.sf_uc.uc_mcontext.mc_vfp_ptr = NULL; #endif frame.sf_si = ksi->ksi_info; frame.sf_uc.uc_sigmask = *mask; frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK ) ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE; frame.sf_uc.uc_stack = td->td_sigstk; mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(td->td_proc); /* Copy the sigframe out to the user's stack. */ if (copyout(&frame, fp, sizeof(*fp)) != 0) { /* Process has trashed its stack. Kill it. */ CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp); PROC_LOCK(p); sigexit(td, SIGILL); } /* * Build context to run handler in. We invoke the handler * directly, only returning via the trampoline. Note the * trampoline version numbers are coordinated with machine- * dependent code in libc. */ tf->tf_r0 = sig; tf->tf_r1 = (register_t)&fp->sf_si; tf->tf_r2 = (register_t)&fp->sf_uc; /* the trampoline uses r5 as the uc address */ tf->tf_r5 = (register_t)&fp->sf_uc; tf->tf_pc = (register_t)catcher; tf->tf_usr_sp = (register_t)fp; sysent = p->p_sysent; if (sysent->sv_sigcode_base != 0) tf->tf_usr_lr = (register_t)sysent->sv_sigcode_base; else tf->tf_usr_lr = (register_t)(sysent->sv_psstrings - *(sysent->sv_szsigcode)); /* Set the mode to enter in the signal handler */ #if __ARM_ARCH >= 7 if ((register_t)catcher & 1) tf->tf_spsr |= PSR_T; else tf->tf_spsr &= ~PSR_T; #endif CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr, tf->tf_usr_sp); PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } int sys_sigreturn(td, uap) struct thread *td; struct sigreturn_args /* { const struct __ucontext *sigcntxp; } */ *uap; { ucontext_t uc; int spsr; if (uap == NULL) return (EFAULT); if (copyin(uap->sigcntxp, &uc, sizeof(uc))) return (EFAULT); /* * Make sure the processor mode has not been tampered with and * interrupts have not been disabled. */ spsr = uc.uc_mcontext.__gregs[_REG_CPSR]; if ((spsr & PSR_MODE) != PSR_USR32_MODE || (spsr & (PSR_I | PSR_F)) != 0) return (EINVAL); /* Restore register context. */ set_mcontext(td, &uc.uc_mcontext); /* Restore signal mask. */ kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); return (EJUSTRETURN); } /* * Construct a PCB from a trapframe. This is called from kdb_trap() where * we want to start a backtrace from the function that caused us to enter * the debugger. We have the context in the trapframe, but base the trace * on the PCB. The PCB doesn't have to be perfect, as long as it contains * enough for a backtrace. 
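 * Only r4-r12 plus pc, lr and sp are copied: the callee-saved set
 * (plus r12) is all a backtrace needs, which is why an imperfect
 * PCB suffices.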
*/ void makectx(struct trapframe *tf, struct pcb *pcb) { pcb->pcb_regs.sf_r4 = tf->tf_r4; pcb->pcb_regs.sf_r5 = tf->tf_r5; pcb->pcb_regs.sf_r6 = tf->tf_r6; pcb->pcb_regs.sf_r7 = tf->tf_r7; pcb->pcb_regs.sf_r8 = tf->tf_r8; pcb->pcb_regs.sf_r9 = tf->tf_r9; pcb->pcb_regs.sf_r10 = tf->tf_r10; pcb->pcb_regs.sf_r11 = tf->tf_r11; pcb->pcb_regs.sf_r12 = tf->tf_r12; pcb->pcb_regs.sf_pc = tf->tf_pc; pcb->pcb_regs.sf_lr = tf->tf_usr_lr; pcb->pcb_regs.sf_sp = tf->tf_usr_sp; } void pcpu0_init(void) { #if __ARM_ARCH >= 6 set_curthread(&thread0); #endif pcpu_init(pcpup, 0, sizeof(struct pcpu)); PCPU_SET(curthread, &thread0); } /* * Initialize proc0 */ void init_proc0(vm_offset_t kstack) { proc_linkup0(&proc0, &thread0); thread0.td_kstack = kstack; thread0.td_pcb = (struct pcb *) (thread0.td_kstack + kstack_pages * PAGE_SIZE) - 1; thread0.td_pcb->pcb_flags = 0; thread0.td_pcb->pcb_vfpcpu = -1; thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN; thread0.td_frame = &proc0_tf; pcpup->pc_curpcb = thread0.td_pcb; } #if __ARM_ARCH >= 6 void set_stackptrs(int cpu) { set_stackptr(PSR_IRQ32_MODE, irqstack + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); set_stackptr(PSR_ABT32_MODE, abtstack + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); set_stackptr(PSR_UND32_MODE, undstack + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); } #else void set_stackptrs(int cpu) { set_stackptr(PSR_IRQ32_MODE, irqstack.pv_va + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); set_stackptr(PSR_ABT32_MODE, abtstack.pv_va + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); set_stackptr(PSR_UND32_MODE, undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); } #endif #ifdef FDT #if __ARM_ARCH < 6 void * initarm(struct arm_boot_params *abp) { struct mem_region mem_regions[FDT_MEM_REGIONS]; struct pv_addr kernel_l1pt; struct pv_addr dpcpu; vm_offset_t dtbp, freemempos, l2_start, lastaddr; uint64_t memsize; uint32_t l2size; char *env; void *kmdp; u_int l1pagetable; int i, j, err_devmap, mem_regions_sz; lastaddr = parse_boot_param(abp); arm_physmem_kernaddr = abp->abp_physaddr; memsize = 0; cpuinfo_init(); set_cpufuncs(); /* * Find the dtb passed in by the boot loader. */ kmdp = preload_search_by_type("elf kernel"); if (kmdp != NULL) dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t); else dtbp = (vm_offset_t)NULL; #if defined(FDT_DTB_STATIC) /* * In case the device tree blob was not retrieved (from metadata) try * to use the statically embedded one. */ if (dtbp == (vm_offset_t)NULL) dtbp = (vm_offset_t)&fdt_static_dtb; #endif if (OF_install(OFW_FDT, 0) == FALSE) panic("Cannot install FDT"); if (OF_init((void *)dtbp) != 0) panic("OF_init failed with the found device tree"); /* Grab physical memory regions information from device tree. */ if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0) panic("Cannot get physical memory regions"); arm_physmem_hardware_regions(mem_regions, mem_regions_sz); /* Grab reserved memory regions information from device tree. 
*/ if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0) arm_physmem_exclude_regions(mem_regions, mem_regions_sz, EXFLAG_NODUMP | EXFLAG_NOALLOC); /* Platform-specific initialisation */ platform_probe_and_attach(); pcpu0_init(); /* Do basic tuning, hz etc */ init_param1(); /* Calculate number of L2 tables needed for mapping vm_page_array */ l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page); l2size = (l2size >> L1_S_SHIFT) + 1; /* * Add one table for end of kernel map, one for stacks, msgbuf and * L1 and L2 tables map, one for vectors map and two for * l2 structures from pmap_bootstrap. */ l2size += 5; /* Make it divisible by 4 */ l2size = (l2size + 3) & ~3; freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK; /* Define a macro to simplify memory allocation */ #define valloc_pages(var, np) \ alloc_pages((var).pv_va, (np)); \ (var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR); #define alloc_pages(var, np) \ (var) = freemempos; \ freemempos += (np * PAGE_SIZE); \ memset((char *)(var), 0, ((np) * PAGE_SIZE)); while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0) freemempos += PAGE_SIZE; valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE); for (i = 0, j = 0; i < l2size; ++i) { if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) { valloc_pages(kernel_pt_table[i], L2_TABLE_SIZE / PAGE_SIZE); j = i; } else { kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va + L2_TABLE_SIZE_REAL * (i - j); kernel_pt_table[i].pv_pa = kernel_pt_table[i].pv_va - KERNVIRTADDR + abp->abp_physaddr; } } /* * Allocate a page for the system page mapped to 0x00000000 * or 0xffff0000. This page will just contain the system vectors * and can be shared by all processes. */ valloc_pages(systempage, 1); /* Allocate dynamic per-cpu area. */ valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE); dpcpu_init((void *)dpcpu.pv_va, 0); /* Allocate stacks for all modes */ valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU); valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU); valloc_pages(undstack, UND_STACK_SIZE * MAXCPU); valloc_pages(kernelstack, kstack_pages * MAXCPU); valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE); /* * Now we start construction of the L1 page table * We start by mapping the L2 page tables into the L1. * This means that we can replace L1 mappings later on if necessary */ l1pagetable = kernel_l1pt.pv_va; /* * Try to map as much as possible of kernel text and data using * 1MB section mapping and for the rest of initial kernel address * space use L2 coarse tables. 
* * Link L2 tables for mapping remainder of kernel (modulo 1MB) * and kernel structures */ l2_start = lastaddr & ~(L1_S_OFFSET); for (i = 0 ; i < l2size - 1; i++) pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE, &kernel_pt_table[i]); pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE; /* Map kernel code and data */ pmap_map_chunk(l1pagetable, KERNVIRTADDR, abp->abp_physaddr, (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); /* Map L1 directory and allocated L2 page tables */ pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa, L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va, kernel_pt_table[0].pv_pa, L2_TABLE_SIZE_REAL * l2size, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); /* Map allocated DPCPU, stacks and msgbuf */ pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa, freemempos - dpcpu.pv_va, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); /* Link and map the vector page */ pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH, &kernel_pt_table[l2size - 1]); pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE); /* Establish static device mappings. */ err_devmap = platform_devmap_init(); devmap_bootstrap(l1pagetable, NULL); vm_max_kernel_address = platform_lastaddr(); cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT); pmap_pa = kernel_l1pt.pv_pa; cpu_setttb(kernel_l1pt.pv_pa); cpu_tlb_flushID(); cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)); /* * Now that proper page tables are installed, call cpu_setup() to enable * instruction and data caches and other chip-specific features. */ cpu_setup(); /* * Only after the SOC registers block is mapped we can perform device * tree fixups, as they may attempt to read parameters from hardware. */ OF_interpret("perform-fixup", 0); platform_gpio_init(); cninit(); debugf("initarm: console initialized\n"); debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp); debugf(" boothowto = 0x%08x\n", boothowto); debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp); arm_print_kenv(); env = kern_getenv("kernelname"); if (env != NULL) { strlcpy(kernelname, env, sizeof(kernelname)); freeenv(env); } if (err_devmap != 0) printf("WARNING: could not fully configure devmap, error=%d\n", err_devmap); platform_late_init(); /* * Pages were allocated during the secondary bootstrap for the * stacks for different CPU modes. * We must now set the r13 registers in the different CPU modes to * point to these stacks. * Since the ARM stacks use STMFD etc. we must set r13 to the top end * of the stack memory. */ cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE); set_stackptrs(0); /* * We must now clean the cache again.... * Cleaning may be done by reading new data to displace any * dirty data in the cache. This will have happened in cpu_setttb() * but since we are boot strapping the addresses used for the read * may have just been remapped and thus the cache could be out * of sync. A re-clean after the switch will cure this. * After booting there are no gross relocations of the kernel thus * this problem will not occur after initarm(). 
*/ cpu_idcache_wbinv_all(); undefined_init(); init_proc0(kernelstack.pv_va); arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL); pmap_bootstrap(freemempos, &kernel_l1pt); msgbufp = (void *)msgbufpv.pv_va; msgbufinit(msgbufp, msgbufsize); mutex_init(); /* * Exclude the kernel (and all the things we allocated which immediately * follow the kernel) from the VM allocation pool but not from crash * dumps. virtual_avail is a global variable which tracks the kva we've * "allocated" while setting up pmaps. * * Prepare the list of physical memory available to the vm subsystem. */ arm_physmem_exclude_region(abp->abp_physaddr, (virtual_avail - KERNVIRTADDR), EXFLAG_NOALLOC); arm_physmem_init_kernel_globals(); init_param2(physmem); dbg_monitor_init(); kdb_init(); return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP - sizeof(struct pcb))); } #else /* __ARM_ARCH < 6 */ void * initarm(struct arm_boot_params *abp) { struct mem_region mem_regions[FDT_MEM_REGIONS]; vm_paddr_t lastaddr; vm_offset_t dtbp, kernelstack, dpcpu; char *env; void *kmdp; int err_devmap, mem_regions_sz; #ifdef EFI struct efi_map_header *efihdr; #endif /* get last allocated physical address */ arm_physmem_kernaddr = abp->abp_physaddr; lastaddr = parse_boot_param(abp) - KERNVIRTADDR + arm_physmem_kernaddr; set_cpufuncs(); cpuinfo_init(); /* * Find the dtb passed in by the boot loader. */ kmdp = preload_search_by_type("elf kernel"); dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t); #if defined(FDT_DTB_STATIC) /* * In case the device tree blob was not retrieved (from metadata) try * to use the statically embedded one. */ if (dtbp == (vm_offset_t)NULL) dtbp = (vm_offset_t)&fdt_static_dtb; #endif if (OF_install(OFW_FDT, 0) == FALSE) panic("Cannot install FDT"); if (OF_init((void *)dtbp) != 0) panic("OF_init failed with the found device tree"); #if defined(LINUX_BOOT_ABI) arm_parse_fdt_bootargs(); #endif #ifdef EFI efihdr = (struct efi_map_header *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_EFI_MAP); if (efihdr != NULL) { arm_add_efi_map_entries(efihdr, mem_regions, &mem_regions_sz); } else #endif { /* Grab physical memory regions information from device tree. */ if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,NULL) != 0) panic("Cannot get physical memory regions"); } arm_physmem_hardware_regions(mem_regions, mem_regions_sz); /* Grab reserved memory regions information from device tree. */ if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0) arm_physmem_exclude_regions(mem_regions, mem_regions_sz, EXFLAG_NODUMP | EXFLAG_NOALLOC); /* * Set TEX remapping registers. * Setup kernel page tables and switch to kernel L1 page table. */ pmap_set_tex(); pmap_bootstrap_prepare(lastaddr); /* * Now that proper page tables are installed, call cpu_setup() to enable * instruction and data caches and other chip-specific features. */ cpu_setup(); /* Platform-specific initialisation */ platform_probe_and_attach(); pcpu0_init(); /* Do basic tuning, hz etc */ init_param1(); /* * Allocate a page for the system page mapped to 0xffff0000 * This page will just contain the system vectors and can be * shared by all processes. */ systempage = pmap_preboot_get_pages(1); /* Map the vector page. */ pmap_preboot_map_pages(systempage, ARM_VECTORS_HIGH, 1); if (virtual_end >= ARM_VECTORS_HIGH) virtual_end = ARM_VECTORS_HIGH - 1; /* Allocate dynamic per-cpu area. 
*/ dpcpu = pmap_preboot_get_vpages(DPCPU_SIZE / PAGE_SIZE); dpcpu_init((void *)dpcpu, 0); /* Allocate stacks for all modes */ irqstack = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU); abtstack = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU); undstack = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU ); kernelstack = pmap_preboot_get_vpages(kstack_pages * MAXCPU); /* Allocate message buffer. */ msgbufp = (void *)pmap_preboot_get_vpages( round_page(msgbufsize) / PAGE_SIZE); /* * Pages were allocated during the secondary bootstrap for the * stacks for different CPU modes. * We must now set the r13 registers in the different CPU modes to * point to these stacks. * Since the ARM stacks use STMFD etc. we must set r13 to the top end * of the stack memory. */ set_stackptrs(0); mutex_init(); /* Establish static device mappings. */ err_devmap = platform_devmap_init(); devmap_bootstrap(0, NULL); vm_max_kernel_address = platform_lastaddr(); /* * Only after the SOC registers block is mapped we can perform device * tree fixups, as they may attempt to read parameters from hardware. */ OF_interpret("perform-fixup", 0); platform_gpio_init(); cninit(); debugf("initarm: console initialized\n"); debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp); debugf(" boothowto = 0x%08x\n", boothowto); debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp); debugf(" lastaddr1: 0x%08x\n", lastaddr); arm_print_kenv(); env = kern_getenv("kernelname"); if (env != NULL) strlcpy(kernelname, env, sizeof(kernelname)); if (err_devmap != 0) printf("WARNING: could not fully configure devmap, error=%d\n", err_devmap); platform_late_init(); /* * We must now clean the cache again.... * Cleaning may be done by reading new data to displace any * dirty data in the cache. This will have happened in cpu_setttb() * but since we are boot strapping the addresses used for the read * may have just been remapped and thus the cache could be out * of sync. A re-clean after the switch will cure this. * After booting there are no gross relocations of the kernel thus * this problem will not occur after initarm(). */ /* Set stack for exception handlers */ undefined_init(); init_proc0(kernelstack); arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL); enable_interrupts(PSR_A); pmap_bootstrap(0); /* Exclude the kernel (and all the things we allocated which immediately * follow the kernel) from the VM allocation pool but not from crash * dumps. virtual_avail is a global variable which tracks the kva we've * "allocated" while setting up pmaps. * * Prepare the list of physical memory available to the vm subsystem. */ arm_physmem_exclude_region(abp->abp_physaddr, pmap_preboot_get_pages(0) - abp->abp_physaddr, EXFLAG_NOALLOC); arm_physmem_init_kernel_globals(); init_param2(physmem); /* Init message buffer. */ msgbufinit(msgbufp, msgbufsize); dbg_monitor_init(); kdb_init(); return ((void *)STACKALIGN(thread0.td_pcb)); } #endif /* __ARM_ARCH < 6 */ #endif /* FDT */ Index: projects/clang500-import/sys/arm/arm/mpcore_timer.c =================================================================== --- projects/clang500-import/sys/arm/arm/mpcore_timer.c (revision 319548) +++ projects/clang500-import/sys/arm/arm/mpcore_timer.c (revision 319549) @@ -1,545 +1,557 @@ /*- * Copyright (c) 2011 The FreeBSD Foundation * All rights reserved. * * Developed by Ben Gray * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /** * The ARM Cortex-A9 core can support a global timer plus a private and * watchdog timer per core. This driver reserves memory and interrupt * resources for accessing both timer register sets, these resources are * stored globally and used to setup the timecount and eventtimer. * * The timecount timer uses the global 64-bit counter, whereas the * per-CPU eventtimer uses the private 32-bit counters. * * * REF: ARM Cortex-A9 MPCore, Technical Reference Manual (rev. r2p2) */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include +#ifdef MULTIDELAY +#include /* For arm_set_delay */ +#endif + #include #include #include #include #include /* Private (per-CPU) timer register map */ #define PRV_TIMER_LOAD 0x0000 #define PRV_TIMER_COUNT 0x0004 #define PRV_TIMER_CTRL 0x0008 #define PRV_TIMER_INTR 0x000C #define PRV_TIMER_CTR_PRESCALER_SHIFT 8 #define PRV_TIMER_CTRL_IRQ_ENABLE (1UL << 2) #define PRV_TIMER_CTRL_AUTO_RELOAD (1UL << 1) #define PRV_TIMER_CTRL_TIMER_ENABLE (1UL << 0) #define PRV_TIMER_INTR_EVENT (1UL << 0) /* Global timer register map */ #define GBL_TIMER_COUNT_LOW 0x0000 #define GBL_TIMER_COUNT_HIGH 0x0004 #define GBL_TIMER_CTRL 0x0008 #define GBL_TIMER_INTR 0x000C #define GBL_TIMER_CTR_PRESCALER_SHIFT 8 #define GBL_TIMER_CTRL_AUTO_INC (1UL << 3) #define GBL_TIMER_CTRL_IRQ_ENABLE (1UL << 2) #define GBL_TIMER_CTRL_COMP_ENABLE (1UL << 1) #define GBL_TIMER_CTRL_TIMER_ENABLE (1UL << 0) #define GBL_TIMER_INTR_EVENT (1UL << 0) struct arm_tmr_softc { device_t dev; int irqrid; int memrid; struct resource * gbl_mem; struct resource * prv_mem; struct resource * prv_irq; uint64_t clkfreq; struct eventtimer et; }; static struct eventtimer *arm_tmr_et; static struct timecounter *arm_tmr_tc; static uint64_t arm_tmr_freq; static boolean_t arm_tmr_freq_varies; #define tmr_prv_read_4(sc, reg) bus_read_4((sc)->prv_mem, reg) #define tmr_prv_write_4(sc, reg, val) bus_write_4((sc)->prv_mem, reg, val) #define tmr_gbl_read_4(sc, reg) bus_read_4((sc)->gbl_mem, reg) #define tmr_gbl_write_4(sc, reg, val) bus_write_4((sc)->gbl_mem, reg, val) +static void arm_tmr_delay(int, void *); + static timecounter_get_t arm_tmr_get_timecount; static struct timecounter 
arm_tmr_timecount = { .tc_name = "MPCore", .tc_get_timecount = arm_tmr_get_timecount, .tc_poll_pps = NULL, .tc_counter_mask = ~0u, .tc_frequency = 0, .tc_quality = 800, }; #define TMR_GBL 0x01 #define TMR_PRV 0x02 #define TMR_BOTH (TMR_GBL | TMR_PRV) #define TMR_NONE 0 static struct ofw_compat_data compat_data[] = { {"arm,mpcore-timers", TMR_BOTH}, /* Non-standard, FreeBSD. */ {"arm,cortex-a9-global-timer", TMR_GBL}, {"arm,cortex-a5-global-timer", TMR_GBL}, {"arm,cortex-a9-twd-timer", TMR_PRV}, {"arm,cortex-a5-twd-timer", TMR_PRV}, {"arm,arm11mp-twd-timer", TMR_PRV}, {NULL, TMR_NONE} }; /** * arm_tmr_get_timecount - reads the timecount (global) timer * @tc: pointer to arm_tmr_timecount struct * * We only read the lower 32-bits; the timecount stuff only uses 32-bits * so (for now?) ignore the upper 32-bits. * * RETURNS * The lower 32-bits of the counter. */ static unsigned arm_tmr_get_timecount(struct timecounter *tc) { struct arm_tmr_softc *sc; sc = tc->tc_priv; return (tmr_gbl_read_4(sc, GBL_TIMER_COUNT_LOW)); } /** * arm_tmr_start - starts the eventtimer (private) timer * @et: pointer to eventtimer struct * @first: the number of seconds and fractional seconds to trigger in * @period: the period (in seconds and fractional seconds) to set * * If the eventtimer is required to be in oneshot mode, period will be * zero and first will hold the time to trigger. If in periodic mode * period will contain the time period and first may optionally contain * the time for the first period. * * RETURNS * Always returns 0 */ static int arm_tmr_start(struct eventtimer *et, sbintime_t first, sbintime_t period) { struct arm_tmr_softc *sc; uint32_t load, count; uint32_t ctrl; sc = et->et_priv; tmr_prv_write_4(sc, PRV_TIMER_CTRL, 0); tmr_prv_write_4(sc, PRV_TIMER_INTR, PRV_TIMER_INTR_EVENT); ctrl = PRV_TIMER_CTRL_IRQ_ENABLE | PRV_TIMER_CTRL_TIMER_ENABLE; if (period != 0) { load = ((uint32_t)et->et_frequency * period) >> 32; ctrl |= PRV_TIMER_CTRL_AUTO_RELOAD; } else load = 0; if (first != 0) count = (uint32_t)((et->et_frequency * first) >> 32); else count = load; tmr_prv_write_4(sc, PRV_TIMER_LOAD, load); tmr_prv_write_4(sc, PRV_TIMER_COUNT, count); tmr_prv_write_4(sc, PRV_TIMER_CTRL, ctrl); return (0); } /** * arm_tmr_stop - stops the eventtimer (private) timer * @et: pointer to eventtimer struct * * Simply stops the private timer by clearing all bits in the ctrl register. * * RETURNS * Always returns 0 */ static int arm_tmr_stop(struct eventtimer *et) { struct arm_tmr_softc *sc; sc = et->et_priv; tmr_prv_write_4(sc, PRV_TIMER_CTRL, 0); tmr_prv_write_4(sc, PRV_TIMER_INTR, PRV_TIMER_INTR_EVENT); return (0); } /** * arm_tmr_intr - ISR for the eventtimer (private) timer * @arg: pointer to arm_tmr_softc struct * * Clears the event register and then calls the eventtimer callback. * * RETURNS * Always returns FILTER_HANDLED */ static int arm_tmr_intr(void *arg) { struct arm_tmr_softc *sc; sc = arg; tmr_prv_write_4(sc, PRV_TIMER_INTR, PRV_TIMER_INTR_EVENT); if (sc->et.et_active) sc->et.et_event_cb(&sc->et, sc->et.et_arg); return (FILTER_HANDLED); } /** * arm_tmr_probe - timer probe routine * @dev: new device * * The probe function returns success when probed with the fdt compatible * string set to "arm,mpcore-timers". * * RETURNS * BUS_PROBE_DEFAULT if the fdt device is compatible, otherwise ENXIO.
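arm_tmr_start() above turns sbintime_t values into hardware ticks: sbintime_t is a 32.32 fixed-point count of seconds, so multiplying by the timer frequency and shifting right 32 bits yields whole ticks. A standalone rendering with an assumed 330 MHz private-timer clock (the frequency is an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

typedef int64_t sbintime_t;	/* 32.32 fixed-point seconds (assumed shape) */
#define SBT_1S	((sbintime_t)1 << 32)

int
main(void)
{
	uint64_t freq = 330000000;		/* assumed 330 MHz private timer */
	sbintime_t first = SBT_1S / 1000;	/* 1 millisecond */
	uint32_t count;

	/* ticks = frequency * seconds; the low 32 bits are the fraction. */
	count = (uint32_t)((freq * first) >> 32);
	printf("1 ms at %llu Hz = %u ticks\n",
	    (unsigned long long)freq, count);	/* ~330000 */
	return (0);
}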
*/ static int arm_tmr_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == TMR_NONE) return (ENXIO); device_set_desc(dev, "ARM MPCore Timers"); return (BUS_PROBE_DEFAULT); } static int attach_tc(struct arm_tmr_softc *sc) { int rid; if (arm_tmr_tc != NULL) return (EBUSY); rid = sc->memrid; sc->gbl_mem = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->gbl_mem == NULL) { device_printf(sc->dev, "could not allocate gbl mem resources\n"); return (ENXIO); } tmr_gbl_write_4(sc, GBL_TIMER_CTRL, 0x00000000); arm_tmr_timecount.tc_frequency = sc->clkfreq; arm_tmr_timecount.tc_priv = sc; tc_init(&arm_tmr_timecount); arm_tmr_tc = &arm_tmr_timecount; tmr_gbl_write_4(sc, GBL_TIMER_CTRL, GBL_TIMER_CTRL_TIMER_ENABLE); return (0); } static int attach_et(struct arm_tmr_softc *sc) { void *ihl; int irid, mrid; if (arm_tmr_et != NULL) return (EBUSY); mrid = sc->memrid; sc->prv_mem = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &mrid, RF_ACTIVE); if (sc->prv_mem == NULL) { device_printf(sc->dev, "could not allocate prv mem resources\n"); return (ENXIO); } tmr_prv_write_4(sc, PRV_TIMER_CTRL, 0x00000000); irid = sc->irqrid; sc->prv_irq = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irid, RF_ACTIVE); if (sc->prv_irq == NULL) { bus_release_resource(sc->dev, SYS_RES_MEMORY, mrid, sc->prv_mem); device_printf(sc->dev, "could not allocate prv irq resources\n"); return (ENXIO); } if (bus_setup_intr(sc->dev, sc->prv_irq, INTR_TYPE_CLK, arm_tmr_intr, NULL, sc, &ihl) != 0) { bus_release_resource(sc->dev, SYS_RES_MEMORY, mrid, sc->prv_mem); bus_release_resource(sc->dev, SYS_RES_IRQ, irid, sc->prv_irq); device_printf(sc->dev, "unable to setup the et irq handler.\n"); return (ENXIO); } /* * Set up and register the eventtimer. Most event timers set their min * and max period values to some value calculated from the clock * frequency. We might not know yet what our runtime clock frequency * will be, so we just use some safe values. A max of 2 seconds ensures * that even if our base clock frequency is 2GHz (meaning a 4GHz CPU), * we won't overflow our 32-bit timer count register. A min of 20 * nanoseconds is pretty much completely arbitrary. */ sc->et.et_name = "MPCore"; sc->et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU; sc->et.et_quality = 1000; sc->et.et_frequency = sc->clkfreq; sc->et.et_min_period = 20 * SBT_1NS; sc->et.et_max_period = 2 * SBT_1S; sc->et.et_start = arm_tmr_start; sc->et.et_stop = arm_tmr_stop; sc->et.et_priv = sc; et_register(&sc->et); arm_tmr_et = &sc->et; return (0); } /** * arm_tmr_attach - attaches the timer to the simplebus * @dev: new device * * Reserves memory and interrupt resources, stores the softc structure * globally and registers both the timecount and eventtimer objects. * * RETURNS * Zero on success or ENXIO if an error occurred.
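The 2-second cap in the comment above checks out numerically: the MPCore private timer runs at half the CPU clock, so a 4 GHz CPU gives a 2 GHz timer, and two seconds at 2 GHz is 4.0e9 ticks, just under the 2^32 (about 4.29e9) limit of the 32-bit count register. A quick arithmetic check:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t freq = 2000000000ULL;	/* 2 GHz timer = half of a 4 GHz CPU */
	uint64_t ticks = freq * 2;	/* an et_max_period of 2 seconds */

	printf("2 s at 2 GHz = %llu ticks, 2^32 = %llu\n",
	    (unsigned long long)ticks, (unsigned long long)(1ULL << 32));
	/* 4000000000 < 4294967296, so the 32-bit count register cannot overflow. */
	return (0);
}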
*/ static int arm_tmr_attach(device_t dev) { struct arm_tmr_softc *sc; phandle_t node; pcell_t clock; int et_err, tc_err, tmrtype; sc = device_get_softc(dev); sc->dev = dev; if (arm_tmr_freq_varies) { sc->clkfreq = arm_tmr_freq; } else { if (arm_tmr_freq != 0) { sc->clkfreq = arm_tmr_freq; } else { /* Get the base clock frequency */ node = ofw_bus_get_node(dev); if ((OF_getencprop(node, "clock-frequency", &clock, sizeof(clock))) <= 0) { device_printf(dev, "missing clock-frequency " "attribute in FDT\n"); return (ENXIO); } sc->clkfreq = clock; } } tmrtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data; tc_err = ENXIO; et_err = ENXIO; /* * If we're handling the global timer and it is fixed-frequency, set it * up to use as a timecounter. If it's variable frequency it won't work * as a timecounter. We also can't use it for DELAY(), so hopefully the * platform provides its own implementation. If it doesn't, ours will * get used, but since the frequency isn't set, it will only use the * bogus loop counter. */ if (tmrtype & TMR_GBL) { if (!arm_tmr_freq_varies) tc_err = attach_tc(sc); else if (bootverbose) device_printf(sc->dev, "not using variable-frequency device as timecounter"); sc->memrid++; sc->irqrid++; } /* If we are handling the private timer, set it up as an eventtimer. */ if (tmrtype & TMR_PRV) { et_err = attach_et(sc); } /* * If we didn't successfully set up a timecounter or eventtimer then we * didn't actually attach at all, return error. */ if (tc_err != 0 && et_err != 0) { return (ENXIO); } + +#ifdef MULTIDELAY + arm_set_delay(arm_tmr_delay, sc); +#endif + return (0); } static device_method_t arm_tmr_methods[] = { DEVMETHOD(device_probe, arm_tmr_probe), DEVMETHOD(device_attach, arm_tmr_attach), { 0, 0 } }; static driver_t arm_tmr_driver = { "mp_tmr", arm_tmr_methods, sizeof(struct arm_tmr_softc), }; static devclass_t arm_tmr_devclass; EARLY_DRIVER_MODULE(mp_tmr, simplebus, arm_tmr_driver, arm_tmr_devclass, 0, 0, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE); EARLY_DRIVER_MODULE(mp_tmr, ofwbus, arm_tmr_driver, arm_tmr_devclass, 0, 0, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE); /* * Handle a change in clock frequency. The mpcore timer runs at half the CPU * frequency. When the CPU frequency changes due to power-saving or thermal * management, the platform-specific code that causes the frequency change calls * this routine to inform the clock driver, and we in turn inform the event * timer system, which actually updates the value in et->frequency for us and * reschedules the current event(s) in a way that's atomic with respect to * start/stop/intr code that may be running on various CPUs at the time of the * call. * * This routine can also be called by a platform's early init code. If the * value passed is ARM_TMR_FREQUENCY_VARIES, that will cause the attach() code * to register as an eventtimer, but not a timecounter. If the value passed in * is any other non-zero value it is used as the fixed frequency for the timer. */ void arm_tmr_change_frequency(uint64_t newfreq) { if (newfreq == ARM_TMR_FREQUENCY_VARIES) { arm_tmr_freq_varies = true; return; } arm_tmr_freq = newfreq; if (arm_tmr_et != NULL) et_change_frequency(arm_tmr_et, newfreq); } -/** - * DELAY - Delay for at least usec microseconds. - * @usec: number of microseconds to delay by - * - * This function is called all over the kernel and is suppose to provide a - * consistent delay. This function may also be called before the console - * is setup so no printf's can be called here. 
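For context on the mechanism this diff switches to: with options MULTIDELAY the driver stops exporting DELAY() itself and instead registers arm_tmr_delay() through arm_set_delay() at attach time, and a single shared DELAY() dispatches to whichever timer driver registered. The sketch below is a hypothetical standalone rendering of that dispatch pattern under those assumptions, not a copy of the FreeBSD machdep implementation:

#include <stddef.h>

/* Sketch of callback-based DELAY() dispatch, as used with MULTIDELAY. */
static void (*delay_impl)(int, void *);
static void *delay_arg;

void
arm_set_delay(void (*impl)(int, void *), void *arg)
{
	delay_impl = impl;	/* a timer driver registers itself at attach */
	delay_arg = arg;
}

void
DELAY(int usec)
{
	if (delay_impl != NULL) {
		delay_impl(usec, delay_arg);	/* calibrated timer delay */
		return;
	}
	/* Early boot: no timer yet, spin on a rough instruction loop. */
	for (; usec > 0; usec--) {
		volatile int i;
		for (i = 200; i > 0; i--)
			;
	}
}

The payoff is that a kernel built for several SoCs no longer needs weak-symbol tricks to pick a DELAY() implementation; the first timer to attach simply registers its callback.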
- * - * RETURNS: - * nothing - */ -static void __used /* Must emit function code for the weak ref below. */ -arm_tmr_DELAY(int usec) +static void +arm_tmr_delay(int usec, void *arg) { - struct arm_tmr_softc *sc; + struct arm_tmr_softc *sc = arg; int32_t counts_per_usec; int32_t counts; uint32_t first, last; - /* Check the timers are setup, if not just use a for loop for the meantime */ - if (arm_tmr_tc == NULL || arm_tmr_timecount.tc_frequency == 0) { - for (; usec > 0; usec--) - for (counts = 200; counts > 0; counts--) - cpufunc_nullop(); /* Prevent gcc from optimizing - * out the loop - */ - return; - } - - sc = arm_tmr_tc->tc_priv; - /* Get the number of times to count */ counts_per_usec = ((arm_tmr_timecount.tc_frequency / 1000000) + 1); /* * Clamp the timeout at a maximum value (about 32 seconds with * a 66MHz clock). *Nobody* should be delay()ing for anywhere * near that length of time and if they are, they should be hung * out to dry. */ if (usec >= (0x80000000U / counts_per_usec)) counts = (0x80000000U / counts_per_usec) - 1; else counts = usec * counts_per_usec; first = tmr_gbl_read_4(sc, GBL_TIMER_COUNT_LOW); while (counts > 0) { last = tmr_gbl_read_4(sc, GBL_TIMER_COUNT_LOW); counts -= (int32_t)(last - first); first = last; } } -/* - * Supply a DELAY() implementation via weak linkage. A platform may want to use - * the mpcore per-cpu eventtimers but provide its own DELAY() routine, - * especially when the core frequency can change on the fly. +#ifndef MULTIDELAY +/** + * DELAY - Delay for at least usec microseconds. + * @usec: number of microseconds to delay by + * + * This function is called all over the kernel and is supposed to provide a + * consistent delay. This function may also be called before the console + * is set up, so no printf's can be called here. + * + * RETURNS: + * nothing */ -__weak_reference(arm_tmr_DELAY, DELAY); +void +DELAY(int usec) +{ + struct arm_tmr_softc *sc; + int32_t counts; + /* Check that the timers are set up; if not, just use a for loop for the meantime */ + if (arm_tmr_tc == NULL || arm_tmr_timecount.tc_frequency == 0) { + for (; usec > 0; usec--) + for (counts = 200; counts > 0; counts--) + cpufunc_nullop(); /* Prevent gcc from optimizing + * out the loop + */ + } else { + sc = arm_tmr_tc->tc_priv; + arm_tmr_delay(usec, sc); + } +} +#endif Index: projects/clang500-import/sys/arm/conf/AML8726 =================================================================== --- projects/clang500-import/sys/arm/conf/AML8726 (revision 319548) +++ projects/clang500-import/sys/arm/conf/AML8726 (revision 319549) @@ -1,98 +1,101 @@ # # Kernel configuration for Amlogic aml8726 boards. # # For more information on this file, please read the config(5) manual page, # and/or the handbook section on Kernel Configuration Files: # # http://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html # # The handbook is also available locally in /usr/share/doc/handbook # if you've installed the doc distribution, otherwise always see the # FreeBSD World Wide Web server (http://www.FreeBSD.org/) for the # latest information. # # An exhaustive list of options and more detailed explanations of the # device lines is also present in the ../../conf/NOTES and NOTES files. # If you are in doubt as to the purpose or necessity of a line, check first # in NOTES.
# # $FreeBSD$ +# TODO: Port to INTRNG +#NO_UNIVERSE + ident AML8726 include "std.armv6" include "../amlogic/aml8726/std.aml8726" options SCHED_ULE # ULE scheduler options PRINTF_BUFR_SIZE=128 # Prevent printf output being interspersed. options LINUX_BOOT_ABI # NFS root from bootp/dhcp #options BOOTP #options BOOTP_NFSROOT #options BOOTP_COMPAT #options BOOTP_NFSV3 #options BOOTP_WIRED_TO=axe0 # Interrupt controller device aml_pic # MMC/SD/SDIO Card slot support device mmc # mmc/sd bus device mmcsd # mmc/sd flash cards # Boot device is 2nd slice on MMC/SD card options ROOTDEVNAME=\"ufs:mmcsd0s2\" device pl310 # PL310 L2 cache controller # GPIO device gpio device gpioled # I2C support device iicbus device iicbb device iic # vt is the default console driver, resembling an SCO console device vt #device kbdmux # Serial (COM) ports device uart # Generic UART driver # Pseudo devices. device loop # Network loopback device random # Entropy device device ether # Ethernet support device pty # BSD-style compatibility pseudo ttys # The `bpf' device enables the Berkeley Packet Filter. # Be aware of the administrative consequences of enabling this! # Note that 'bpf' is required for DHCP. device bpf # Berkeley packet filter # USB support device usb # General USB code (mandatory for USB) device dwcotg # DWC OTG controller options USB_HOST_ALIGN=64 # Align usb buffers to cache line size. #device ukbd # USB keyboard #device ums # USB mouse device scbus # SCSI bus (required for ATA/SCSI) device da # Direct Access (disks) device umass # Disks/Mass storage - Requires scbus and da # Ethernet support device miibus # MII bus support # SoC Ethernet, requires miibus device dwc # USB Ethernet support, requires miibus device axe # ASIX Electronics USB Ethernet # Flattened Device Tree options FDT # Configure using FDT/DTB data Index: projects/clang500-import/sys/arm/conf/ARMADAXP =================================================================== --- projects/clang500-import/sys/arm/conf/ARMADAXP (revision 319548) +++ projects/clang500-import/sys/arm/conf/ARMADAXP (revision 319549) @@ -1,87 +1,90 @@ # # Custom kernel for Marvell Armada XP # # For more information on this file, please read the config(5) manual page, # and/or the handbook section on Kernel Configuration Files: # # http://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html # # The handbook is also available locally in /usr/share/doc/handbook # if you've installed the doc distribution, otherwise always see the # FreeBSD World Wide Web server (http://www.FreeBSD.org/) for the # latest information. # # An exhaustive list of options and more detailed explanations of the # device lines is also present in the ../../conf/NOTES and NOTES files. # If you are in doubt as to the purpose or necessity of a line, check first # in NOTES.
# # $FreeBSD$ +# TODO: Port to INTRNG +#NO_UNIVERSE + ident MV-88F78XX0 include "std.armv6" include "../mv/armadaxp/std.mv78x60" options SOC_MV_ARMADAXP makeoptions WERROR="-Werror" options SCHED_ULE # ULE scheduler options SMP # Enable multiple cores # NFS root from bootp/dhcp options BOOTP options BOOTP_NFSROOT options BOOTP_NFSV3 options BOOTP_WIRED_TO=mge0 options ROOTDEVNAME=\"ufs:/dev/da0p1\" options MUTEX_NOINLINE options RWLOCK_NOINLINE options NO_FFS_SNAPSHOT options NO_SWAPPING # Pseudo devices device random device pty device loop device md # USB device usb device ehci device umass device scbus device pass device da # SATA device mvs # Serial ports device uart # I2C (TWSI) device iic device iicbus device twsi #Network device ether device mge # Marvell Gigabit Ethernet controller device mii device mdio device e1000phy device bpf options DEVICE_POLLING device vlan #PCI/PCIE device pci # Flattened Device Tree options FDT # Configure using FDT/DTB data options FDT_DTB_STATIC makeoptions FDT_DTS_FILE=db78460.dts Index: projects/clang500-import/sys/arm/conf/BEAGLEBONE =================================================================== --- projects/clang500-import/sys/arm/conf/BEAGLEBONE (revision 319548) +++ projects/clang500-import/sys/arm/conf/BEAGLEBONE (revision 319549) @@ -1,135 +1,136 @@ # # BEAGLEBONE -- Custom configuration for the BeagleBone ARM development # platforms, check out http://www.beagleboard.org/bone and # http://www.beagleboard.org/black. This kernel config file is used for the # original BeagleBone and the BeagleBone Black. # # For more information on this file, please read the config(5) manual page, # and/or the handbook section on Kernel Configuration Files: # # http://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html # # The handbook is also available locally in /usr/share/doc/handbook # if you've installed the doc distribution, otherwise always see the # FreeBSD World Wide Web server (http://www.FreeBSD.org/) for the # latest information. # # An exhaustive list of options and more detailed explanations of the # device lines is also present in the ../../conf/NOTES and NOTES files. # If you are in doubt as to the purpose or necessity of a line, check first # in NOTES. # # $FreeBSD$ ident BEAGLEBONE include "std.armv6" include "../ti/am335x/std.am335x" makeoptions MODULES_EXTRA="dtb/am335x am335x_dmtpps" options INTRNG +options MULTIDELAY options SCHED_4BSD # 4BSD scheduler options PLATFORM # NFS server support #options NFSD # NFS root from bootp/dhcp #options BOOTP #options BOOTP_NFSROOT #options BOOTP_COMPAT #options BOOTP_NFSV3 #options BOOTP_WIRED_TO=cpsw0 # Boot device is 2nd slice on MMC/SD card options ROOTDEVNAME=\"ufs:mmcsd0s2\" # MMC/SD/SDIO Card slot support device mmc # mmc/sd bus device mmcsd # mmc/sd flash cards device sdhci # mmc/sd host controller # I2C support device iicbus device iic device ti_i2c device am335x_pmic # AM335x Power Management IC (TPC65217) device am335x_rtc # RTC support (power management only) #device am335x_dmtpps # Pulse Per Second capture driver # Console and misc device uart device uart_ns8250 device pty device snp device md device random # Entropy device # GPIO device gpio device gpioled device gpiobacklight # SPI device ti_spi device spibus # ADC support device ti_adc # Watchdog support # If we don't enable the watchdog driver, the system could potentially # reboot automatically because the boot loader might have enabled the # watchdog.
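The watchdog note above is why ti_wdt stays enabled: if the boot loader armed the hardware watchdog, only an in-kernel driver can service or disarm it afterwards. For reference, a userland process pats a FreeBSD watchdog(4) with the WDIOCPATPAT ioctl on /dev/fido; the sketch below is written from memory, and the timeout encoding (log2 of nanoseconds) is an assumption to verify against sys/watchdog.h:

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/watchdog.h>	/* WDIOCPATPAT, WD_ACTIVE */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	u_int timeout = WD_ACTIVE | 34;	/* 2^34 ns ~= 16 s (assumed encoding) */
	int fd = open("/dev/fido", O_RDWR);	/* watchdog(4) control device */

	if (fd < 0) {
		perror("open");
		return (1);
	}
	if (ioctl(fd, WDIOCPATPAT, &timeout) < 0)
		perror("WDIOCPATPAT");
	close(fd);
	return (0);
}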
device ti_wdt # TI Programmable Realtime Unit support device ti_pruss # Mailbox support device ti_mbox # PMU support (for CCNT). device pmu # USB support device usb options USB_HOST_ALIGN=64 # Align usb buffers to cache line size. device musb device umass device scbus # SCSI bus (required for ATA/SCSI) device da # Direct Access (disks) # Ethernet device loop device ether device mii device smscphy device cpsw device bpf # USB Ethernet support, requires miibus device miibus # Device mode support device usb_template # Control of the gadget # Pinmux device fdt_pinctrl # Flattened Device Tree options FDT # Configure using FDT/DTB data # Comment following lines for boot console on serial port device vt device videomode device hdmi device ums device ukbd device kbdmux # Uncomment to enable evdev support for ti_adc # options EVDEV_SUPPORT Index: projects/clang500-import/sys/arm/conf/EFIKA_MX =================================================================== --- projects/clang500-import/sys/arm/conf/EFIKA_MX (revision 319548) +++ projects/clang500-import/sys/arm/conf/EFIKA_MX (revision 319549) @@ -1,134 +1,135 @@ # # Kernel configuration for Efika MX Smarttop/Smartbook boards # # For more information on this file, please read the config(5) manual page, # and/or the handbook section on Kernel Configuration Files: # # http://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html # # The handbook is also available locally in /usr/share/doc/handbook # if you've installed the doc distribution, otherwise always see the # FreeBSD World Wide Web server (http://www.FreeBSD.org/) for the # latest information. # # An exhaustive list of options and more detailed explanations of the # device lines is also present in the ../../conf/NOTES and NOTES files. # If you are in doubt as to the purpose or necessity of a line, check first # in NOTES. # # $FreeBSD$ ident EFIKA_MX include "std.armv6" include "../freescale/imx/std.imx51" makeoptions WITHOUT_MODULES="ahc" options SOC_IMX51 options SCHED_4BSD # 4BSD scheduler #options MD_ROOT # MD is a potential root device #options NFSD # Network Filesystem Server options PLATFORM +options MULTIDELAY options INCLUDE_CONFIG_FILE # Include this file in kernel # NFS root from bootp/dhcp #options BOOTP #options BOOTP_NFSROOT #options BOOTP_COMPAT #options BOOTP_NFSV3 #options BOOTP_WIRED_TO=ue0 options ROOTDEVNAME=\"ufs:ada0s2a\" # kernel/memory size reduction #options MUTEX_NOINLINE #options NO_FFS_SNAPSHOT #options NO_SWAPPING #options NO_SYSCTL_DESCR #options RWLOCK_NOINLINE # The `bpf' device enables the Berkeley Packet Filter. # Be aware of the administrative consequences of enabling this! # Note that 'bpf' is required for DHCP. device bpf # Berkeley packet filter # Pseudo devices. device loop # Network loopback device random # Entropy device device ether # Ethernet support #device vlan # 802.1Q VLAN support #device tun # Packet tunnel. #device md # Memory "disks" #device gif # IPv6 and IPv4 tunneling #device firmware # firmware assist module # Serial (COM) ports device uart # Multi-uart driver device ata device atapci # Only for helper functions device imxata device gpio device gpioled device fsliic device iic device iicbus # SCSI peripherals device scbus # SCSI bus (required for ATA/SCSI) device da # Direct Access (disks) device cd # CD device pass # Passthrough device (direct ATA/SCSI access) # USB support options USB_HOST_ALIGN=64 # Align usb buffers to cache line size.
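USB_HOST_ALIGN=64 exists because these armv6 SoCs do DMA that is not cache-coherent: if a transfer buffer shared a cache line with unrelated data, the cache maintenance done around the transfer could corrupt one or the other, so USB buffers are aligned and padded to a whole line. In the kernel this falls out of busdma; the userland sketch below only illustrates the alignment idea, with the 64-byte line size assumed to match the option:

#include <stdlib.h>
#include <stdio.h>

#define CACHE_LINE	64	/* assumed L1 line size, matching USB_HOST_ALIGN=64 */

int
main(void)
{
	/* Size must be a multiple of the alignment for aligned_alloc(). */
	void *buf = aligned_alloc(CACHE_LINE, 512);

	if (buf == NULL)
		return (1);
	printf("DMA-friendly buffer at %p (64-byte aligned)\n", buf);
	free(buf);
	return (0);
}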
device ehci # OHCI USB interface device usb # USB Bus (required) device umass # Disks/Mass storage - Requires scbus and da device uhid # "Human Interface Devices" device u3g # USB Ethernet, requires miibus device miibus device aue # ADMtek USB Ethernet device axe # ASIX Electronics USB Ethernet device cdce # Generic USB over Ethernet device cue # CATC USB Ethernet device kue # Kawasaki LSI USB Ethernet device rue # RealTek RTL8150 USB Ethernet device udav # Davicom DM9601E USB # USB Wireless device rum # Ralink Technology RT2501USB wireless NICs # Watchdog timer. # WARNING: can't be disabled!!! device imxwdt # Watchdog # Wireless NIC cards device wlan # 802.11 support device wlan_wep # 802.11 WEP support device wlan_ccmp # 802.11 CCMP support device wlan_tkip # 802.11 TKIP support device wlan_amrr # AMRR transmit rate control algorithm # Flattened Device Tree options FDT # Configure using FDT/DTB data options FDT_DTB_STATIC makeoptions FDT_DTS_FILE=efikamx.dts # NOTE: serial console will be disabled if syscons enabled # Uncomment following lines for framebuffer/syscons support device sc device kbdmux options SC_DFLT_FONT # compile font in makeoptions SC_DFLT_FONT=cp437 device ukbd # Allow keyboard like HIDs to control console device ums options INTRNG Index: projects/clang500-import/sys/arm/conf/IMX53 =================================================================== --- projects/clang500-import/sys/arm/conf/IMX53 (revision 319548) +++ projects/clang500-import/sys/arm/conf/IMX53 (revision 319549) @@ -1,122 +1,123 @@ # # Kernel configuration for i.MX53 boards # # For more information on this file, please read the config(5) manual page, # and/or the handbook section on Kernel Configuration Files: # # http://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html # # The handbook is also available locally in /usr/share/doc/handbook # if you've installed the doc distribution, otherwise always see the # FreeBSD World Wide Web server (http://www.FreeBSD.org/) for the # latest information. # # An exhaustive list of options and more detailed explanations of the # device lines is also present in the ../../conf/NOTES and NOTES files. # If you are in doubt as to the purpose or necessity of a line, check first # in NOTES. # # $FreeBSD$ ident IMX53 include "std.armv6" include "../freescale/imx/std.imx53" options SOC_IMX53 options SCHED_4BSD # 4BSD scheduler #options NFSD # Network Filesystem Server options PLATFORM +options MULTIDELAY options INCLUDE_CONFIG_FILE # Include this file in kernel # kernel/memory size reduction #options MUTEX_NOINLINE #options NO_FFS_SNAPSHOT #options NO_SWAPPING #options NO_SYSCTL_DESCR #options RWLOCK_NOINLINE # The `bpf' device enables the Berkeley Packet Filter. # Be aware of the administrative consequences of enabling this! # Note that 'bpf' is required for DHCP. device bpf # Berkeley packet filter # Pseudo devices. device loop # Network loopback device random # Entropy device device ether # Ethernet support #device vlan # 802.1Q VLAN support #device tun # Packet tunnel. 
device md # Memory "disks" #device gif # IPv6 and IPv4 tunneling #device firmware # firmware assist module # Ethernet device ffec # Freescale Fast Ethernet Controller device miibus # Standard mii bus # Serial (COM) ports device uart # Multi-uart driver device ata device atapci # Only for helper functions device imxata device gpio device gpioled device fsliic device iic device iicbus # SCSI peripherals device scbus # SCSI bus (required for ATA/SCSI) device da # Direct Access (disks) device cd # CD device pass # Passthrough device (direct ATA/SCSI access) # USB support options USB_HOST_ALIGN=64 # Align usb buffers to cache line size. device ehci # OHCI USB interface device usb # USB Bus (required) device umass # Disks/Mass storage - Requires scbus and da device uhid # "Human Interface Devices" #device ukbd # Allow keyboard like HIDs to control console device ums # USB Ethernet, requires miibus #device miibus #device aue # ADMtek USB Ethernet #device axe # ASIX Electronics USB Ethernet #device cdce # Generic USB over Ethernet #device cue # CATC USB Ethernet #device kue # Kawasaki LSI USB Ethernet #device rue # RealTek RTL8150 USB Ethernet #device udav # Davicom DM9601E USB # USB Wireless #device rum # Ralink Technology RT2501USB wireless NICs # Watchdog timer. # WARNING: can't be disabled!!! device imxwdt # Watchdog # Wireless NIC cards device wlan # 802.11 support device wlan_wep # 802.11 WEP support device wlan_ccmp # 802.11 CCMP support device wlan_tkip # 802.11 TKIP support device wlan_amrr # AMRR transmit rate control algorithm # MMC #device sdhci # SD controller #device mmc # SD/MMC protocol #device mmcsd # SDCard disk device # Flattened Device Tree options FDT # Configure using FDT/DTB data makeoptions MODULES_EXTRA=dtb/imx5 options INTRNG Index: projects/clang500-import/sys/arm/conf/RT1310 =================================================================== --- projects/clang500-import/sys/arm/conf/RT1310 (revision 319548) +++ projects/clang500-import/sys/arm/conf/RT1310 (revision 319549) @@ -1,80 +1,82 @@ # # Custom kernel for RT1310 boards. 
# # $FreeBSD$ # +# TODO: This fails to build under universe; ignore it until it's fixed +#NO_UNIVERSE ident RT1310 include "std.arm" hints "RT1310.hints" # Flattened Device Tree options FDT options FDT_DTB_STATIC makeoptions FDT_DTS_FILE=wzr2-g300n.dts makeoptions MODULES_OVERRIDE="" #makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols makeoptions WERROR="-Werror" options SCHED_4BSD # 4BSD scheduler options INET # InterNETworking options FFS # Berkeley Fast Filesystem options TMPFS # Efficient memory filesystem options MSDOSFS options ROOTDEVNAME=\"cd9660:/dev/cfid0s.rootfs.uzip\" options SYSVSHM # SYSV-style shared memory options SYSVMSG # SYSV-style message queues options SYSVSEM # SYSV-style semaphores options _KPOSIX_PRIORITY_SCHEDULING # Posix P1003_1B real-time extensions options MUTEX_NOINLINE options RWLOCK_NOINLINE options NO_FFS_SNAPSHOT options NO_SWAPPING # Debugging options ALT_BREAK_TO_DEBUGGER options DDB #options DEADLKRES # Enable the deadlock resolver #options DIAGNOSTIC #options INVARIANTS # Enable calls of extra sanity checking #options INVARIANT_SUPPORT # Extra sanity checks of internal structures, required by INVARIANTS options KDB options WITNESS # Enable checks to detect deadlocks and cycles options WITNESS_SKIPSPIN # Don't run witness on spinlocks for speed #options WITNESS_KDB # Pseudo devices device loop device md device pty device random # Serial ports device uart device uart_ns8250 # Flash device cfi device cfid # Networking device ether device mii device bpf device fv # etherswitch device mdio device etherswitch device miiproxy device ip17x # GPIO device gpio device gpioled device rt1310gpio Index: projects/clang500-import/sys/arm/freescale/imx/imx51_machdep.c =================================================================== --- projects/clang500-import/sys/arm/freescale/imx/imx51_machdep.c (revision 319548) +++ projects/clang500-import/sys/arm/freescale/imx/imx51_machdep.c (revision 319549) @@ -1,102 +1,102 @@ /*- * Copyright (c) 2013 Ian Lepore * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE.
*/ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include "platform_if.h" static platform_attach_t imx51_attach; static platform_devmap_init_t imx51_devmap_init; static platform_cpu_reset_t imx51_cpu_reset; static int imx51_attach(platform_t plat) { /* XXX - Get rid of this stuff soon. */ boothowto |= RB_VERBOSE|RB_MULTIPLE; bootverbose = 1; return (0); } /* * Set up static device mappings. This is hand-optimized platform-specific * config data which covers most of the common on-chip devices with a few 1MB * section mappings. * * Notably missing are entries for GPU, IPU, in general anything video related. */ static int imx51_devmap_init(platform_t plat) { devmap_add_entry(0x70000000, 0x00100000); devmap_add_entry(0x73f00000, 0x00100000); devmap_add_entry(0x83f00000, 0x00100000); return (0); } static void imx51_cpu_reset(platform_t plat) { imx_wdog_cpu_reset(0x73F98000); } u_int imx_soc_type(void) { return (IMXSOC_51); } static platform_method_t imx51_methods[] = { PLATFORMMETHOD(platform_attach, imx51_attach), PLATFORMMETHOD(platform_devmap_init, imx51_devmap_init), PLATFORMMETHOD(platform_cpu_reset, imx51_cpu_reset), PLATFORMMETHOD_END, }; -FDT_PLATFORM_DEF(imx51, "i.MX51", 0, "fsl,imx51", 0); +FDT_PLATFORM_DEF(imx51, "i.MX51", 0, "fsl,imx51", 100); Index: projects/clang500-import/sys/arm/freescale/imx/imx53_machdep.c =================================================================== --- projects/clang500-import/sys/arm/freescale/imx/imx53_machdep.c (revision 319548) +++ projects/clang500-import/sys/arm/freescale/imx/imx53_machdep.c (revision 319549) @@ -1,99 +1,98 @@ /*- * Copyright (c) 2013 Ian Lepore * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include "platform_if.h" static platform_attach_t imx53_attach; static platform_devmap_init_t imx53_devmap_init; static platform_cpu_reset_t imx53_cpu_reset; static int imx53_attach(platform_t plat) { return (0); } /* * Set up static device mappings. This is hand-optimized platform-specific * config data which covers most of the common on-chip devices with a few 1MB * section mappings. 
* * Notably missing are entries for GPU, IPU, in general anything video related. */ static int imx53_devmap_init(platform_t plat) { devmap_add_entry(0x50000000, 0x00100000); devmap_add_entry(0x53f00000, 0x00100000); devmap_add_entry(0x63f00000, 0x00100000); return (0); } static void imx53_cpu_reset(platform_t plat) { imx_wdog_cpu_reset(0x53F98000); } u_int imx_soc_type(void) { return (IMXSOC_53); } static platform_method_t imx53_methods[] = { PLATFORMMETHOD(platform_attach, imx53_attach), PLATFORMMETHOD(platform_devmap_init, imx53_devmap_init), PLATFORMMETHOD(platform_cpu_reset, imx53_cpu_reset), PLATFORMMETHOD_END, }; -FDT_PLATFORM_DEF(imx53, "i.MX53", 0, "fsl,imx53", 0); - +FDT_PLATFORM_DEF(imx53, "i.MX53", 0, "fsl,imx53", 100); Index: projects/clang500-import/sys/arm/versatile/versatile_timer.c =================================================================== --- projects/clang500-import/sys/arm/versatile/versatile_timer.c (revision 319548) +++ projects/clang500-import/sys/arm/versatile/versatile_timer.c (nonexistent) @@ -1,56 +0,0 @@ -/* - * Copyright (c) 2012 Oleksandr Tymoshenko - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include - -void -cpu_initclocks(void) -{ - cpu_initclocks_bsp(); -} - - Property changes on: projects/clang500-import/sys/arm/versatile/versatile_timer.c ___________________________________________________________________ Deleted: svn:eol-style ## -1 +0,0 ## -native \ No newline at end of property Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Deleted: svn:mime-type ## -1 +0,0 ## -text/plain \ No newline at end of property Index: projects/clang500-import/sys/arm/versatile/files.versatile =================================================================== --- projects/clang500-import/sys/arm/versatile/files.versatile (revision 319548) +++ projects/clang500-import/sys/arm/versatile/files.versatile (revision 319549) @@ -1,13 +1,12 @@ # $FreeBSD$ arm/versatile/pl050.c optional sc arm/versatile/sp804.c standard arm/versatile/versatile_machdep.c standard arm/versatile/versatile_clcd.c optional sc arm/versatile/versatile_common.c standard arm/versatile/versatile_pci.c optional pci arm/versatile/versatile_scm.c standard arm/versatile/versatile_sic.c standard -arm/versatile/versatile_timer.c standard kern/kern_clocksource.c standard Index: projects/clang500-import/sys/arm/versatile/sp804.c =================================================================== --- projects/clang500-import/sys/arm/versatile/sp804.c (revision 319548) +++ projects/clang500-import/sys/arm/versatile/sp804.c (revision 319549) @@ -1,358 +1,377 @@ /* * Copyright (c) 2012 Oleksandr Tymoshenko * Copyright (c) 2012 Damjan Marion * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
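Both delay loops touched by this commit measure elapsed time by subtracting successive counter reads. For a full-width 32-bit up-counter, plain unsigned subtraction already handles wraparound, which is what the mpcore version relies on; sp804_timer_delay() below spells the wrap case out with an explicit branch, and for the down-counting SP804 the timecounter first inverts the raw value (0xffffffff - VALUE). A small standalone check of the modulo-2^32 behaviour, with made-up counter values:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Counter wraps from 0xfffffff0 to 0x10: unsigned math still works. */
	uint32_t first = 0xfffffff0u, last = 0x10u;
	uint32_t delta = last - first;		/* modulo-2^32 subtraction */

	printf("elapsed = %u ticks\n", delta);	/* prints 32 */
	return (0);
}

The explicit if (last > first) branch is equivalent for a counter that uses the full 32-bit range; for a timer with a narrower mask, the delta would have to be masked instead.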
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include +#ifdef MULTIDELAY +#include /* For arm_set_delay */ +#endif + #include #include #include #include #define SP804_TIMER1_LOAD 0x00 #define SP804_TIMER1_VALUE 0x04 #define SP804_TIMER1_CONTROL 0x08 #define TIMER_CONTROL_EN (1 << 7) #define TIMER_CONTROL_FREERUN (0 << 6) #define TIMER_CONTROL_PERIODIC (1 << 6) #define TIMER_CONTROL_INTREN (1 << 5) #define TIMER_CONTROL_DIV1 (0 << 2) #define TIMER_CONTROL_DIV16 (1 << 2) #define TIMER_CONTROL_DIV256 (2 << 2) #define TIMER_CONTROL_32BIT (1 << 1) #define TIMER_CONTROL_ONESHOT (1 << 0) #define SP804_TIMER1_INTCLR 0x0C #define SP804_TIMER1_RIS 0x10 #define SP804_TIMER1_MIS 0x14 #define SP804_TIMER1_BGLOAD 0x18 #define SP804_TIMER2_LOAD 0x20 #define SP804_TIMER2_VALUE 0x24 #define SP804_TIMER2_CONTROL 0x28 #define SP804_TIMER2_INTCLR 0x2C #define SP804_TIMER2_RIS 0x30 #define SP804_TIMER2_MIS 0x34 #define SP804_TIMER2_BGLOAD 0x38 #define SP804_PERIPH_ID0 0xFE0 #define SP804_PERIPH_ID1 0xFE4 #define SP804_PERIPH_ID2 0xFE8 #define SP804_PERIPH_ID3 0xFEC #define SP804_PRIMECELL_ID0 0xFF0 #define SP804_PRIMECELL_ID1 0xFF4 #define SP804_PRIMECELL_ID2 0xFF8 #define SP804_PRIMECELL_ID3 0xFFC #define DEFAULT_FREQUENCY 1000000 /* * QEMU seems to have problem with full frequency */ #define DEFAULT_DIVISOR 16 #define DEFAULT_CONTROL_DIV TIMER_CONTROL_DIV16 struct sp804_timer_softc { struct resource* mem_res; struct resource* irq_res; void* intr_hl; uint32_t sysclk_freq; bus_space_tag_t bst; bus_space_handle_t bsh; struct timecounter tc; bool et_enabled; struct eventtimer et; int timer_initialized; }; /* Read/Write macros for Timer used as timecounter */ #define sp804_timer_tc_read_4(reg) \ bus_space_read_4(sc->bst, sc->bsh, reg) #define sp804_timer_tc_write_4(reg, val) \ bus_space_write_4(sc->bst, sc->bsh, reg, val) static unsigned sp804_timer_tc_get_timecount(struct timecounter *); +static void sp804_timer_delay(int, void *); static unsigned sp804_timer_tc_get_timecount(struct timecounter *tc) { struct sp804_timer_softc *sc = tc->tc_priv; return 0xffffffff - sp804_timer_tc_read_4(SP804_TIMER1_VALUE); } static int sp804_timer_start(struct eventtimer *et, sbintime_t first, sbintime_t period) { struct sp804_timer_softc *sc = et->et_priv; uint32_t count, reg; if (first != 0) { sc->et_enabled = 1; count = ((uint32_t)et->et_frequency * first) >> 32; sp804_timer_tc_write_4(SP804_TIMER2_LOAD, count); reg = TIMER_CONTROL_32BIT | TIMER_CONTROL_INTREN | TIMER_CONTROL_PERIODIC | DEFAULT_CONTROL_DIV | TIMER_CONTROL_EN; sp804_timer_tc_write_4(SP804_TIMER2_CONTROL, reg); return (0); } if (period != 0) { panic("period"); } return (EINVAL); } static int sp804_timer_stop(struct eventtimer *et) { struct sp804_timer_softc *sc = et->et_priv; uint32_t reg; sc->et_enabled = 0; reg = sp804_timer_tc_read_4(SP804_TIMER2_CONTROL); reg &= ~(TIMER_CONTROL_EN); sp804_timer_tc_write_4(SP804_TIMER2_CONTROL, reg); return (0); } static int sp804_timer_intr(void *arg) { struct sp804_timer_softc *sc = arg; static uint32_t prev = 0; uint32_t x = 0; x = sp804_timer_tc_read_4(SP804_TIMER1_VALUE); prev =x ; sp804_timer_tc_write_4(SP804_TIMER2_INTCLR, 1); if (sc->et_enabled) { if (sc->et.et_active) { sc->et.et_event_cb(&sc->et, sc->et.et_arg); } } return (FILTER_HANDLED); } static int sp804_timer_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_is_compatible(dev, "arm,sp804")) { device_set_desc(dev, "SP804 System 
Timer"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int sp804_timer_attach(device_t dev) { struct sp804_timer_softc *sc = device_get_softc(dev); int rid = 0; int i; uint32_t id, reg; phandle_t node; pcell_t clock; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "could not allocate memory resource\n"); return (ENXIO); } sc->bst = rman_get_bustag(sc->mem_res); sc->bsh = rman_get_bushandle(sc->mem_res); /* Request the IRQ resources */ sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "Error: could not allocate irq resources\n"); return (ENXIO); } sc->sysclk_freq = DEFAULT_FREQUENCY; /* Get the base clock frequency */ node = ofw_bus_get_node(dev); if ((OF_getencprop(node, "clock-frequency", &clock, sizeof(clock))) > 0) { sc->sysclk_freq = clock; } /* Setup and enable the timer */ if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_CLK, sp804_timer_intr, NULL, sc, &sc->intr_hl) != 0) { bus_release_resource(dev, SYS_RES_IRQ, rid, sc->irq_res); device_printf(dev, "Unable to setup the clock irq handler.\n"); return (ENXIO); } sp804_timer_tc_write_4(SP804_TIMER1_CONTROL, 0); sp804_timer_tc_write_4(SP804_TIMER2_CONTROL, 0); /* * Timer 1, timecounter */ sc->tc.tc_frequency = sc->sysclk_freq; sc->tc.tc_name = "SP804-1"; sc->tc.tc_get_timecount = sp804_timer_tc_get_timecount; sc->tc.tc_poll_pps = NULL; sc->tc.tc_counter_mask = ~0u; sc->tc.tc_quality = 1000; sc->tc.tc_priv = sc; sp804_timer_tc_write_4(SP804_TIMER1_VALUE, 0xffffffff); sp804_timer_tc_write_4(SP804_TIMER1_LOAD, 0xffffffff); reg = TIMER_CONTROL_PERIODIC | TIMER_CONTROL_32BIT; sp804_timer_tc_write_4(SP804_TIMER1_CONTROL, reg); reg |= TIMER_CONTROL_EN; sp804_timer_tc_write_4(SP804_TIMER1_CONTROL, reg); tc_init(&sc->tc); /* * Timer 2, event timer */ sc->et_enabled = 0; sc->et.et_name = "SP804-2"; sc->et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT; sc->et.et_quality = 1000; sc->et.et_frequency = sc->sysclk_freq / DEFAULT_DIVISOR; sc->et.et_min_period = (0x00000002LLU << 32) / sc->et.et_frequency; sc->et.et_max_period = (0xfffffffeLLU << 32) / sc->et.et_frequency; sc->et.et_start = sp804_timer_start; sc->et.et_stop = sp804_timer_stop; sc->et.et_priv = sc; et_register(&sc->et); id = 0; for (i = 3; i >= 0; i--) { id = (id << 8) | (sp804_timer_tc_read_4(SP804_PERIPH_ID0 + i*4) & 0xff); } device_printf(dev, "peripheral ID: %08x\n", id); id = 0; for (i = 3; i >= 0; i--) { id = (id << 8) | (sp804_timer_tc_read_4(SP804_PRIMECELL_ID0 + i*4) & 0xff); } +#ifdef MULTIDELAY + arm_set_delay(sp804_timer_delay, sc); +#endif + device_printf(dev, "PrimeCell ID: %08x\n", id); sc->timer_initialized = 1; return (0); } static device_method_t sp804_timer_methods[] = { DEVMETHOD(device_probe, sp804_timer_probe), DEVMETHOD(device_attach, sp804_timer_attach), { 0, 0 } }; static driver_t sp804_timer_driver = { "timer", sp804_timer_methods, sizeof(struct sp804_timer_softc), }; static devclass_t sp804_timer_devclass; DRIVER_MODULE(sp804_timer, simplebus, sp804_timer_driver, sp804_timer_devclass, 0, 0); +static void +sp804_timer_delay(int usec, void *arg) +{ + struct sp804_timer_softc *sc = arg; + int32_t counts; + uint32_t first, last; + + /* Get the number of times to count */ + counts = usec * ((sc->tc.tc_frequency / 1000000) + 1); + + first = sp804_timer_tc_get_timecount(&sc->tc); + + while (counts > 0) { + last = sp804_timer_tc_get_timecount(&sc->tc); + if (last == first) + continue; + if (last > first) { + counts -= 
(int32_t)(last - first); + } else { + counts -= (int32_t)((0xFFFFFFFF - first) + last); + } + first = last; + } +} + +#ifndef MULTIDELAY void DELAY(int usec) { int32_t counts; - uint32_t first, last; device_t timer_dev; struct sp804_timer_softc *sc; int timer_initialized = 0; timer_dev = devclass_get_device(sp804_timer_devclass, 0); if (timer_dev) { sc = device_get_softc(timer_dev); if (sc) timer_initialized = sc->timer_initialized; } if (!timer_initialized) { /* * Timer is not initialized yet */ for (; usec > 0; usec--) for (counts = 200; counts > 0; counts--) /* Prevent gcc from optimizing out the loop */ cpufunc_nullop(); - return; + } else { + sp804_timer_delay(usec, sc); } - - /* Get the number of times to count */ - counts = usec * ((sc->tc.tc_frequency / 1000000) + 1); - - first = sp804_timer_tc_get_timecount(&sc->tc); - - while (counts > 0) { - last = sp804_timer_tc_get_timecount(&sc->tc); - if (last == first) - continue; - if (last>first) { - counts -= (int32_t)(last - first); - } else { - counts -= (int32_t)((0xFFFFFFFF - first) + last); - } - first = last; - } } +#endif Index: projects/clang500-import/sys/arm64/arm64/pmap.c =================================================================== --- projects/clang500-import/sys/arm64/arm64/pmap.c (revision 319548) +++ projects/clang500-import/sys/arm64/arm64/pmap.c (revision 319549) @@ -1,4878 +1,4829 @@ /*- * Copyright (c) 1991 Regents of the University of California. * All rights reserved. * Copyright (c) 1994 John S. Dyson * All rights reserved. * Copyright (c) 1994 David Greenman * All rights reserved. * Copyright (c) 2003 Peter Wemm * All rights reserved. * Copyright (c) 2005-2010 Alan L. Cox * All rights reserved. * Copyright (c) 2014 Andrew Turner * All rights reserved. * Copyright (c) 2014-2016 The FreeBSD Foundation * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department and William Jolitz of UUNET Technologies Inc. * * This software was developed by Andrew Turner under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
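The delay conversion above is subtle around counter wrap, so a userland model may help. A minimal sketch of the same loop, where read_counter() is a hypothetical stand-in for sp804_timer_tc_get_timecount():

#include <stdint.h>

/* Hypothetical stand-in for sp804_timer_tc_get_timecount(). */
extern uint32_t read_counter(void);

static void
delay_sketch(int usec, uint64_t freq)
{
	int32_t counts;
	uint32_t first, last;

	/* The "+ 1" rounds ticks-per-usec up: the delay errs long, not short. */
	counts = usec * ((freq / 1000000) + 1);
	first = read_counter();
	while (counts > 0) {
		last = read_counter();
		if (last == first)
			continue;		/* no tick observed yet */
		if (last > first)
			counts -= (int32_t)(last - first);
		else				/* counter wrapped once */
			counts -= (int32_t)((0xFFFFFFFF - first) + last);
		first = last;
	}
}

The MULTIDELAY split keeps exactly this logic in sp804_timer_delay() and leaves the calibrated spin in DELAY() only for the window before the timer attaches.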
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 */ /*- * Copyright (c) 2003 Networks Associates Technology, Inc. * All rights reserved. * * This software was developed for the FreeBSD Project by Jake Burkholder, * Safeport Network Services, and Network Associates Laboratories, the * Security Research Division of Network Associates, Inc. under * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA * CHATS research program. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Manages physical address maps. * * Since the information managed by this module is * also stored by the logical address mapping module, * this module may throw away valid virtual-to-physical * mappings at almost any time. However, invalidations * of virtual-to-physical mappings must be done as * requested. * * In order to cope with hardware architectures which * make virtual-to-physical map invalidates expensive, * this module may delay invalidate or reduced protection * operations until such time as they are actually * necessary. This module is given full information as * to which processors are currently using which maps, * and to when physical maps must be made correct. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define NL0PG (PAGE_SIZE/(sizeof (pd_entry_t))) #define NL1PG (PAGE_SIZE/(sizeof (pd_entry_t))) #define NL2PG (PAGE_SIZE/(sizeof (pd_entry_t))) #define NL3PG (PAGE_SIZE/(sizeof (pt_entry_t))) #define NUL0E L0_ENTRIES #define NUL1E (NUL0E * NL1PG) #define NUL2E (NUL1E * NL2PG) #if !defined(DIAGNOSTIC) #ifdef __GNUC_GNU_INLINE__ #define PMAP_INLINE __attribute__((__gnu_inline__)) inline #else #define PMAP_INLINE extern inline #endif #else #define PMAP_INLINE #endif /* * These are configured by the mair_el1 register. This is set up in locore.S */ #define DEVICE_MEMORY 0 #define UNCACHED_MEMORY 1 #define CACHED_MEMORY 2 #ifdef PV_STATS #define PV_STAT(x) do { x ; } while (0) #else #define PV_STAT(x) do { } while (0) #endif #define pmap_l2_pindex(v) ((v) >> L2_SHIFT) #define pa_to_pvh(pa) (&pv_table[pmap_l2_pindex(pa)]) #define NPV_LIST_LOCKS MAXCPU #define PHYS_TO_PV_LIST_LOCK(pa) \ (&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS]) #define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa) do { \ struct rwlock **_lockp = (lockp); \ struct rwlock *_new_lock; \ \ _new_lock = PHYS_TO_PV_LIST_LOCK(pa); \ if (_new_lock != *_lockp) { \ if (*_lockp != NULL) \ rw_wunlock(*_lockp); \ *_lockp = _new_lock; \ rw_wlock(*_lockp); \ } \ } while (0) #define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m) \ CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m)) #define RELEASE_PV_LIST_LOCK(lockp) do { \ struct rwlock **_lockp = (lockp); \ \ if (*_lockp != NULL) { \ rw_wunlock(*_lockp); \ *_lockp = NULL; \ } \ } while (0) #define VM_PAGE_TO_PV_LIST_LOCK(m) \ PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m)) struct pmap kernel_pmap_store; vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ vm_offset_t kernel_vm_end = 0; struct msgbuf *msgbufp = NULL; /* * Data for the pv entry allocation mechanism. * Updates to pv_invl_gen are protected by the pv_list_locks[] * elements, but reads are not. 
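CHANGE_PV_LIST_LOCK_TO_PHYS() above is a hand-over-hand lock switch keyed by physical address. A userland sketch of the pattern, with pthread rwlocks standing in for struct rwlock and the pa_index() hash assumed to be the 2 MB superpage number:

#include <pthread.h>
#include <stdint.h>

#define NLOCKS	64			/* stands in for NPV_LIST_LOCKS */

static pthread_rwlock_t locks[NLOCKS];

/* Hash a physical address to a lock, as PHYS_TO_PV_LIST_LOCK() does. */
static pthread_rwlock_t *
pa_to_lock(uint64_t pa)
{
	return (&locks[(pa >> 21) % NLOCKS]);	/* assumed pa_index() shift */
}

/*
 * Retarget *lockp to the lock covering pa, dropping the old lock
 * first; a no-op when the same lock already covers it.
 */
static void
change_lock_to_phys(pthread_rwlock_t **lockp, uint64_t pa)
{
	pthread_rwlock_t *new_lock = pa_to_lock(pa);

	if (new_lock != *lockp) {
		if (*lockp != NULL)
			pthread_rwlock_unlock(*lockp);
		*lockp = new_lock;
		pthread_rwlock_wrlock(*lockp);
	}
}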
*/ static struct md_page *pv_table; static struct md_page pv_dummy; vm_paddr_t dmap_phys_base; /* The start of the dmap region */ vm_paddr_t dmap_phys_max; /* The limit of the dmap region */ vm_offset_t dmap_max_addr; /* The virtual address limit of the dmap */ /* This code assumes all L1 DMAP entries will be used */ CTASSERT((DMAP_MIN_ADDRESS & ~L0_OFFSET) == DMAP_MIN_ADDRESS); CTASSERT((DMAP_MAX_ADDRESS & ~L0_OFFSET) == DMAP_MAX_ADDRESS); #define DMAP_TABLES ((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT) extern pt_entry_t pagetable_dmap[]; static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters"); static int superpages_enabled = 1; SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &superpages_enabled, 0, "Are large page mappings enabled?"); /* * Data for the pv entry allocation mechanism */ static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks); static struct mtx pv_chunks_mutex; static struct rwlock pv_list_locks[NPV_LIST_LOCKS]; static void free_pv_chunk(struct pv_chunk *pc); static void free_pv_entry(pmap_t pmap, pv_entry_t pv); static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp); static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp); static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va); static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va); static int pmap_change_attr(vm_offset_t va, vm_size_t size, int mode); static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode); static pt_entry_t *pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va); static pt_entry_t *pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va, struct rwlock **lockp); static pt_entry_t *pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va); static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp); static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva, pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp); static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m, struct rwlock **lockp); static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp); static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free); static int pmap_unuse_l3(pmap_t, vm_offset_t, pd_entry_t, struct spglist *); /* * These load the old table data and store the new value. * They need to be atomic as the System MMU may write to the table at * the same time as the CPU. 
*/ #define pmap_load_store(table, entry) atomic_swap_64(table, entry) #define pmap_set(table, mask) atomic_set_64(table, mask) #define pmap_load_clear(table) atomic_swap_64(table, 0) #define pmap_load(table) (*table) /********************/ /* Inline functions */ /********************/ static __inline void pagecopy(void *s, void *d) { memcpy(d, s, PAGE_SIZE); } #define pmap_l0_index(va) (((va) >> L0_SHIFT) & L0_ADDR_MASK) #define pmap_l1_index(va) (((va) >> L1_SHIFT) & Ln_ADDR_MASK) #define pmap_l2_index(va) (((va) >> L2_SHIFT) & Ln_ADDR_MASK) #define pmap_l3_index(va) (((va) >> L3_SHIFT) & Ln_ADDR_MASK) static __inline pd_entry_t * pmap_l0(pmap_t pmap, vm_offset_t va) { return (&pmap->pm_l0[pmap_l0_index(va)]); } static __inline pd_entry_t * pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va) { pd_entry_t *l1; l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK); return (&l1[pmap_l1_index(va)]); } static __inline pd_entry_t * pmap_l1(pmap_t pmap, vm_offset_t va) { pd_entry_t *l0; l0 = pmap_l0(pmap, va); if ((pmap_load(l0) & ATTR_DESCR_MASK) != L0_TABLE) return (NULL); return (pmap_l0_to_l1(l0, va)); } static __inline pd_entry_t * pmap_l1_to_l2(pd_entry_t *l1, vm_offset_t va) { pd_entry_t *l2; l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK); return (&l2[pmap_l2_index(va)]); } static __inline pd_entry_t * pmap_l2(pmap_t pmap, vm_offset_t va) { pd_entry_t *l1; l1 = pmap_l1(pmap, va); if ((pmap_load(l1) & ATTR_DESCR_MASK) != L1_TABLE) return (NULL); return (pmap_l1_to_l2(l1, va)); } static __inline pt_entry_t * pmap_l2_to_l3(pd_entry_t *l2, vm_offset_t va) { pt_entry_t *l3; l3 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l2) & ~ATTR_MASK); return (&l3[pmap_l3_index(va)]); } /* * Returns the lowest valid pde for a given virtual address. * The next level may or may not point to a valid page or block. */ static __inline pd_entry_t * pmap_pde(pmap_t pmap, vm_offset_t va, int *level) { pd_entry_t *l0, *l1, *l2, desc; l0 = pmap_l0(pmap, va); desc = pmap_load(l0) & ATTR_DESCR_MASK; if (desc != L0_TABLE) { *level = -1; return (NULL); } l1 = pmap_l0_to_l1(l0, va); desc = pmap_load(l1) & ATTR_DESCR_MASK; if (desc != L1_TABLE) { *level = 0; return (l0); } l2 = pmap_l1_to_l2(l1, va); desc = pmap_load(l2) & ATTR_DESCR_MASK; if (desc != L2_TABLE) { *level = 1; return (l1); } *level = 2; return (l2); } /* * Returns the lowest valid pte block or table entry for a given virtual * address. If there are no valid entries return NULL and set the level to * the first invalid level. 
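The pmap_lN_index() macros above slice a virtual address into four 9-bit table indices plus a 12-bit page offset. A standalone illustration, assuming the usual 4 KB-granule shift values:

#include <inttypes.h>
#include <stdio.h>

#define L0_SHIFT	39	/* assumed 4 KB-granule values from pte.h */
#define L1_SHIFT	30
#define L2_SHIFT	21
#define L3_SHIFT	12
#define Ln_ADDR_MASK	0x1ffUL

int
main(void)
{
	uint64_t va = 0xffff000040123456UL;	/* arbitrary example VA */

	/* Four 9-bit indices plus the page offset cover bits 47..0. */
	printf("l0=%" PRIu64 " l1=%" PRIu64 " l2=%" PRIu64 " l3=%" PRIu64
	    " off=%" PRIu64 "\n",
	    (va >> L0_SHIFT) & Ln_ADDR_MASK,
	    (va >> L1_SHIFT) & Ln_ADDR_MASK,
	    (va >> L2_SHIFT) & Ln_ADDR_MASK,
	    (va >> L3_SHIFT) & Ln_ADDR_MASK,
	    va & 0xfffUL);
	return (0);
}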
*/ static __inline pt_entry_t * pmap_pte(pmap_t pmap, vm_offset_t va, int *level) { pd_entry_t *l1, *l2, desc; pt_entry_t *l3; l1 = pmap_l1(pmap, va); if (l1 == NULL) { *level = 0; return (NULL); } desc = pmap_load(l1) & ATTR_DESCR_MASK; if (desc == L1_BLOCK) { *level = 1; return (l1); } if (desc != L1_TABLE) { *level = 1; return (NULL); } l2 = pmap_l1_to_l2(l1, va); desc = pmap_load(l2) & ATTR_DESCR_MASK; if (desc == L2_BLOCK) { *level = 2; return (l2); } if (desc != L2_TABLE) { *level = 2; return (NULL); } *level = 3; l3 = pmap_l2_to_l3(l2, va); if ((pmap_load(l3) & ATTR_DESCR_MASK) != L3_PAGE) return (NULL); return (l3); } static inline bool pmap_superpages_enabled(void) { return (superpages_enabled != 0); } bool pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l0, pd_entry_t **l1, pd_entry_t **l2, pt_entry_t **l3) { pd_entry_t *l0p, *l1p, *l2p; if (pmap->pm_l0 == NULL) return (false); l0p = pmap_l0(pmap, va); *l0 = l0p; if ((pmap_load(l0p) & ATTR_DESCR_MASK) != L0_TABLE) return (false); l1p = pmap_l0_to_l1(l0p, va); *l1 = l1p; if ((pmap_load(l1p) & ATTR_DESCR_MASK) == L1_BLOCK) { *l2 = NULL; *l3 = NULL; return (true); } if ((pmap_load(l1p) & ATTR_DESCR_MASK) != L1_TABLE) return (false); l2p = pmap_l1_to_l2(l1p, va); *l2 = l2p; if ((pmap_load(l2p) & ATTR_DESCR_MASK) == L2_BLOCK) { *l3 = NULL; return (true); } *l3 = pmap_l2_to_l3(l2p, va); return (true); } static __inline int -pmap_is_current(pmap_t pmap) -{ - - return ((pmap == pmap_kernel()) || - (pmap == curthread->td_proc->p_vmspace->vm_map.pmap)); -} - -static __inline int pmap_l3_valid(pt_entry_t l3) { return ((l3 & ATTR_DESCR_MASK) == L3_PAGE); } -/* Is a level 1 or 2entry a valid block and cacheable */ CTASSERT(L1_BLOCK == L2_BLOCK); -static __inline int -pmap_pte_valid_cacheable(pt_entry_t pte) -{ - return (((pte & ATTR_DESCR_MASK) == L1_BLOCK) && - ((pte & ATTR_IDX_MASK) == ATTR_IDX(CACHED_MEMORY))); -} - -static __inline int -pmap_l3_valid_cacheable(pt_entry_t l3) -{ - - return (((l3 & ATTR_DESCR_MASK) == L3_PAGE) && - ((l3 & ATTR_IDX_MASK) == ATTR_IDX(CACHED_MEMORY))); -} - #define PTE_SYNC(pte) cpu_dcache_wb_range((vm_offset_t)pte, sizeof(*pte)) /* * Checks if the page is dirty. We currently lack proper tracking of this on * arm64 so for now assume is a page mapped as rw was accessed it is. 
*/ static inline int pmap_page_dirty(pt_entry_t pte) { return ((pte & (ATTR_AF | ATTR_AP_RW_BIT)) == (ATTR_AF | ATTR_AP(ATTR_AP_RW))); } static __inline void pmap_resident_count_inc(pmap_t pmap, int count) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); pmap->pm_stats.resident_count += count; } static __inline void pmap_resident_count_dec(pmap_t pmap, int count) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT(pmap->pm_stats.resident_count >= count, ("pmap %p resident count underflow %ld %d", pmap, pmap->pm_stats.resident_count, count)); pmap->pm_stats.resident_count -= count; } static pt_entry_t * pmap_early_page_idx(vm_offset_t l1pt, vm_offset_t va, u_int *l1_slot, u_int *l2_slot) { pt_entry_t *l2; pd_entry_t *l1; l1 = (pd_entry_t *)l1pt; *l1_slot = (va >> L1_SHIFT) & Ln_ADDR_MASK; /* Check locore has used a table L1 map */ KASSERT((l1[*l1_slot] & ATTR_DESCR_MASK) == L1_TABLE, ("Invalid bootstrap L1 table")); /* Find the address of the L2 table */ l2 = (pt_entry_t *)init_pt_va; *l2_slot = pmap_l2_index(va); return (l2); } static vm_paddr_t pmap_early_vtophys(vm_offset_t l1pt, vm_offset_t va) { u_int l1_slot, l2_slot; pt_entry_t *l2; l2 = pmap_early_page_idx(l1pt, va, &l1_slot, &l2_slot); return ((l2[l2_slot] & ~ATTR_MASK) + (va & L2_OFFSET)); } static void pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa, vm_paddr_t max_pa) { vm_offset_t va; vm_paddr_t pa; u_int l1_slot; pa = dmap_phys_base = min_pa & ~L1_OFFSET; va = DMAP_MIN_ADDRESS; for (; va < DMAP_MAX_ADDRESS && pa < max_pa; pa += L1_SIZE, va += L1_SIZE, l1_slot++) { l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT); pmap_load_store(&pagetable_dmap[l1_slot], (pa & ~L1_OFFSET) | ATTR_DEFAULT | ATTR_XN | ATTR_IDX(CACHED_MEMORY) | L1_BLOCK); } /* Set the upper limit of the DMAP region */ dmap_phys_max = pa; dmap_max_addr = va; cpu_dcache_wb_range((vm_offset_t)pagetable_dmap, PAGE_SIZE * DMAP_TABLES); cpu_tlb_flushID(); } static vm_offset_t pmap_bootstrap_l2(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l2_start) { vm_offset_t l2pt; vm_paddr_t pa; pd_entry_t *l1; u_int l1_slot; KASSERT((va & L1_OFFSET) == 0, ("Invalid virtual address")); l1 = (pd_entry_t *)l1pt; l1_slot = pmap_l1_index(va); l2pt = l2_start; for (; va < VM_MAX_KERNEL_ADDRESS; l1_slot++, va += L1_SIZE) { KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index")); pa = pmap_early_vtophys(l1pt, l2pt); pmap_load_store(&l1[l1_slot], (pa & ~Ln_TABLE_MASK) | L1_TABLE); l2pt += PAGE_SIZE; } /* Clean the L2 page table */ memset((void *)l2_start, 0, l2pt - l2_start); cpu_dcache_wb_range(l2_start, l2pt - l2_start); /* Flush the l1 table to ram */ cpu_dcache_wb_range((vm_offset_t)l1, PAGE_SIZE); return l2pt; } static vm_offset_t pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start) { vm_offset_t l2pt, l3pt; vm_paddr_t pa; pd_entry_t *l2; u_int l2_slot; KASSERT((va & L2_OFFSET) == 0, ("Invalid virtual address")); l2 = pmap_l2(kernel_pmap, va); l2 = (pd_entry_t *)rounddown2((uintptr_t)l2, PAGE_SIZE); l2pt = (vm_offset_t)l2; l2_slot = pmap_l2_index(va); l3pt = l3_start; for (; va < VM_MAX_KERNEL_ADDRESS; l2_slot++, va += L2_SIZE) { KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index")); pa = pmap_early_vtophys(l1pt, l3pt); pmap_load_store(&l2[l2_slot], (pa & ~Ln_TABLE_MASK) | L2_TABLE); l3pt += PAGE_SIZE; } /* Clean the L2 page table */ memset((void *)l3_start, 0, l3pt - l3_start); cpu_dcache_wb_range(l3_start, l3pt - l3_start); cpu_dcache_wb_range((vm_offset_t)l2, PAGE_SIZE); return l3pt; } /* * Bootstrap the system enough to run with virtual memory. 
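pmap_bootstrap_dmap() above advances va and pa in lockstep from DMAP_MIN_ADDRESS and min_pa & ~L1_OFFSET, so direct-map translation reduces to offset arithmetic. A sketch, with the DMAP base given as a hypothetical constant:

#include <stdint.h>

#define L1_SIZE		(1UL << 30)	/* 1 GB L1 block */
#define L1_OFFSET	(L1_SIZE - 1)

/* Hypothetical base; the real value comes from the arm64 VA layout. */
#define DMAP_MIN_ADDRESS_SKETCH	0xffffffc000000000UL

/* dmap_phys_base is min_pa rounded down to an L1 boundary above. */
static uint64_t
phys_to_dmap_sketch(uint64_t pa, uint64_t dmap_phys_base)
{
	return (DMAP_MIN_ADDRESS_SKETCH + (pa - dmap_phys_base));
}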
*/ void pmap_bootstrap(vm_offset_t l0pt, vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen) { u_int l1_slot, l2_slot, avail_slot, map_slot, used_map_slot; uint64_t kern_delta; pt_entry_t *l2; vm_offset_t va, freemempos; vm_offset_t dpcpu, msgbufpv; vm_paddr_t pa, max_pa, min_pa; int i; kern_delta = KERNBASE - kernstart; physmem = 0; printf("pmap_bootstrap %lx %lx %lx\n", l1pt, kernstart, kernlen); printf("%lx\n", l1pt); printf("%lx\n", (KERNBASE >> L1_SHIFT) & Ln_ADDR_MASK); /* Set this early so we can use the pagetable walking functions */ kernel_pmap_store.pm_l0 = (pd_entry_t *)l0pt; PMAP_LOCK_INIT(kernel_pmap); /* Assume the address we were loaded to is a valid physical address */ min_pa = max_pa = KERNBASE - kern_delta; /* * Find the minimum physical address. physmap is sorted, * but may contain empty ranges. */ for (i = 0; i < (physmap_idx * 2); i += 2) { if (physmap[i] == physmap[i + 1]) continue; if (physmap[i] <= min_pa) min_pa = physmap[i]; if (physmap[i + 1] > max_pa) max_pa = physmap[i + 1]; } /* Create a direct map region early so we can use it for pa -> va */ pmap_bootstrap_dmap(l1pt, min_pa, max_pa); va = KERNBASE; pa = KERNBASE - kern_delta; /* * Start to initialise phys_avail by copying from physmap * up to the physical address KERNBASE points at. */ map_slot = avail_slot = 0; for (; map_slot < (physmap_idx * 2) && avail_slot < (PHYS_AVAIL_SIZE - 2); map_slot += 2) { if (physmap[map_slot] == physmap[map_slot + 1]) continue; if (physmap[map_slot] <= pa && physmap[map_slot + 1] > pa) break; phys_avail[avail_slot] = physmap[map_slot]; phys_avail[avail_slot + 1] = physmap[map_slot + 1]; physmem += (phys_avail[avail_slot + 1] - phys_avail[avail_slot]) >> PAGE_SHIFT; avail_slot += 2; } /* Add the memory before the kernel */ if (physmap[avail_slot] < pa && avail_slot < (PHYS_AVAIL_SIZE - 2)) { phys_avail[avail_slot] = physmap[map_slot]; phys_avail[avail_slot + 1] = pa; physmem += (phys_avail[avail_slot + 1] - phys_avail[avail_slot]) >> PAGE_SHIFT; avail_slot += 2; } used_map_slot = map_slot; /* * Read the page table to find out what is already mapped. * This assumes we have mapped a block of memory from KERNBASE * using a single L1 entry. */ l2 = pmap_early_page_idx(l1pt, KERNBASE, &l1_slot, &l2_slot); /* Sanity check the index, KERNBASE should be the first VA */ KASSERT(l2_slot == 0, ("The L2 index is non-zero")); /* Find how many pages we have mapped */ for (; l2_slot < Ln_ENTRIES; l2_slot++) { if ((l2[l2_slot] & ATTR_DESCR_MASK) == 0) break; /* Check locore used L2 blocks */ KASSERT((l2[l2_slot] & ATTR_DESCR_MASK) == L2_BLOCK, ("Invalid bootstrap L2 table")); KASSERT((l2[l2_slot] & ~ATTR_MASK) == pa, ("Incorrect PA in L2 table")); va += L2_SIZE; pa += L2_SIZE; } va = roundup2(va, L1_SIZE); freemempos = KERNBASE + kernlen; freemempos = roundup2(freemempos, PAGE_SIZE); /* Create the l2 tables up to VM_MAX_KERNEL_ADDRESS */ freemempos = pmap_bootstrap_l2(l1pt, va, freemempos); /* And the l3 tables for the early devmap */ freemempos = pmap_bootstrap_l3(l1pt, VM_MAX_KERNEL_ADDRESS - L2_SIZE, freemempos); cpu_tlb_flushID(); #define alloc_pages(var, np) \ (var) = freemempos; \ freemempos += (np * PAGE_SIZE); \ memset((char *)(var), 0, ((np) * PAGE_SIZE)); /* Allocate dynamic per-cpu area. */ alloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE); dpcpu_init((void *)dpcpu, 0); /* Allocate memory for the msgbuf, e.g. 
for /sbin/dmesg */ alloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE); msgbufp = (void *)msgbufpv; virtual_avail = roundup2(freemempos, L1_SIZE); virtual_end = VM_MAX_KERNEL_ADDRESS - L2_SIZE; kernel_vm_end = virtual_avail; pa = pmap_early_vtophys(l1pt, freemempos); /* Finish initialising physmap */ map_slot = used_map_slot; for (; avail_slot < (PHYS_AVAIL_SIZE - 2) && map_slot < (physmap_idx * 2); map_slot += 2) { if (physmap[map_slot] == physmap[map_slot + 1]) continue; /* Have we used the current range? */ if (physmap[map_slot + 1] <= pa) continue; /* Do we need to split the entry? */ if (physmap[map_slot] < pa) { phys_avail[avail_slot] = pa; phys_avail[avail_slot + 1] = physmap[map_slot + 1]; } else { phys_avail[avail_slot] = physmap[map_slot]; phys_avail[avail_slot + 1] = physmap[map_slot + 1]; } physmem += (phys_avail[avail_slot + 1] - phys_avail[avail_slot]) >> PAGE_SHIFT; avail_slot += 2; } phys_avail[avail_slot] = 0; phys_avail[avail_slot + 1] = 0; /* * Maxmem isn't the "maximum memory", it's one larger than the * highest page of the physical address space. It should be * called something like "Maxphyspage". */ Maxmem = atop(phys_avail[avail_slot - 1]); cpu_tlb_flushID(); } /* * Initialize a vm_page's machine-dependent fields. */ void pmap_page_init(vm_page_t m) { TAILQ_INIT(&m->md.pv_list); m->md.pv_memattr = VM_MEMATTR_WRITE_BACK; } /* * Initialize the pmap module. * Called by vm_init, to initialize any structures that the pmap * system needs to map virtual memory. */ void pmap_init(void) { vm_size_t s; int i, pv_npg; /* * Are large page mappings enabled? */ TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled); /* * Initialize the pv chunk list mutex. */ mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF); /* * Initialize the pool of pv list locks. */ for (i = 0; i < NPV_LIST_LOCKS; i++) rw_init(&pv_list_locks[i], "pmap pv list"); /* * Calculate the size of the pv head table for superpages. */ pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, L2_SIZE); /* * Allocate memory for the pv head table for superpages. */ s = (vm_size_t)(pv_npg * sizeof(struct md_page)); s = round_page(s); pv_table = (struct md_page *)kmem_malloc(kernel_arena, s, M_WAITOK | M_ZERO); for (i = 0; i < pv_npg; i++) TAILQ_INIT(&pv_table[i].pv_list); TAILQ_INIT(&pv_dummy.pv_list); } static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2, CTLFLAG_RD, 0, "2MB page mapping counters"); static u_long pmap_l2_demotions; SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, demotions, CTLFLAG_RD, &pmap_l2_demotions, 0, "2MB page demotions"); static u_long pmap_l2_p_failures; SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, p_failures, CTLFLAG_RD, &pmap_l2_p_failures, 0, "2MB page promotion failures"); static u_long pmap_l2_promotions; SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, promotions, CTLFLAG_RD, &pmap_l2_promotions, 0, "2MB page promotions"); /* * Invalidate a single TLB entry. 
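pmap_init() above sizes the superpage pv head table at one struct md_page per 2 MB of physical address space, rounded up. A worked example of that arithmetic:

#include <inttypes.h>
#include <stdio.h>

#define L2_SIZE		(1UL << 21)	/* 2 MB */
#define howmany(x, y)	(((x) + ((y) - 1)) / (y))

int
main(void)
{
	uint64_t phys_end = 4UL << 30;	/* example: last segment ends at 4 GB */

	/* One pv head per potential 2 MB superpage, as in pmap_init(). */
	printf("pv_npg = %" PRIu64 "\n", howmany(phys_end, L2_SIZE)); /* 2048 */
	return (0);
}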
*/ PMAP_INLINE void pmap_invalidate_page(pmap_t pmap, vm_offset_t va) { sched_pin(); __asm __volatile( "dsb ishst \n" "tlbi vaae1is, %0 \n" "dsb ish \n" "isb \n" : : "r"(va >> PAGE_SHIFT)); sched_unpin(); } PMAP_INLINE void pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { vm_offset_t addr; sched_pin(); dsb(ishst); for (addr = sva; addr < eva; addr += PAGE_SIZE) { __asm __volatile( "tlbi vaae1is, %0" : : "r"(addr >> PAGE_SHIFT)); } __asm __volatile( "dsb ish \n" "isb \n"); sched_unpin(); } PMAP_INLINE void pmap_invalidate_all(pmap_t pmap) { sched_pin(); __asm __volatile( "dsb ishst \n" "tlbi vmalle1is \n" "dsb ish \n" "isb \n"); sched_unpin(); } /* * Routine: pmap_extract * Function: * Extract the physical page address associated * with the given map/virtual_address pair. */ vm_paddr_t pmap_extract(pmap_t pmap, vm_offset_t va) { pt_entry_t *pte, tpte; vm_paddr_t pa; int lvl; pa = 0; PMAP_LOCK(pmap); /* * Find the block or page map for this virtual address. pmap_pte * will return either a valid block/page entry, or NULL. */ pte = pmap_pte(pmap, va, &lvl); if (pte != NULL) { tpte = pmap_load(pte); pa = tpte & ~ATTR_MASK; switch(lvl) { case 1: KASSERT((tpte & ATTR_DESCR_MASK) == L1_BLOCK, ("pmap_extract: Invalid L1 pte found: %lx", tpte & ATTR_DESCR_MASK)); pa |= (va & L1_OFFSET); break; case 2: KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK, ("pmap_extract: Invalid L2 pte found: %lx", tpte & ATTR_DESCR_MASK)); pa |= (va & L2_OFFSET); break; case 3: KASSERT((tpte & ATTR_DESCR_MASK) == L3_PAGE, ("pmap_extract: Invalid L3 pte found: %lx", tpte & ATTR_DESCR_MASK)); pa |= (va & L3_OFFSET); break; } } PMAP_UNLOCK(pmap); return (pa); } /* * Routine: pmap_extract_and_hold * Function: * Atomically extract and hold the physical page * with the given pmap and virtual address pair * if that mapping permits the given protection. 
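The level switch in pmap_extract() above composes the physical address from the PTE's frame bits plus however many low VA bits the mapping size covers. A compact model; the ATTR_MASK layout (low 12 and high 12 bits as attributes) is an assumption restated here:

#include <stdint.h>

#define ATTR_MASK	0xfff0000000000fffUL	/* assumed attribute bits */
#define L1_OFFSET	((1UL << 30) - 1)
#define L2_OFFSET	((1UL << 21) - 1)
#define L3_OFFSET	((1UL << 12) - 1)

static uint64_t
pte_to_pa_sketch(uint64_t tpte, uint64_t va, int lvl)
{
	uint64_t pa = tpte & ~ATTR_MASK;	/* frame bits from the PTE */

	switch (lvl) {
	case 1:		/* 1 GB block: VA supplies the low 30 bits */
		return (pa | (va & L1_OFFSET));
	case 2:		/* 2 MB block: low 21 bits */
		return (pa | (va & L2_OFFSET));
	default:	/* 4 KB page: low 12 bits */
		return (pa | (va & L3_OFFSET));
	}
}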
*/ vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) { pt_entry_t *pte, tpte; vm_offset_t off; vm_paddr_t pa; vm_page_t m; int lvl; pa = 0; m = NULL; PMAP_LOCK(pmap); retry: pte = pmap_pte(pmap, va, &lvl); if (pte != NULL) { tpte = pmap_load(pte); KASSERT(lvl > 0 && lvl <= 3, ("pmap_extract_and_hold: Invalid level %d", lvl)); CTASSERT(L1_BLOCK == L2_BLOCK); KASSERT((lvl == 3 && (tpte & ATTR_DESCR_MASK) == L3_PAGE) || (lvl < 3 && (tpte & ATTR_DESCR_MASK) == L1_BLOCK), ("pmap_extract_and_hold: Invalid pte at L%d: %lx", lvl, tpte & ATTR_DESCR_MASK)); if (((tpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) || ((prot & VM_PROT_WRITE) == 0)) { switch(lvl) { case 1: off = va & L1_OFFSET; break; case 2: off = va & L2_OFFSET; break; case 3: default: off = 0; } if (vm_page_pa_tryrelock(pmap, (tpte & ~ATTR_MASK) | off, &pa)) goto retry; m = PHYS_TO_VM_PAGE((tpte & ~ATTR_MASK) | off); vm_page_hold(m); } } PA_UNLOCK_COND(pa); PMAP_UNLOCK(pmap); return (m); } vm_paddr_t pmap_kextract(vm_offset_t va) { pt_entry_t *pte, tpte; vm_paddr_t pa; int lvl; if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) { pa = DMAP_TO_PHYS(va); } else { pa = 0; pte = pmap_pte(kernel_pmap, va, &lvl); if (pte != NULL) { tpte = pmap_load(pte); pa = tpte & ~ATTR_MASK; switch(lvl) { case 1: KASSERT((tpte & ATTR_DESCR_MASK) == L1_BLOCK, ("pmap_kextract: Invalid L1 pte found: %lx", tpte & ATTR_DESCR_MASK)); pa |= (va & L1_OFFSET); break; case 2: KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK, ("pmap_kextract: Invalid L2 pte found: %lx", tpte & ATTR_DESCR_MASK)); pa |= (va & L2_OFFSET); break; case 3: KASSERT((tpte & ATTR_DESCR_MASK) == L3_PAGE, ("pmap_kextract: Invalid L3 pte found: %lx", tpte & ATTR_DESCR_MASK)); pa |= (va & L3_OFFSET); break; } } } return (pa); } /*************************************************** * Low level mapping routines..... ***************************************************/ static void pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode) { pd_entry_t *pde; pt_entry_t *pte, attr; vm_offset_t va; int lvl; KASSERT((pa & L3_OFFSET) == 0, ("pmap_kenter: Invalid physical address")); KASSERT((sva & L3_OFFSET) == 0, ("pmap_kenter: Invalid virtual address")); KASSERT((size & PAGE_MASK) == 0, ("pmap_kenter: Mapping is not page-sized")); attr = ATTR_DEFAULT | ATTR_IDX(mode) | L3_PAGE; if (mode == DEVICE_MEMORY) attr |= ATTR_XN; va = sva; while (size != 0) { pde = pmap_pde(kernel_pmap, va, &lvl); KASSERT(pde != NULL, ("pmap_kenter: Invalid page entry, va: 0x%lx", va)); KASSERT(lvl == 2, ("pmap_kenter: Invalid level %d", lvl)); pte = pmap_l2_to_l3(pde, va); pmap_load_store(pte, (pa & ~L3_OFFSET) | attr); PTE_SYNC(pte); va += PAGE_SIZE; pa += PAGE_SIZE; size -= PAGE_SIZE; } pmap_invalidate_range(kernel_pmap, sva, va); } void pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa) { pmap_kenter(sva, size, pa, DEVICE_MEMORY); } /* * Remove a page from the kernel pagetables. 
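pmap_kenter() above writes one L3 entry per 4 KB page and flushes the TLB once for the whole range. A simplified model; the real loop re-derives the L3 table from pmap_pde() on every page so it can cross 2 MB table boundaries, while this sketch assumes a single table:

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(PAGE_SIZE - 1)

typedef uint64_t pt_entry_t;

static void
kenter_sketch(pt_entry_t *l3_table, uint64_t sva, uint64_t size,
    uint64_t pa, pt_entry_t attr)
{
	uint64_t va;

	assert((sva & PAGE_MASK) == 0 && (pa & PAGE_MASK) == 0);
	assert((size & PAGE_MASK) == 0);
	/* One L3 entry per 4 KB page, as in the while loop above. */
	for (va = sva; size != 0; size -= PAGE_SIZE) {
		l3_table[(va >> 12) & 0x1ff] = (pa & ~PAGE_MASK) | attr;
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	}
	/* ... then invalidate the TLB for [sva, va), as above. */
}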
*/ PMAP_INLINE void pmap_kremove(vm_offset_t va) { pt_entry_t *pte; int lvl; pte = pmap_pte(kernel_pmap, va, &lvl); KASSERT(pte != NULL, ("pmap_kremove: Invalid address")); KASSERT(lvl == 3, ("pmap_kremove: Invalid pte level %d", lvl)); - if (pmap_l3_valid_cacheable(pmap_load(pte))) - cpu_dcache_wb_range(va, L3_SIZE); pmap_load_clear(pte); PTE_SYNC(pte); pmap_invalidate_page(kernel_pmap, va); } void pmap_kremove_device(vm_offset_t sva, vm_size_t size) { pt_entry_t *pte; vm_offset_t va; int lvl; KASSERT((sva & L3_OFFSET) == 0, ("pmap_kremove_device: Invalid virtual address")); KASSERT((size & PAGE_MASK) == 0, ("pmap_kremove_device: Mapping is not page-sized")); va = sva; while (size != 0) { pte = pmap_pte(kernel_pmap, va, &lvl); KASSERT(pte != NULL, ("Invalid page table, va: 0x%lx", va)); KASSERT(lvl == 3, ("Invalid device pagetable level: %d != 3", lvl)); pmap_load_clear(pte); PTE_SYNC(pte); va += PAGE_SIZE; size -= PAGE_SIZE; } pmap_invalidate_range(kernel_pmap, sva, va); } /* * Used to map a range of physical addresses into kernel * virtual address space. * * The value passed in '*virt' is a suggested virtual address for * the mapping. Architectures which can support a direct-mapped * physical to virtual region can return the appropriate address * within that region, leaving '*virt' unchanged. Other * architectures should map the pages starting at '*virt' and * update '*virt' with the first usable address after the mapped * region. */ vm_offset_t pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) { return PHYS_TO_DMAP(start); } /* * Add a list of wired pages to the kva * this routine is only used for temporary * kernel mappings that do not need to have * page modification or references recorded. * Note that old mappings are simply written * over. The page *must* be wired. * Note: SMP coherent. Uses a ranged shootdown IPI. */ void pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count) { pd_entry_t *pde; pt_entry_t *pte, pa; vm_offset_t va; vm_page_t m; int i, lvl; va = sva; for (i = 0; i < count; i++) { pde = pmap_pde(kernel_pmap, va, &lvl); KASSERT(pde != NULL, ("pmap_qenter: Invalid page entry, va: 0x%lx", va)); KASSERT(lvl == 2, ("pmap_qenter: Invalid level %d", lvl)); m = ma[i]; pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_AP(ATTR_AP_RW) | ATTR_IDX(m->md.pv_memattr) | L3_PAGE; if (m->md.pv_memattr == DEVICE_MEMORY) pa |= ATTR_XN; pte = pmap_l2_to_l3(pde, va); pmap_load_store(pte, pa); PTE_SYNC(pte); va += L3_SIZE; } pmap_invalidate_range(kernel_pmap, sva, va); } /* * This routine tears out page mappings from the * kernel -- it is meant only for temporary mappings. */ void pmap_qremove(vm_offset_t sva, int count) { pt_entry_t *pte; vm_offset_t va; int lvl; KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", sva)); va = sva; while (count-- > 0) { pte = pmap_pte(kernel_pmap, va, &lvl); KASSERT(lvl == 3, ("Invalid device pagetable level: %d != 3", lvl)); if (pte != NULL) { - if (pmap_l3_valid_cacheable(pmap_load(pte))) - cpu_dcache_wb_range(va, L3_SIZE); pmap_load_clear(pte); PTE_SYNC(pte); } va += PAGE_SIZE; } pmap_invalidate_range(kernel_pmap, sva, va); } /*************************************************** * Page table page management routines..... ***************************************************/ static __inline void pmap_free_zero_pages(struct spglist *free) { vm_page_t m; while ((m = SLIST_FIRST(free)) != NULL) { SLIST_REMOVE_HEAD(free, plinks.s.ss); /* Preserve the page's PG_ZERO setting. 
*/ vm_page_free_toq(m); } } /* * Schedule the specified unused page table page to be freed. Specifically, * add the page to the specified list of pages that will be released to the * physical memory manager after the TLB has been updated. */ static __inline void pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, boolean_t set_PG_ZERO) { if (set_PG_ZERO) m->flags |= PG_ZERO; else m->flags &= ~PG_ZERO; SLIST_INSERT_HEAD(free, m, plinks.s.ss); } /* * Decrements a page table page's wire count, which is used to record the * number of valid page table entries within the page. If the wire count * drops to zero, then the page table page is unmapped. Returns TRUE if the * page table page was unmapped and FALSE otherwise. */ static inline boolean_t pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) { --m->wire_count; if (m->wire_count == 0) { _pmap_unwire_l3(pmap, va, m, free); return (TRUE); } else return (FALSE); } static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* * unmap the page table page */ if (m->pindex >= (NUL2E + NUL1E)) { /* l1 page */ pd_entry_t *l0; l0 = pmap_l0(pmap, va); pmap_load_clear(l0); PTE_SYNC(l0); } else if (m->pindex >= NUL2E) { /* l2 page */ pd_entry_t *l1; l1 = pmap_l1(pmap, va); pmap_load_clear(l1); PTE_SYNC(l1); } else { /* l3 page */ pd_entry_t *l2; l2 = pmap_l2(pmap, va); pmap_load_clear(l2); PTE_SYNC(l2); } pmap_resident_count_dec(pmap, 1); if (m->pindex < NUL2E) { /* We just released an l3, unhold the matching l2 */ pd_entry_t *l1, tl1; vm_page_t l2pg; l1 = pmap_l1(pmap, va); tl1 = pmap_load(l1); l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK); pmap_unwire_l3(pmap, va, l2pg, free); } else if (m->pindex < (NUL2E + NUL1E)) { /* We just released an l2, unhold the matching l1 */ pd_entry_t *l0, tl0; vm_page_t l1pg; l0 = pmap_l0(pmap, va); tl0 = pmap_load(l0); l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK); pmap_unwire_l3(pmap, va, l1pg, free); } pmap_invalidate_page(pmap, va); /* * This is a release store so that the ordinary store unmapping * the page table page is globally performed before TLB shoot- * down is begun. */ atomic_subtract_rel_int(&vm_cnt.v_wire_count, 1); /* * Put page on a list so that it is released after * *ALL* TLB shootdown is done */ pmap_add_delayed_free_list(m, free, TRUE); } /* * After removing an l3 entry, this routine is used to * conditionally free the page, and manage the hold/wire counts. */ static int pmap_unuse_l3(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde, struct spglist *free) { vm_page_t mpte; if (va >= VM_MAXUSER_ADDRESS) return (0); KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0")); mpte = PHYS_TO_VM_PAGE(ptepde & ~ATTR_MASK); return (pmap_unwire_l3(pmap, va, mpte, free)); } void pmap_pinit0(pmap_t pmap) { PMAP_LOCK_INIT(pmap); bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); pmap->pm_l0 = kernel_pmap->pm_l0; pmap->pm_root.rt_root = 0; } int pmap_pinit(pmap_t pmap) { vm_paddr_t l0phys; vm_page_t l0pt; /* * allocate the l0 page */ while ((l0pt = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) VM_WAIT; l0phys = VM_PAGE_TO_PHYS(l0pt); pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(l0phys); if ((l0pt->flags & PG_ZERO) == 0) pagezero(pmap->pm_l0); pmap->pm_root.rt_root = 0; bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); return (1); } /* * This routine is called if the desired page table page does not exist. 
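pmap_unwire_l3()/_pmap_unwire_l3() above treat a page-table page's wire count as its number of valid entries: dropping the last entry unmaps the page and removes one entry from its parent, which can cascade l3 -> l2 -> l1. A recursive sketch with simplified hypothetical types (the kernel walks the levels iteratively by va instead):

#include <stdbool.h>
#include <stddef.h>

struct ptpage {
	int		wire_count;	/* valid entries in this table */
	struct ptpage	*parent;	/* table holding the entry for us */
};

/* Returns true when this table page was freed. */
static bool
unwire_sketch(struct ptpage *pt)
{
	if (--pt->wire_count > 0)
		return (false);
	/* clear the parent's entry, invalidate the TLB, free the page */
	if (pt->parent != NULL)
		(void)unwire_sketch(pt->parent);
	return (true);
}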
* * If page table page allocation fails, this routine may sleep before * returning NULL. It sleeps only if a lock pointer was given. * * Note: If a page allocation fails at page table level two or three, * one or two pages may be held during the wait, only to be released * afterwards. This conservative approach is easily argued to avoid * race conditions. */ static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp) { vm_page_t m, l1pg, l2pg; PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* * Allocate a page table page. */ if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { if (lockp != NULL) { RELEASE_PV_LIST_LOCK(lockp); PMAP_UNLOCK(pmap); VM_WAIT; PMAP_LOCK(pmap); } /* * Indicate the need to retry. While waiting, the page table * page may have been allocated. */ return (NULL); } if ((m->flags & PG_ZERO) == 0) pmap_zero_page(m); /* * Map the pagetable page into the process address space, if * it isn't already there. */ if (ptepindex >= (NUL2E + NUL1E)) { pd_entry_t *l0; vm_pindex_t l0index; l0index = ptepindex - (NUL2E + NUL1E); l0 = &pmap->pm_l0[l0index]; pmap_load_store(l0, VM_PAGE_TO_PHYS(m) | L0_TABLE); PTE_SYNC(l0); } else if (ptepindex >= NUL2E) { vm_pindex_t l0index, l1index; pd_entry_t *l0, *l1; pd_entry_t tl0; l1index = ptepindex - NUL2E; l0index = l1index >> L0_ENTRIES_SHIFT; l0 = &pmap->pm_l0[l0index]; tl0 = pmap_load(l0); if (tl0 == 0) { /* recurse for allocating page dir */ if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index, lockp) == NULL) { --m->wire_count; /* XXX: release mem barrier? */ atomic_subtract_int(&vm_cnt.v_wire_count, 1); vm_page_free_zero(m); return (NULL); } } else { l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK); l1pg->wire_count++; } l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK); l1 = &l1[ptepindex & Ln_ADDR_MASK]; pmap_load_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE); PTE_SYNC(l1); } else { vm_pindex_t l0index, l1index; pd_entry_t *l0, *l1, *l2; pd_entry_t tl0, tl1; l1index = ptepindex >> Ln_ENTRIES_SHIFT; l0index = l1index >> L0_ENTRIES_SHIFT; l0 = &pmap->pm_l0[l0index]; tl0 = pmap_load(l0); if (tl0 == 0) { /* recurse for allocating page dir */ if (_pmap_alloc_l3(pmap, NUL2E + l1index, lockp) == NULL) { --m->wire_count; atomic_subtract_int(&vm_cnt.v_wire_count, 1); vm_page_free_zero(m); return (NULL); } tl0 = pmap_load(l0); l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK); l1 = &l1[l1index & Ln_ADDR_MASK]; } else { l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK); l1 = &l1[l1index & Ln_ADDR_MASK]; tl1 = pmap_load(l1); if (tl1 == 0) { /* recurse for allocating page dir */ if (_pmap_alloc_l3(pmap, NUL2E + l1index, lockp) == NULL) { --m->wire_count; /* XXX: release mem barrier? */ atomic_subtract_int( &vm_cnt.v_wire_count, 1); vm_page_free_zero(m); return (NULL); } } else { l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK); l2pg->wire_count++; } } l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK); l2 = &l2[ptepindex & Ln_ADDR_MASK]; pmap_load_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE); PTE_SYNC(l2); } pmap_resident_count_inc(pmap, 1); return (m); } static vm_page_t pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp) { vm_pindex_t ptepindex; pd_entry_t *pde, tpde; #ifdef INVARIANTS pt_entry_t *pte; #endif vm_page_t m; int lvl; /* * Calculate pagetable page index */ ptepindex = pmap_l2_pindex(va); retry: /* * Get the page directory entry */ pde = pmap_pde(pmap, va, &lvl); /* * If the page table page is mapped, we just increment the hold count, * and activate it. 
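_pmap_alloc_l3() above distinguishes the three kinds of page-table pages purely by index range, with an l3 page's index being va >> L2_SHIFT (pmap_l2_pindex). A standalone classifier using the same thresholds:

#include <stdint.h>
#include <stdio.h>

#define NL1PG	512UL			/* 4096 / sizeof(pd_entry_t) */
#define NUL0E	512UL			/* L0_ENTRIES */
#define NUL1E	(NUL0E * NL1PG)
#define NUL2E	(NUL1E * NL1PG)

static const char *
ptepindex_level(uint64_t ptepindex)
{
	if (ptepindex >= NUL2E + NUL1E)
		return ("l1 page (slot in the l0 table)");
	if (ptepindex >= NUL2E)
		return ("l2 page (slot in an l1 table)");
	return ("l3 page (slot in an l2 table)");
}

int
main(void)
{
	printf("%s\n", ptepindex_level(0));
	printf("%s\n", ptepindex_level(NUL2E));
	printf("%s\n", ptepindex_level(NUL2E + NUL1E));
	return (0);
}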
If we get a level 2 pde it will point to a level 3 * table. */ switch (lvl) { case -1: break; case 0: #ifdef INVARIANTS pte = pmap_l0_to_l1(pde, va); KASSERT(pmap_load(pte) == 0, ("pmap_alloc_l3: TODO: l0 superpages")); #endif break; case 1: #ifdef INVARIANTS pte = pmap_l1_to_l2(pde, va); KASSERT(pmap_load(pte) == 0, ("pmap_alloc_l3: TODO: l1 superpages")); #endif break; case 2: tpde = pmap_load(pde); if (tpde != 0) { m = PHYS_TO_VM_PAGE(tpde & ~ATTR_MASK); m->wire_count++; return (m); } break; default: panic("pmap_alloc_l3: Invalid level %d", lvl); } /* * Here if the pte page isn't mapped, or if it has been deallocated. */ m = _pmap_alloc_l3(pmap, ptepindex, lockp); if (m == NULL && lockp != NULL) goto retry; return (m); } /*************************************************** * Pmap allocation/deallocation routines. ***************************************************/ /* * Release any resources held by the given physical map. * Called when a pmap initialized by pmap_pinit is being released. * Should only be called if the map contains no valid mappings. */ void pmap_release(pmap_t pmap) { vm_page_t m; KASSERT(pmap->pm_stats.resident_count == 0, ("pmap_release: pmap resident count %ld != 0", pmap->pm_stats.resident_count)); KASSERT(vm_radix_is_empty(&pmap->pm_root), ("pmap_release: pmap has reserved page table page(s)")); m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_l0)); m->wire_count--; atomic_subtract_int(&vm_cnt.v_wire_count, 1); vm_page_free_zero(m); } static int kvm_size(SYSCTL_HANDLER_ARGS) { unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS; return sysctl_handle_long(oidp, &ksize, 0, req); } SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 0, 0, kvm_size, "LU", "Size of KVM"); static int kvm_free(SYSCTL_HANDLER_ARGS) { unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; return sysctl_handle_long(oidp, &kfree, 0, req); } SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 0, 0, kvm_free, "LU", "Amount of KVM free"); /* * grow the number of kernel page table entries, if needed */ void pmap_growkernel(vm_offset_t addr) { vm_paddr_t paddr; vm_page_t nkpg; pd_entry_t *l0, *l1, *l2; mtx_assert(&kernel_map->system_mtx, MA_OWNED); addr = roundup2(addr, L2_SIZE); if (addr - 1 >= kernel_map->max_offset) addr = kernel_map->max_offset; while (kernel_vm_end < addr) { l0 = pmap_l0(kernel_pmap, kernel_vm_end); KASSERT(pmap_load(l0) != 0, ("pmap_growkernel: No level 0 kernel entry")); l1 = pmap_l0_to_l1(l0, kernel_vm_end); if (pmap_load(l1) == 0) { /* We need a new PDP entry */ nkpg = vm_page_alloc(NULL, kernel_vm_end >> L1_SHIFT, VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if (nkpg == NULL) panic("pmap_growkernel: no memory to grow kernel"); if ((nkpg->flags & PG_ZERO) == 0) pmap_zero_page(nkpg); paddr = VM_PAGE_TO_PHYS(nkpg); pmap_load_store(l1, paddr | L1_TABLE); PTE_SYNC(l1); continue; /* try again */ } l2 = pmap_l1_to_l2(l1, kernel_vm_end); if ((pmap_load(l2) & ATTR_AF) != 0) { kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET; if (kernel_vm_end - 1 >= kernel_map->max_offset) { kernel_vm_end = kernel_map->max_offset; break; } continue; } nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_SHIFT, VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if (nkpg == NULL) panic("pmap_growkernel: no memory to grow kernel"); if ((nkpg->flags & PG_ZERO) == 0) pmap_zero_page(nkpg); paddr = VM_PAGE_TO_PHYS(nkpg); pmap_load_store(l2, paddr | L2_TABLE); PTE_SYNC(l2); pmap_invalidate_page(kernel_pmap, 
kernel_vm_end); kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET; if (kernel_vm_end - 1 >= kernel_map->max_offset) { kernel_vm_end = kernel_map->max_offset; break; } } } /*************************************************** * page management routines. ***************************************************/ CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); CTASSERT(_NPCM == 3); CTASSERT(_NPCPV == 168); static __inline struct pv_chunk * pv_to_chunk(pv_entry_t pv) { return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK)); } #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) #define PC_FREE0 0xfffffffffffffffful #define PC_FREE1 0xfffffffffffffffful #define PC_FREE2 0x000000fffffffffful static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 }; #if 0 #ifdef PV_STATS static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, "Current number of pv entry chunks"); SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, "Current number of pv entry chunks allocated"); SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, "Current number of pv entry chunks frees"); SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0, "Number of times tried to get a chunk page but failed."); static long pv_entry_frees, pv_entry_allocs, pv_entry_count; static int pv_entry_spare; SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, "Current number of pv entry frees"); SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0, "Current number of pv entry allocs"); SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, "Current number of pv entries"); SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, "Current number of spare pv entries"); #endif #endif /* 0 */ /* * We are in a serious low memory condition. Resort to * drastic measures to free some pages so we can allocate * another pv entry chunk. * * Returns NULL if PV entries were reclaimed from the specified pmap. * * We do not, however, unmap 2mpages because subsequent accesses will * allocate per-page pv entries until repromotion occurs, thereby * exacerbating the shortage of free pv entries. */ static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp) { struct pch new_tail; struct pv_chunk *pc; struct md_page *pvh; pd_entry_t *pde; pmap_t pmap; pt_entry_t *pte, tpte; pv_entry_t pv; vm_offset_t va; vm_page_t m, m_pc; struct spglist free; uint64_t inuse; int bit, field, freed, lvl; PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL")); pmap = NULL; m_pc = NULL; SLIST_INIT(&free); TAILQ_INIT(&new_tail); mtx_lock(&pv_chunks_mutex); while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && SLIST_EMPTY(&free)) { TAILQ_REMOVE(&pv_chunks, pc, pc_lru); mtx_unlock(&pv_chunks_mutex); if (pmap != pc->pc_pmap) { if (pmap != NULL && pmap != locked_pmap) PMAP_UNLOCK(pmap); pmap = pc->pc_pmap; /* Avoid deadlock and lock recursion. */ if (pmap > locked_pmap) { RELEASE_PV_LIST_LOCK(lockp); PMAP_LOCK(pmap); } else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) { pmap = NULL; TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru); mtx_lock(&pv_chunks_mutex); continue; } } /* * Destroy every non-wired, 4 KB page mapping in the chunk. 
*/ freed = 0; for (field = 0; field < _NPCM; field++) { for (inuse = ~pc->pc_map[field] & pc_freemask[field]; inuse != 0; inuse &= ~(1UL << bit)) { bit = ffsl(inuse) - 1; pv = &pc->pc_pventry[field * 64 + bit]; va = pv->pv_va; pde = pmap_pde(pmap, va, &lvl); if (lvl != 2) continue; pte = pmap_l2_to_l3(pde, va); tpte = pmap_load(pte); if ((tpte & ATTR_SW_WIRED) != 0) continue; tpte = pmap_load_clear(pte); PTE_SYNC(pte); pmap_invalidate_page(pmap, va); m = PHYS_TO_VM_PAGE(tpte & ~ATTR_MASK); if (pmap_page_dirty(tpte)) vm_page_dirty(m); if ((tpte & ATTR_AF) != 0) vm_page_aflag_set(m, PGA_REFERENCED); CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); if (TAILQ_EMPTY(&pvh->pv_list)) { vm_page_aflag_clear(m, PGA_WRITEABLE); } } pc->pc_map[field] |= 1UL << bit; pmap_unuse_l3(pmap, va, pmap_load(pde), &free); freed++; } } if (freed == 0) { TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru); mtx_lock(&pv_chunks_mutex); continue; } /* Every freed mapping is for a 4 KB page. */ pmap_resident_count_dec(pmap, freed); PV_STAT(atomic_add_long(&pv_entry_frees, freed)); PV_STAT(atomic_add_int(&pv_entry_spare, freed)); PV_STAT(atomic_subtract_long(&pv_entry_count, freed)); TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1 && pc->pc_map[2] == PC_FREE2) { PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV)); PV_STAT(atomic_subtract_int(&pc_chunk_count, 1)); PV_STAT(atomic_add_int(&pc_chunk_frees, 1)); /* Entire chunk is free; return it. */ m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc)); dump_drop_page(m_pc->phys_addr); mtx_lock(&pv_chunks_mutex); break; } TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru); mtx_lock(&pv_chunks_mutex); /* One freed pv entry in locked_pmap is sufficient. */ if (pmap == locked_pmap) break; } TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru); mtx_unlock(&pv_chunks_mutex); if (pmap != NULL && pmap != locked_pmap) PMAP_UNLOCK(pmap); if (m_pc == NULL && !SLIST_EMPTY(&free)) { m_pc = SLIST_FIRST(&free); SLIST_REMOVE_HEAD(&free, plinks.s.ss); /* Recycle a freed page table page. */ m_pc->wire_count = 1; atomic_add_int(&vm_cnt.v_wire_count, 1); } pmap_free_zero_pages(&free); return (m_pc); } /* * free the pv_entry back to the free list */ static void free_pv_entry(pmap_t pmap, pv_entry_t pv) { struct pv_chunk *pc; int idx, field, bit; PMAP_LOCK_ASSERT(pmap, MA_OWNED); PV_STAT(atomic_add_long(&pv_entry_frees, 1)); PV_STAT(atomic_add_int(&pv_entry_spare, 1)); PV_STAT(atomic_subtract_long(&pv_entry_count, 1)); pc = pv_to_chunk(pv); idx = pv - &pc->pc_pventry[0]; field = idx / 64; bit = idx % 64; pc->pc_map[field] |= 1ul << bit; if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 || pc->pc_map[2] != PC_FREE2) { /* 98% of the time, pc is already at the head of the list. 
*/ if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) { TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); } return; } TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); free_pv_chunk(pc); } static void free_pv_chunk(struct pv_chunk *pc) { vm_page_t m; mtx_lock(&pv_chunks_mutex); TAILQ_REMOVE(&pv_chunks, pc, pc_lru); mtx_unlock(&pv_chunks_mutex); PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV)); PV_STAT(atomic_subtract_int(&pc_chunk_count, 1)); PV_STAT(atomic_add_int(&pc_chunk_frees, 1)); /* entire chunk is free, return it */ m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc)); dump_drop_page(m->phys_addr); vm_page_unwire(m, PQ_NONE); vm_page_free(m); } /* * Returns a new PV entry, allocating a new PV chunk from the system when * needed. If this PV chunk allocation fails and a PV list lock pointer was * given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is * returned. * * The given PV list lock may be released. */ static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp) { int bit, field; pv_entry_t pv; struct pv_chunk *pc; vm_page_t m; PMAP_LOCK_ASSERT(pmap, MA_OWNED); PV_STAT(atomic_add_long(&pv_entry_allocs, 1)); retry: pc = TAILQ_FIRST(&pmap->pm_pvchunk); if (pc != NULL) { for (field = 0; field < _NPCM; field++) { if (pc->pc_map[field]) { bit = ffsl(pc->pc_map[field]) - 1; break; } } if (field < _NPCM) { pv = &pc->pc_pventry[field * 64 + bit]; pc->pc_map[field] &= ~(1ul << bit); /* If this was the last item, move it to tail */ if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) { TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); } PV_STAT(atomic_add_long(&pv_entry_count, 1)); PV_STAT(atomic_subtract_int(&pv_entry_spare, 1)); return (pv); } } /* No free items, allocate another chunk */ m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED); if (m == NULL) { if (lockp == NULL) { PV_STAT(pc_chunk_tryfail++); return (NULL); } m = reclaim_pv_chunk(pmap, lockp); if (m == NULL) goto retry; } PV_STAT(atomic_add_int(&pc_chunk_count, 1)); PV_STAT(atomic_add_int(&pc_chunk_allocs, 1)); dump_add_page(m->phys_addr); pc = (void *)PHYS_TO_DMAP(m->phys_addr); pc->pc_pmap = pmap; pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */ pc->pc_map[1] = PC_FREE1; pc->pc_map[2] = PC_FREE2; mtx_lock(&pv_chunks_mutex); TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); mtx_unlock(&pv_chunks_mutex); pv = &pc->pc_pventry[0]; TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); PV_STAT(atomic_add_long(&pv_entry_count, 1)); PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1)); return (pv); } /* * Ensure that the number of spare PV entries in the specified pmap meets or * exceeds the given count, "needed". * * The given PV list lock may be released. */ static void reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp) { struct pch new_tail; struct pv_chunk *pc; int avail, free; vm_page_t m; PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL")); /* * Newly allocated PV chunks must be stored in a private list until * the required number of PV chunks have been allocated. Otherwise, * reclaim_pv_chunk() could recycle one of these chunks. In * contrast, these chunks must be added to the pmap upon allocation. 
*/ TAILQ_INIT(&new_tail); retry: avail = 0; TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) { bit_count((bitstr_t *)pc->pc_map, 0, sizeof(pc->pc_map) * NBBY, &free); if (free == 0) break; avail += free; if (avail >= needed) break; } for (; avail < needed; avail += _NPCPV) { m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED); if (m == NULL) { m = reclaim_pv_chunk(pmap, lockp); if (m == NULL) goto retry; } PV_STAT(atomic_add_int(&pc_chunk_count, 1)); PV_STAT(atomic_add_int(&pc_chunk_allocs, 1)); dump_add_page(m->phys_addr); pc = (void *)PHYS_TO_DMAP(m->phys_addr); pc->pc_pmap = pmap; pc->pc_map[0] = PC_FREE0; pc->pc_map[1] = PC_FREE1; pc->pc_map[2] = PC_FREE2; TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru); PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV)); } if (!TAILQ_EMPTY(&new_tail)) { mtx_lock(&pv_chunks_mutex); TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru); mtx_unlock(&pv_chunks_mutex); } } /* * First find and then remove the pv entry for the specified pmap and virtual * address from the specified pv list. Returns the pv entry if found and NULL * otherwise. This operation can be performed on pv lists for either 4KB or * 2MB page mappings. */ static __inline pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) { pv_entry_t pv; TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { if (pmap == PV_PMAP(pv) && va == pv->pv_va) { TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); pvh->pv_gen++; break; } } return (pv); } /* * After demotion from a 2MB page mapping to 512 4KB page mappings, * destroy the pv entry for the 2MB page mapping and reinstantiate the pv * entries for each of the 4KB page mappings. */ static void pmap_pv_demote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, struct rwlock **lockp) { struct md_page *pvh; struct pv_chunk *pc; pv_entry_t pv; vm_offset_t va_last; vm_page_t m; int bit, field; PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT((pa & L2_OFFSET) == 0, ("pmap_pv_demote_l2: pa is not 2mpage aligned")); CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); /* * Transfer the 2mpage's pv entry for this mapping to the first * page's pv list. Once this transfer begins, the pv list lock * must not be released until the last pv entry is reinstantiated. */ pvh = pa_to_pvh(pa); va = va & ~L2_OFFSET; pv = pmap_pvh_remove(pvh, pmap, va); KASSERT(pv != NULL, ("pmap_pv_demote_l2: pv not found")); m = PHYS_TO_VM_PAGE(pa); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; /* Instantiate the remaining Ln_ENTRIES - 1 pv entries. 
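pmap_pv_demote_l2() above reuses the 2 MB mapping's pv entry for the first constituent 4 KB page and then instantiates Ln_ENTRIES - 1 = 511 more. The address arithmetic, with a hypothetical install_pv() standing in for the chunk-scanning inner loops:

#include <stdint.h>

#define PAGE_SIZE	4096UL
#define L2_SIZE		(1UL << 21)

extern void install_pv(uint64_t va);	/* hypothetical */

static void
demote_fanout_sketch(uint64_t va)
{
	uint64_t va_last;

	va &= ~(L2_SIZE - 1);			/* 2 MB base keeps pv 0 */
	va_last = va + L2_SIZE - PAGE_SIZE;	/* last constituent page */
	while (va < va_last) {
		va += PAGE_SIZE;
		install_pv(va);			/* 511 calls in total */
	}
}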
*/ PV_STAT(atomic_add_long(&pv_entry_allocs, Ln_ENTRIES - 1)); va_last = va + L2_SIZE - PAGE_SIZE; for (;;) { pc = TAILQ_FIRST(&pmap->pm_pvchunk); KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 || pc->pc_map[2] != 0, ("pmap_pv_demote_l2: missing spare")); for (field = 0; field < _NPCM; field++) { while (pc->pc_map[field]) { bit = ffsl(pc->pc_map[field]) - 1; pc->pc_map[field] &= ~(1ul << bit); pv = &pc->pc_pventry[field * 64 + bit]; va += PAGE_SIZE; pv->pv_va = va; m++; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_pv_demote_l2: page %p is not managed", m)); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; if (va == va_last) goto out; } } TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); } out: if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) { TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); } PV_STAT(atomic_add_long(&pv_entry_count, Ln_ENTRIES - 1)); PV_STAT(atomic_subtract_int(&pv_entry_spare, Ln_ENTRIES - 1)); } /* * First find and then destroy the pv entry for the specified pmap and virtual * address. This operation can be performed on pv lists for either 4KB or 2MB * page mappings. */ static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) { pv_entry_t pv; pv = pmap_pvh_remove(pvh, pmap, va); KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); free_pv_entry(pmap, pv); } /* * Conditionally create the PV entry for a 4KB page mapping if the required * memory can be allocated without resorting to reclamation. */ static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m, struct rwlock **lockp) { pv_entry_t pv; PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* Pass NULL instead of the lock pointer to disable reclamation. */ if ((pv = get_pv_entry(pmap, NULL)) != NULL) { pv->pv_va = va; CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; return (TRUE); } else return (FALSE); } /* * pmap_remove_l3: do the things to unmap a page in a process */ static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va, pd_entry_t l2e, struct spglist *free, struct rwlock **lockp) { struct md_page *pvh; pt_entry_t old_l3; vm_page_t m; PMAP_LOCK_ASSERT(pmap, MA_OWNED); - if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(pmap_load(l3))) - cpu_dcache_wb_range(va, L3_SIZE); old_l3 = pmap_load_clear(l3); PTE_SYNC(l3); pmap_invalidate_page(pmap, va); if (old_l3 & ATTR_SW_WIRED) pmap->pm_stats.wired_count -= 1; pmap_resident_count_dec(pmap, 1); if (old_l3 & ATTR_SW_MANAGED) { m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK); if (pmap_page_dirty(old_l3)) vm_page_dirty(m); if (old_l3 & ATTR_AF) vm_page_aflag_set(m, PGA_REFERENCED); CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); pmap_pvh_free(&m->md, pmap, va); if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); if (TAILQ_EMPTY(&pvh->pv_list)) vm_page_aflag_clear(m, PGA_WRITEABLE); } } return (pmap_unuse_l3(pmap, va, l2e, free)); } /* * Remove the given range of addresses from the specified map. * * It is assumed that the start and end are properly * rounded to the page size. */ void pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { struct rwlock *lock; vm_offset_t va, va_next; pd_entry_t *l0, *l1, *l2; pt_entry_t l3_paddr, *l3; struct spglist free; int anyvalid; /* * Perform an unsynchronized read. This is, however, safe. 
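 *
 * An illustrative aside: the loop below steps sva to the next table
 * boundary at each level; the "va_next < sva" test catches the wrap at
 * the top of the address space.  A userspace sketch:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define L2_SIZE		(1ul << 21)
 *	#define L2_OFFSET	(L2_SIZE - 1)
 *
 *	static uint64_t
 *	next_boundary(uint64_t sva, uint64_t eva)
 *	{
 *		uint64_t va_next = (sva + L2_SIZE) & ~L2_OFFSET;
 *
 *		if (va_next < sva)	// wrapped past the top: clamp
 *			va_next = eva;
 *		return (va_next);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		// prints 0x400000, the next 2MB boundary
 *		printf("%#lx\n",
 *		    (unsigned long)next_boundary(0x200123, 0x400000));
 *		return (0);
 *	}
 *
 * The same pattern recurs in pmap_protect() and pmap_unwire().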
*/ if (pmap->pm_stats.resident_count == 0) return; anyvalid = 0; SLIST_INIT(&free); PMAP_LOCK(pmap); lock = NULL; for (; sva < eva; sva = va_next) { if (pmap->pm_stats.resident_count == 0) break; l0 = pmap_l0(pmap, sva); if (pmap_load(l0) == 0) { va_next = (sva + L0_SIZE) & ~L0_OFFSET; if (va_next < sva) va_next = eva; continue; } l1 = pmap_l0_to_l1(l0, sva); if (pmap_load(l1) == 0) { va_next = (sva + L1_SIZE) & ~L1_OFFSET; if (va_next < sva) va_next = eva; continue; } /* * Calculate index for next page table. */ va_next = (sva + L2_SIZE) & ~L2_OFFSET; if (va_next < sva) va_next = eva; l2 = pmap_l1_to_l2(l1, sva); if (l2 == NULL) continue; l3_paddr = pmap_load(l2); if ((l3_paddr & ATTR_DESCR_MASK) == L2_BLOCK) { /* TODO: Add pmap_remove_l2 */ if (pmap_demote_l2_locked(pmap, l2, sva & ~L2_OFFSET, &lock) == NULL) continue; l3_paddr = pmap_load(l2); } /* * Weed out invalid mappings. */ if ((l3_paddr & ATTR_DESCR_MASK) != L2_TABLE) continue; /* * Limit our scan to either the end of the va represented * by the current page table page, or to the end of the * range being removed. */ if (va_next > eva) va_next = eva; va = va_next; for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++, sva += L3_SIZE) { if (l3 == NULL) panic("l3 == NULL"); if (pmap_load(l3) == 0) { if (va != va_next) { pmap_invalidate_range(pmap, va, sva); va = va_next; } continue; } if (va == va_next) va = sva; if (pmap_remove_l3(pmap, l3, sva, l3_paddr, &free, &lock)) { sva += L3_SIZE; break; } } if (va != va_next) pmap_invalidate_range(pmap, va, sva); } if (lock != NULL) rw_wunlock(lock); if (anyvalid) pmap_invalidate_all(pmap); PMAP_UNLOCK(pmap); pmap_free_zero_pages(&free); } /* * Routine: pmap_remove_all * Function: * Removes this physical page from * all physical maps in which it resides. * Reflects back modify bits to the pager. * * Notes: * Original versions of this routine were very * inefficient because they iteratively called * pmap_remove (slow...) */ void pmap_remove_all(vm_page_t m) { struct md_page *pvh; pv_entry_t pv; pmap_t pmap; struct rwlock *lock; pd_entry_t *pde, tpde; pt_entry_t *pte, tpte; vm_offset_t va; struct spglist free; int lvl, pvh_gen, md_gen; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_remove_all: page %p is not managed", m)); SLIST_INIT(&free); lock = VM_PAGE_TO_PV_LIST_LOCK(m); pvh = (m->flags & PG_FICTITIOUS) != 0 ? 
&pv_dummy : pa_to_pvh(VM_PAGE_TO_PHYS(m)); retry: rw_wlock(lock); while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { pvh_gen = pvh->pv_gen; rw_wunlock(lock); PMAP_LOCK(pmap); rw_wlock(lock); if (pvh_gen != pvh->pv_gen) { rw_wunlock(lock); PMAP_UNLOCK(pmap); goto retry; } } va = pv->pv_va; pte = pmap_pte(pmap, va, &lvl); KASSERT(pte != NULL, ("pmap_remove_all: no page table entry found")); KASSERT(lvl == 2, ("pmap_remove_all: invalid pte level %d", lvl)); pmap_demote_l2_locked(pmap, pte, va, &lock); PMAP_UNLOCK(pmap); } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { pvh_gen = pvh->pv_gen; md_gen = m->md.pv_gen; rw_wunlock(lock); PMAP_LOCK(pmap); rw_wlock(lock); if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { rw_wunlock(lock); PMAP_UNLOCK(pmap); goto retry; } } pmap_resident_count_dec(pmap, 1); pde = pmap_pde(pmap, pv->pv_va, &lvl); KASSERT(pde != NULL, ("pmap_remove_all: no page directory entry found")); KASSERT(lvl == 2, ("pmap_remove_all: invalid pde level %d", lvl)); tpde = pmap_load(pde); pte = pmap_l2_to_l3(pde, pv->pv_va); tpte = pmap_load(pte); - if (pmap_is_current(pmap) && - pmap_l3_valid_cacheable(tpte)) - cpu_dcache_wb_range(pv->pv_va, L3_SIZE); pmap_load_clear(pte); PTE_SYNC(pte); pmap_invalidate_page(pmap, pv->pv_va); if (tpte & ATTR_SW_WIRED) pmap->pm_stats.wired_count--; if ((tpte & ATTR_AF) != 0) vm_page_aflag_set(m, PGA_REFERENCED); /* * Update the vm_page_t clean and reference bits. */ if (pmap_page_dirty(tpte)) vm_page_dirty(m); pmap_unuse_l3(pmap, pv->pv_va, tpde, &free); TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; free_pv_entry(pmap, pv); PMAP_UNLOCK(pmap); } vm_page_aflag_clear(m, PGA_WRITEABLE); rw_wunlock(lock); pmap_free_zero_pages(&free); } /* * Set the physical protection on the * specified range of this map as requested. 
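 *
 * An illustrative aside: pmap_protect() below only ever removes
 * permissions, by OR-ing restrictive bits into the PTE.  A sketch of
 * that computation with stand-in values for the ATTR_* encodings (not
 * the real arm64 bit positions):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define VM_PROT_WRITE	0x02
 *	#define VM_PROT_EXECUTE	0x04
 *	#define ATTR_AP_RO	(1ul << 7)	// stand-in: read-only
 *	#define ATTR_XN		(1ul << 54)	// stand-in: execute-never
 *
 *	static uint64_t
 *	prot_to_nbits(int prot)
 *	{
 *		uint64_t nbits = 0;
 *
 *		if ((prot & VM_PROT_WRITE) == 0)
 *			nbits |= ATTR_AP_RO;	// no write: read-only
 *		if ((prot & VM_PROT_EXECUTE) == 0)
 *			nbits |= ATTR_XN;	// no execute: XN
 *		return (nbits);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		printf("%#lx\n", (unsigned long)prot_to_nbits(0));
 *		return (0);
 *	}
 *
 * This is why a request that keeps both write and execute is a no-op and
 * returns early in the function below.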
*/ void pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { vm_offset_t va, va_next; pd_entry_t *l0, *l1, *l2; pt_entry_t *l3p, l3, nbits; KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); if (prot == VM_PROT_NONE) { pmap_remove(pmap, sva, eva); return; } if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) == (VM_PROT_WRITE | VM_PROT_EXECUTE)) return; PMAP_LOCK(pmap); for (; sva < eva; sva = va_next) { l0 = pmap_l0(pmap, sva); if (pmap_load(l0) == 0) { va_next = (sva + L0_SIZE) & ~L0_OFFSET; if (va_next < sva) va_next = eva; continue; } l1 = pmap_l0_to_l1(l0, sva); if (pmap_load(l1) == 0) { va_next = (sva + L1_SIZE) & ~L1_OFFSET; if (va_next < sva) va_next = eva; continue; } va_next = (sva + L2_SIZE) & ~L2_OFFSET; if (va_next < sva) va_next = eva; l2 = pmap_l1_to_l2(l1, sva); if (pmap_load(l2) == 0) continue; if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) { l3p = pmap_demote_l2(pmap, l2, sva); if (l3p == NULL) continue; } KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE, ("pmap_protect: Invalid L2 entry after demotion")); if (va_next > eva) va_next = eva; va = va_next; for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++, sva += L3_SIZE) { l3 = pmap_load(l3p); if (!pmap_l3_valid(l3)) continue; nbits = 0; if ((prot & VM_PROT_WRITE) == 0) { if ((l3 & ATTR_SW_MANAGED) && pmap_page_dirty(l3)) { vm_page_dirty(PHYS_TO_VM_PAGE(l3 & ~ATTR_MASK)); } nbits |= ATTR_AP(ATTR_AP_RO); } if ((prot & VM_PROT_EXECUTE) == 0) nbits |= ATTR_XN; pmap_set(l3p, nbits); PTE_SYNC(l3p); /* XXX: Use pmap_invalidate_range */ pmap_invalidate_page(pmap, va); } } PMAP_UNLOCK(pmap); /* TODO: Only invalidate entries we are touching */ pmap_invalidate_all(pmap); } /* * Inserts the specified page table page into the specified pmap's collection * of idle page table pages. Each of a pmap's page table pages is responsible * for mapping a distinct range of virtual addresses. The pmap's collection is * ordered by this virtual address range. */ static __inline int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); return (vm_radix_insert(&pmap->pm_root, mpte)); } /* * Removes the page table page mapping the specified virtual address from the * specified pmap's collection of idle page table pages, and returns it. * Otherwise, returns NULL if there is no page table page corresponding to the * specified virtual address. */ static __inline vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); return (vm_radix_remove(&pmap->pm_root, pmap_l2_pindex(va))); } /* * Performs a break-before-make update of a pmap entry. This is needed when * either promoting or demoting pages to ensure the TLB doesn't get into an * inconsistent state. */ static void pmap_update_entry(pmap_t pmap, pd_entry_t *pte, pd_entry_t newpte, vm_offset_t va, vm_size_t size) { register_t intr; PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* * Ensure we don't get switched out with the page table in an * inconsistent state. We also need to ensure no interrupts fire * as they may make use of an address we are about to invalidate. 
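 *
 * An illustrative aside: the body below is the classic arm64
 * break-before-make sequence.  A sketch with stubbed primitives, showing
 * the ordering that keeps the TLB from ever holding two conflicting
 * translations for the range:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	typedef uint64_t pt_entry_t;
 *
 *	static void store(pt_entry_t *p, pt_entry_t v) { *p = v; }
 *	static void tlb_invalidate(void) { puts("invalidate"); }  // stub
 *
 *	static void
 *	update_entry(pt_entry_t *pte, pt_entry_t newpte)
 *	{
 *		store(pte, 0);		// break: remove old translation
 *		tlb_invalidate();	// flush any cached copy
 *		store(pte, newpte);	// make: install new translation
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		pt_entry_t pte = 0x1234;
 *
 *		update_entry(&pte, 0x5678);
 *		printf("%#lx\n", (unsigned long)pte);
 *		return (0);
 *	}
 *
 * In the real function the stores are pmap_load_clear() and
 * pmap_load_store() plus PTE_SYNC(), and the flush is
 * pmap_invalidate_range().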
 */
	intr = intr_disable();
	critical_enter();

	/* Clear the old mapping */
	pmap_load_clear(pte);
	PTE_SYNC(pte);
	pmap_invalidate_range(pmap, va, va + size);

	/* Create the new mapping */
	pmap_load_store(pte, newpte);
	PTE_SYNC(pte);

	critical_exit();
	intr_restore(intr);
}

/*
 * After promotion from 512 4KB page mappings to a single 2MB page mapping,
 * replace the many pv entries for the 4KB page mappings by a single pv entry
 * for the 2MB page mapping.
 */
static void
pmap_pv_promote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
    struct rwlock **lockp)
{
	struct md_page *pvh;
	pv_entry_t pv;
	vm_offset_t va_last;
	vm_page_t m;

	KASSERT((pa & L2_OFFSET) == 0,
	    ("pmap_pv_promote_l2: pa is not 2mpage aligned"));
	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);

	/*
	 * Transfer the first page's pv entry for this mapping to the 2mpage's
	 * pv list.  Aside from avoiding the cost of a call to get_pv_entry(),
	 * a transfer avoids the possibility that get_pv_entry() calls
	 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
	 * mappings that is being promoted.
	 */
	m = PHYS_TO_VM_PAGE(pa);
	va = va & ~L2_OFFSET;
	pv = pmap_pvh_remove(&m->md, pmap, va);
	KASSERT(pv != NULL, ("pmap_pv_promote_l2: pv not found"));
	pvh = pa_to_pvh(pa);
	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
	pvh->pv_gen++;
	/* Free the remaining NPTEPG - 1 pv entries. */
	va_last = va + L2_SIZE - PAGE_SIZE;
	do {
		m++;
		va += PAGE_SIZE;
		pmap_pvh_free(&m->md, pmap, va);
	} while (va < va_last);
}

/*
 * Tries to promote the 512, contiguous 4KB page mappings that are within a
 * single level 2 table entry to a single 2MB page mapping.  For promotion
 * to occur, two conditions must be met: (1) the 4KB page mappings must map
 * aligned, contiguous physical memory and (2) the 4KB page mappings must have
 * identical characteristics.
 */
static void
pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
    struct rwlock **lockp)
{
	pt_entry_t *firstl3, *l3, newl2, oldl3, pa;
	vm_page_t mpte;
	vm_offset_t sva;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	sva = va & ~L2_OFFSET;
	firstl3 = pmap_l2_to_l3(l2, sva);
	newl2 = pmap_load(firstl3);

	/* Check the alignment is valid */
	if (((newl2 & ~ATTR_MASK) & L2_OFFSET) != 0) {
		atomic_add_long(&pmap_l2_p_failures, 1);
		CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
		    " in pmap %p", va, pmap);
		return;
	}

	pa = newl2 + L2_SIZE - PAGE_SIZE;
	for (l3 = firstl3 + NL3PG - 1; l3 > firstl3; l3--) {
		oldl3 = pmap_load(l3);
		if (oldl3 != pa) {
			atomic_add_long(&pmap_l2_p_failures, 1);
			CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
			    " in pmap %p", va, pmap);
			return;
		}
		pa -= PAGE_SIZE;
	}

	/*
	 * Save the page table page in its current state until the L2
	 * mapping the superpage is demoted by pmap_demote_l2() or
	 * destroyed by pmap_remove_l3().
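 *
 * An illustrative aside: the descending loop above is the whole
 * promotion test: the 512 PTEs must be identical apart from physical
 * addresses stepping down by one page from "first PTE + L2_SIZE -
 * PAGE_SIZE".  A userspace sketch:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define NL3PG		512
 *	#define PAGE_SIZE	4096ul
 *	#define L2_SIZE		(NL3PG * PAGE_SIZE)
 *
 *	static int
 *	promotable(const uint64_t *firstl3)
 *	{
 *		uint64_t pa = firstl3[0] + L2_SIZE - PAGE_SIZE;
 *
 *		for (const uint64_t *l3 = firstl3 + NL3PG - 1;
 *		    l3 > firstl3; l3--) {
 *			if (*l3 != pa)	// attribute/contiguity mismatch
 *				return (0);
 *			pa -= PAGE_SIZE;
 *		}
 *		return (1);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		static uint64_t ptes[NL3PG];
 *
 *		for (int i = 0; i < NL3PG; i++)
 *			ptes[i] = 0x40000000ul + i * PAGE_SIZE;
 *		printf("%d\n", promotable(ptes));	// prints: 1
 *		return (0);
 *	}
 *
 * Attribute bits ride along in the comparison because they are part of
 * each 64-bit entry, which is how condition (2) is enforced.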
*/ mpte = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK); KASSERT(mpte >= vm_page_array && mpte < &vm_page_array[vm_page_array_size], ("pmap_promote_l2: page table page is out of range")); KASSERT(mpte->pindex == pmap_l2_pindex(va), ("pmap_promote_l2: page table page's pindex is wrong")); if (pmap_insert_pt_page(pmap, mpte)) { atomic_add_long(&pmap_l2_p_failures, 1); CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx in pmap %p", va, pmap); return; } if ((newl2 & ATTR_SW_MANAGED) != 0) pmap_pv_promote_l2(pmap, va, newl2 & ~ATTR_MASK, lockp); newl2 &= ~ATTR_DESCR_MASK; newl2 |= L2_BLOCK; pmap_update_entry(pmap, l2, newl2, sva, L2_SIZE); atomic_add_long(&pmap_l2_promotions, 1); CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p", va, pmap); } /* * Insert the given physical page (p) at * the specified virtual address (v) in the * target physical map with the protection requested. * * If specified, the page will be wired down, meaning * that the related pte can not be reclaimed. * * NB: This is the only routine which MAY NOT lazy-evaluate * or lose information. That is, this routine must actually * insert this page into the given map NOW. */ int pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, u_int flags, int8_t psind __unused) { struct rwlock *lock; pd_entry_t *pde; pt_entry_t new_l3, orig_l3; pt_entry_t *l2, *l3; pv_entry_t pv; vm_paddr_t opa, pa, l1_pa, l2_pa, l3_pa; vm_page_t mpte, om, l1_m, l2_m, l3_m; boolean_t nosleep; int lvl; va = trunc_page(va); if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) VM_OBJECT_ASSERT_LOCKED(m->object); pa = VM_PAGE_TO_PHYS(m); new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) | L3_PAGE); if ((prot & VM_PROT_WRITE) == 0) new_l3 |= ATTR_AP(ATTR_AP_RO); if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == DEVICE_MEMORY) new_l3 |= ATTR_XN; if ((flags & PMAP_ENTER_WIRED) != 0) new_l3 |= ATTR_SW_WIRED; if (va < VM_MAXUSER_ADDRESS) new_l3 |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN; CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa); mpte = NULL; lock = NULL; PMAP_LOCK(pmap); pde = pmap_pde(pmap, va, &lvl); if (pde != NULL && lvl == 1) { l2 = pmap_l1_to_l2(pde, va); if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK && (l3 = pmap_demote_l2_locked(pmap, l2, va & ~L2_OFFSET, &lock)) != NULL) { l3 = &l3[pmap_l3_index(va)]; if (va < VM_MAXUSER_ADDRESS) { mpte = PHYS_TO_VM_PAGE( pmap_load(l2) & ~ATTR_MASK); mpte->wire_count++; } goto havel3; } } if (va < VM_MAXUSER_ADDRESS) { nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0; mpte = pmap_alloc_l3(pmap, va, nosleep ? 
NULL : &lock); if (mpte == NULL && nosleep) { CTR0(KTR_PMAP, "pmap_enter: mpte == NULL"); if (lock != NULL) rw_wunlock(lock); PMAP_UNLOCK(pmap); return (KERN_RESOURCE_SHORTAGE); } pde = pmap_pde(pmap, va, &lvl); KASSERT(pde != NULL, ("pmap_enter: Invalid page entry, va: 0x%lx", va)); KASSERT(lvl == 2, ("pmap_enter: Invalid level %d", lvl)); l3 = pmap_l2_to_l3(pde, va); } else { /* * If we get a level 2 pde it must point to a level 3 entry * otherwise we will need to create the intermediate tables */ if (lvl < 2) { switch(lvl) { default: case -1: /* Get the l0 pde to update */ pde = pmap_l0(pmap, va); KASSERT(pde != NULL, ("...")); l1_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if (l1_m == NULL) panic("pmap_enter: l1 pte_m == NULL"); if ((l1_m->flags & PG_ZERO) == 0) pmap_zero_page(l1_m); l1_pa = VM_PAGE_TO_PHYS(l1_m); pmap_load_store(pde, l1_pa | L0_TABLE); PTE_SYNC(pde); /* FALLTHROUGH */ case 0: /* Get the l1 pde to update */ pde = pmap_l1_to_l2(pde, va); KASSERT(pde != NULL, ("...")); l2_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if (l2_m == NULL) panic("pmap_enter: l2 pte_m == NULL"); if ((l2_m->flags & PG_ZERO) == 0) pmap_zero_page(l2_m); l2_pa = VM_PAGE_TO_PHYS(l2_m); pmap_load_store(pde, l2_pa | L1_TABLE); PTE_SYNC(pde); /* FALLTHROUGH */ case 1: /* Get the l2 pde to update */ pde = pmap_l1_to_l2(pde, va); l3_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if (l3_m == NULL) panic("pmap_enter: l3 pte_m == NULL"); if ((l3_m->flags & PG_ZERO) == 0) pmap_zero_page(l3_m); l3_pa = VM_PAGE_TO_PHYS(l3_m); pmap_load_store(pde, l3_pa | L2_TABLE); PTE_SYNC(pde); break; } } l3 = pmap_l2_to_l3(pde, va); pmap_invalidate_page(pmap, va); } havel3: om = NULL; orig_l3 = pmap_load(l3); opa = orig_l3 & ~ATTR_MASK; /* * Is the specified virtual address already mapped? */ if (pmap_l3_valid(orig_l3)) { /* * Wiring change, just update stats. We don't worry about * wiring PT pages as they remain resident as long as there * are valid mappings in them. Hence, if a user page is wired, * the PT page will be also. */ if ((flags & PMAP_ENTER_WIRED) != 0 && (orig_l3 & ATTR_SW_WIRED) == 0) pmap->pm_stats.wired_count++; else if ((flags & PMAP_ENTER_WIRED) == 0 && (orig_l3 & ATTR_SW_WIRED) != 0) pmap->pm_stats.wired_count--; /* * Remove the extra PT page reference. */ if (mpte != NULL) { mpte->wire_count--; KASSERT(mpte->wire_count > 0, ("pmap_enter: missing reference to page table page," " va: 0x%lx", va)); } /* * Has the physical page changed? */ if (opa == pa) { /* * No, might be a protection or wiring change. */ if ((orig_l3 & ATTR_SW_MANAGED) != 0) { new_l3 |= ATTR_SW_MANAGED; if ((new_l3 & ATTR_AP(ATTR_AP_RW)) == ATTR_AP(ATTR_AP_RW)) { vm_page_aflag_set(m, PGA_WRITEABLE); } } goto validate; } - - /* Flush the cache, there might be uncommitted data in it */ - if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(orig_l3)) - cpu_dcache_wb_range(va, L3_SIZE); } else { /* * Increment the counters. */ if ((new_l3 & ATTR_SW_WIRED) != 0) pmap->pm_stats.wired_count++; pmap_resident_count_inc(pmap, 1); } /* * Enter on the PV list if part of our managed memory. 
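 *
 * An illustrative aside: the fallthrough switch above builds any missing
 * intermediate page-table levels for kernel addresses.  Its shape,
 * reduced to a runnable sketch with stubbed allocations:
 *
 *	#include <stdio.h>
 *
 *	static void
 *	build_missing_levels(int lvl)	// lvl: deepest existing level
 *	{
 *		switch (lvl) {
 *		case -1:
 *			puts("alloc L1 table, link from L0");	// stub
 *			// FALLTHROUGH
 *		case 0:
 *			puts("alloc L2 table, link from L1");	// stub
 *			// FALLTHROUGH
 *		case 1:
 *			puts("alloc L3 table, link from L2");	// stub
 *			break;
 *		}
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		build_missing_levels(-1);	// nothing exists: build all
 *		return (0);
 *	}
 *
 * Each real case allocates a zeroed, wired page and installs it with
 * pmap_load_store() + PTE_SYNC() before falling through to the next
 * level.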
*/ if ((m->oflags & VPO_UNMANAGED) == 0) { new_l3 |= ATTR_SW_MANAGED; pv = get_pv_entry(pmap, &lock); pv->pv_va = va; CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; if ((new_l3 & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) vm_page_aflag_set(m, PGA_WRITEABLE); } /* * Update the L3 entry. */ if (orig_l3 != 0) { validate: orig_l3 = pmap_load(l3); opa = orig_l3 & ~ATTR_MASK; if (opa != pa) { pmap_update_entry(pmap, l3, new_l3, va, PAGE_SIZE); if ((orig_l3 & ATTR_SW_MANAGED) != 0) { om = PHYS_TO_VM_PAGE(opa); if (pmap_page_dirty(orig_l3)) vm_page_dirty(om); if ((orig_l3 & ATTR_AF) != 0) vm_page_aflag_set(om, PGA_REFERENCED); CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa); pmap_pvh_free(&om->md, pmap, va); if ((om->aflags & PGA_WRITEABLE) != 0 && TAILQ_EMPTY(&om->md.pv_list) && ((om->flags & PG_FICTITIOUS) != 0 || TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))) vm_page_aflag_clear(om, PGA_WRITEABLE); } } else { pmap_load_store(l3, new_l3); PTE_SYNC(l3); pmap_invalidate_page(pmap, va); if (pmap_page_dirty(orig_l3) && (orig_l3 & ATTR_SW_MANAGED) != 0) vm_page_dirty(m); } } else { pmap_load_store(l3, new_l3); } PTE_SYNC(l3); pmap_invalidate_page(pmap, va); if (pmap != pmap_kernel()) { if (pmap == &curproc->p_vmspace->vm_pmap && (prot & VM_PROT_EXECUTE) != 0) cpu_icache_sync_range(va, PAGE_SIZE); if ((mpte == NULL || mpte->wire_count == NL3PG) && pmap_superpages_enabled() && (m->flags & PG_FICTITIOUS) == 0 && vm_reserv_level_iffullpop(m) == 0) { pmap_promote_l2(pmap, pde, va, &lock); } } if (lock != NULL) rw_wunlock(lock); PMAP_UNLOCK(pmap); return (KERN_SUCCESS); } /* * Maps a sequence of resident pages belonging to the same object. * The sequence begins with the given page m_start. This page is * mapped at the given virtual address start. Each subsequent page is * mapped at a virtual address that is offset from start by the same * amount as the page is offset from m_start within the object. The * last page in the sequence is the page with the largest offset from * m_start that can be mapped at a virtual address less than the given * virtual address end. Not every virtual page between start and end * is mapped; only those for which a resident page exists with the * corresponding offset from m_start are mapped. */ void pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_page_t m_start, vm_prot_t prot) { struct rwlock *lock; vm_offset_t va; vm_page_t m, mpte; vm_pindex_t diff, psize; VM_OBJECT_ASSERT_LOCKED(m_start->object); psize = atop(end - start); mpte = NULL; m = m_start; lock = NULL; PMAP_LOCK(pmap); while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { va = start + ptoa(diff); mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte, &lock); m = TAILQ_NEXT(m, listq); } if (lock != NULL) rw_wunlock(lock); PMAP_UNLOCK(pmap); } /* * this code makes some *MAJOR* assumptions: * 1. Current pmap & pmap exists. * 2. Not wired. * 3. Read access. * 4. No page table pages. * but is *MUCH* faster than pmap_enter... 
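 *
 * An illustrative aside: pmap_enter_object() above places each resident
 * page at "start" plus its page offset within the object:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define PAGE_SHIFT	12
 *	#define ptoa(x)		((uint64_t)(x) << PAGE_SHIFT)
 *
 *	int
 *	main(void)
 *	{
 *		uint64_t start = 0x10000000;		// hypothetical
 *		uint64_t m_start_pindex = 100, m_pindex = 103;
 *
 *		// prints 0x10003000: page 103 lands 3 pages past start
 *		printf("%#lx\n", (unsigned long)
 *		    (start + ptoa(m_pindex - m_start_pindex)));
 *		return (0);
 *	}
 *
 * Pages whose offset would land at or beyond "end" are simply not
 * mapped, as the psize bound enforces.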
*/ void pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) { struct rwlock *lock; lock = NULL; PMAP_LOCK(pmap); (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock); if (lock != NULL) rw_wunlock(lock); PMAP_UNLOCK(pmap); } static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp) { struct spglist free; pd_entry_t *pde; pt_entry_t *l2, *l3; vm_paddr_t pa; int lvl; KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || (m->oflags & VPO_UNMANAGED) != 0, ("pmap_enter_quick_locked: managed mapping within the clean submap")); PMAP_LOCK_ASSERT(pmap, MA_OWNED); CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va); /* * In the case that a page table page is not * resident, we are creating it here. */ if (va < VM_MAXUSER_ADDRESS) { vm_pindex_t l2pindex; /* * Calculate pagetable page index */ l2pindex = pmap_l2_pindex(va); if (mpte && (mpte->pindex == l2pindex)) { mpte->wire_count++; } else { /* * Get the l2 entry */ pde = pmap_pde(pmap, va, &lvl); /* * If the page table page is mapped, we just increment * the hold count, and activate it. Otherwise, we * attempt to allocate a page table page. If this * attempt fails, we don't retry. Instead, we give up. */ if (lvl == 1) { l2 = pmap_l1_to_l2(pde, va); if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) return (NULL); } if (lvl == 2 && pmap_load(pde) != 0) { mpte = PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK); mpte->wire_count++; } else { /* * Pass NULL instead of the PV list lock * pointer, because we don't intend to sleep. */ mpte = _pmap_alloc_l3(pmap, l2pindex, NULL); if (mpte == NULL) return (mpte); } } l3 = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte)); l3 = &l3[pmap_l3_index(va)]; } else { mpte = NULL; pde = pmap_pde(kernel_pmap, va, &lvl); KASSERT(pde != NULL, ("pmap_enter_quick_locked: Invalid page entry, va: 0x%lx", va)); KASSERT(lvl == 2, ("pmap_enter_quick_locked: Invalid level %d", lvl)); l3 = pmap_l2_to_l3(pde, va); } if (pmap_load(l3) != 0) { if (mpte != NULL) { mpte->wire_count--; mpte = NULL; } return (mpte); } /* * Enter on the PV list if part of our managed memory. */ if ((m->oflags & VPO_UNMANAGED) == 0 && !pmap_try_insert_pv_entry(pmap, va, m, lockp)) { if (mpte != NULL) { SLIST_INIT(&free); if (pmap_unwire_l3(pmap, va, mpte, &free)) { pmap_invalidate_page(pmap, va); pmap_free_zero_pages(&free); } mpte = NULL; } return (mpte); } /* * Increment counters */ pmap_resident_count_inc(pmap, 1); pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) | ATTR_AP(ATTR_AP_RO) | L3_PAGE; if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == DEVICE_MEMORY) pa |= ATTR_XN; else if (va < VM_MAXUSER_ADDRESS) pa |= ATTR_PXN; /* * Now validate mapping with RO protection */ if ((m->oflags & VPO_UNMANAGED) == 0) pa |= ATTR_SW_MANAGED; pmap_load_store(l3, pa); PTE_SYNC(l3); pmap_invalidate_page(pmap, va); return (mpte); } /* * This code maps large physical mmap regions into the * processor address space. Note that some shortcuts * are taken, but the code works. */ void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size) { VM_OBJECT_ASSERT_WLOCKED(object); KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, ("pmap_object_init_pt: non-device object")); } /* * Clear the wired attribute from the mappings for the specified range of * addresses in the given pmap. Every valid mapping within that range * must have the wired attribute set. 
In contrast, invalid mappings * cannot have the wired attribute set, so they are ignored. * * The wired attribute of the page table entry is not a hardware feature, * so there is no need to invalidate any TLB entries. */ void pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { vm_offset_t va_next; pd_entry_t *l0, *l1, *l2; pt_entry_t *l3; PMAP_LOCK(pmap); for (; sva < eva; sva = va_next) { l0 = pmap_l0(pmap, sva); if (pmap_load(l0) == 0) { va_next = (sva + L0_SIZE) & ~L0_OFFSET; if (va_next < sva) va_next = eva; continue; } l1 = pmap_l0_to_l1(l0, sva); if (pmap_load(l1) == 0) { va_next = (sva + L1_SIZE) & ~L1_OFFSET; if (va_next < sva) va_next = eva; continue; } va_next = (sva + L2_SIZE) & ~L2_OFFSET; if (va_next < sva) va_next = eva; l2 = pmap_l1_to_l2(l1, sva); if (pmap_load(l2) == 0) continue; if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) { l3 = pmap_demote_l2(pmap, l2, sva); if (l3 == NULL) continue; } KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE, ("pmap_unwire: Invalid l2 entry after demotion")); if (va_next > eva) va_next = eva; for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++, sva += L3_SIZE) { if (pmap_load(l3) == 0) continue; if ((pmap_load(l3) & ATTR_SW_WIRED) == 0) panic("pmap_unwire: l3 %#jx is missing " "ATTR_SW_WIRED", (uintmax_t)pmap_load(l3)); /* * PG_W must be cleared atomically. Although the pmap * lock synchronizes access to PG_W, another processor * could be setting PG_M and/or PG_A concurrently. */ atomic_clear_long(l3, ATTR_SW_WIRED); pmap->pm_stats.wired_count--; } } PMAP_UNLOCK(pmap); } /* * Copy the range specified by src_addr/len * from the source map to the range dst_addr/len * in the destination map. * * This routine is only advisory and need not do anything. */ void pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) { } /* * pmap_zero_page zeros the specified hardware page by mapping * the page into KVM and using bzero to clear its contents. */ void pmap_zero_page(vm_page_t m) { vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); pagezero((void *)va); } /* * pmap_zero_page_area zeros the specified hardware page by mapping * the page into KVM and using bzero to clear its contents. * * off and size may not cover an area beyond a single hardware page. */ void pmap_zero_page_area(vm_page_t m, int off, int size) { vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); if (off == 0 && size == PAGE_SIZE) pagezero((void *)va); else bzero((char *)va + off, size); } /* * pmap_copy_page copies the specified (machine independent) * page by mapping the page into virtual memory and using * bcopy to copy the page, one machine dependent page at a * time. 
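 *
 * An illustrative aside: pmap_copy_pages() below generalizes this to
 * copies that straddle page boundaries, clamping each step to the
 * smaller remainder of the two current pages.  A flat-buffer sketch:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	#define PAGE_SIZE	4096
 *	#define PAGE_MASK	(PAGE_SIZE - 1)
 *	#define MIN(a, b)	((a) < (b) ? (a) : (b))
 *
 *	static void
 *	copy_pages(const char *a, size_t a_off, char *b, size_t b_off,
 *	    int xfersize)
 *	{
 *		while (xfersize > 0) {
 *			int cnt = MIN(xfersize,
 *			    PAGE_SIZE - (int)(a_off & PAGE_MASK));
 *
 *			cnt = MIN(cnt,
 *			    PAGE_SIZE - (int)(b_off & PAGE_MASK));
 *			memcpy(b + b_off, a + a_off, cnt);
 *			a_off += cnt;
 *			b_off += cnt;
 *			xfersize -= cnt;
 *		}
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		static char src[3 * PAGE_SIZE] = "hello";
 *		static char dst[3 * PAGE_SIZE];
 *
 *		copy_pages(src, 0, dst, 100, 2 * PAGE_SIZE);
 *		printf("%s\n", dst + 100);	// prints: hello
 *		return (0);
 *	}
 *
 * The kernel version additionally resolves each page through the DMAP
 * and panics if a page is not DMAP-covered.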
*/ void pmap_copy_page(vm_page_t msrc, vm_page_t mdst) { vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc)); vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst)); pagecopy((void *)src, (void *)dst); } int unmapped_buf_allowed = 1; void pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], vm_offset_t b_offset, int xfersize) { void *a_cp, *b_cp; vm_page_t m_a, m_b; vm_paddr_t p_a, p_b; vm_offset_t a_pg_offset, b_pg_offset; int cnt; while (xfersize > 0) { a_pg_offset = a_offset & PAGE_MASK; m_a = ma[a_offset >> PAGE_SHIFT]; p_a = m_a->phys_addr; b_pg_offset = b_offset & PAGE_MASK; m_b = mb[b_offset >> PAGE_SHIFT]; p_b = m_b->phys_addr; cnt = min(xfersize, PAGE_SIZE - a_pg_offset); cnt = min(cnt, PAGE_SIZE - b_pg_offset); if (__predict_false(!PHYS_IN_DMAP(p_a))) { panic("!DMAP a %lx", p_a); } else { a_cp = (char *)PHYS_TO_DMAP(p_a) + a_pg_offset; } if (__predict_false(!PHYS_IN_DMAP(p_b))) { panic("!DMAP b %lx", p_b); } else { b_cp = (char *)PHYS_TO_DMAP(p_b) + b_pg_offset; } bcopy(a_cp, b_cp, cnt); a_offset += cnt; b_offset += cnt; xfersize -= cnt; } } vm_offset_t pmap_quick_enter_page(vm_page_t m) { return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m))); } void pmap_quick_remove_page(vm_offset_t addr) { } /* * Returns true if the pmap's pv is one of the first * 16 pvs linked to from this page. This count may * be changed upwards or downwards in the future; it * is only necessary that true be returned for a small * subset of pmaps for proper page aging. */ boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m) { struct md_page *pvh; struct rwlock *lock; pv_entry_t pv; int loops = 0; boolean_t rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_page_exists_quick: page %p is not managed", m)); rv = FALSE; lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_rlock(lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { if (PV_PMAP(pv) == pmap) { rv = TRUE; break; } loops++; if (loops >= 16) break; } if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) { pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { if (PV_PMAP(pv) == pmap) { rv = TRUE; break; } loops++; if (loops >= 16) break; } } rw_runlock(lock); return (rv); } /* * pmap_page_wired_mappings: * * Return the number of managed mappings to the given physical page * that are wired. */ int pmap_page_wired_mappings(vm_page_t m) { struct rwlock *lock; struct md_page *pvh; pmap_t pmap; pt_entry_t *pte; pv_entry_t pv; int count, lvl, md_gen, pvh_gen; if ((m->oflags & VPO_UNMANAGED) != 0) return (0); lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_rlock(lock); restart: count = 0; TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { md_gen = m->md.pv_gen; rw_runlock(lock); PMAP_LOCK(pmap); rw_rlock(lock); if (md_gen != m->md.pv_gen) { PMAP_UNLOCK(pmap); goto restart; } } pte = pmap_pte(pmap, pv->pv_va, &lvl); if (pte != NULL && (pmap_load(pte) & ATTR_SW_WIRED) != 0) count++; PMAP_UNLOCK(pmap); } if ((m->flags & PG_FICTITIOUS) == 0) { pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { md_gen = m->md.pv_gen; pvh_gen = pvh->pv_gen; rw_runlock(lock); PMAP_LOCK(pmap); rw_rlock(lock); if (md_gen != m->md.pv_gen || pvh_gen != pvh->pv_gen) { PMAP_UNLOCK(pmap); goto restart; } } pte = pmap_pte(pmap, pv->pv_va, &lvl); if (pte != NULL && (pmap_load(pte) & ATTR_SW_WIRED) != 0) count++; PMAP_UNLOCK(pmap); } } rw_runlock(lock); return (count); } /* * Destroy all managed, non-wired mappings in the given user-space * pmap. 
This pmap cannot be active on any processor besides the * caller. * * This function cannot be applied to the kernel pmap. Moreover, it * is not intended for general use. It is only to be used during * process termination. Consequently, it can be implemented in ways * that make it faster than pmap_remove(). First, it can more quickly * destroy mappings by iterating over the pmap's collection of PV * entries, rather than searching the page table. Second, it doesn't * have to test and clear the page table entries atomically, because * no processor is currently accessing the user address space. In * particular, a page table entry's dirty bit won't change state once * this function starts. */ void pmap_remove_pages(pmap_t pmap) { pd_entry_t *pde; pt_entry_t *pte, tpte; struct spglist free; vm_page_t m, ml3, mt; pv_entry_t pv; struct md_page *pvh; struct pv_chunk *pc, *npc; struct rwlock *lock; int64_t bit; uint64_t inuse, bitmask; int allfree, field, freed, idx, lvl; vm_paddr_t pa; lock = NULL; SLIST_INIT(&free); PMAP_LOCK(pmap); TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { allfree = 1; freed = 0; for (field = 0; field < _NPCM; field++) { inuse = ~pc->pc_map[field] & pc_freemask[field]; while (inuse != 0) { bit = ffsl(inuse) - 1; bitmask = 1UL << bit; idx = field * 64 + bit; pv = &pc->pc_pventry[idx]; inuse &= ~bitmask; pde = pmap_pde(pmap, pv->pv_va, &lvl); KASSERT(pde != NULL, ("Attempting to remove an unmapped page")); switch(lvl) { case 1: pte = pmap_l1_to_l2(pde, pv->pv_va); tpte = pmap_load(pte); KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK, ("Attempting to remove an invalid " "block: %lx", tpte)); tpte = pmap_load(pte); break; case 2: pte = pmap_l2_to_l3(pde, pv->pv_va); tpte = pmap_load(pte); KASSERT((tpte & ATTR_DESCR_MASK) == L3_PAGE, ("Attempting to remove an invalid " "page: %lx", tpte)); break; default: panic( "Invalid page directory level: %d", lvl); } /* * We cannot remove wired pages from a process' mapping at this time */ if (tpte & ATTR_SW_WIRED) { allfree = 0; continue; } pa = tpte & ~ATTR_MASK; m = PHYS_TO_VM_PAGE(pa); KASSERT(m->phys_addr == pa, ("vm_page_t %p phys_addr mismatch %016jx %016jx", m, (uintmax_t)m->phys_addr, (uintmax_t)tpte)); KASSERT((m->flags & PG_FICTITIOUS) != 0 || m < &vm_page_array[vm_page_array_size], ("pmap_remove_pages: bad pte %#jx", (uintmax_t)tpte)); - if (pmap_is_current(pmap)) { - if (lvl == 2 && - pmap_l3_valid_cacheable(tpte)) { - cpu_dcache_wb_range(pv->pv_va, - L3_SIZE); - } else if (lvl == 1 && - pmap_pte_valid_cacheable(tpte)) { - cpu_dcache_wb_range(pv->pv_va, - L2_SIZE); - } - } pmap_load_clear(pte); PTE_SYNC(pte); - pmap_invalidate_page(pmap, pv->pv_va); /* * Update the vm_page_t clean/reference bits. 
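 *
 * An illustrative aside: the scan above visits only the allocated
 * entries of each chunk by inverting the free bitmap and peeling bits
 * off with ffsl().  A userspace sketch:
 *
 *	#include <stdio.h>
 *	#include <strings.h>
 *
 *	int
 *	main(void)
 *	{
 *		unsigned long pc_map = ~0x5ul;	// slots 0 and 2 in use
 *		unsigned long inuse = ~pc_map;	// set bit == allocated
 *
 *		while (inuse != 0) {
 *			int bit = ffsl(inuse) - 1;
 *
 *			inuse &= ~(1ul << bit);
 *			printf("entry %d\n", bit);	// entry 0, entry 2
 *		}
 *		return (0);
 *	}
 *
 * The kernel also masks with pc_freemask[] so bits beyond the valid
 * slots are never visited.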
 */
				if ((tpte & ATTR_AP_RW_BIT) ==
				    ATTR_AP(ATTR_AP_RW)) {
					switch (lvl) {
					case 1:
						for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
							vm_page_dirty(mt);
						break;
					case 2:
						vm_page_dirty(m);
						break;
					}
				}

				CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);

				/* Mark free */
				pc->pc_map[field] |= bitmask;
				switch (lvl) {
				case 1:
					pmap_resident_count_dec(pmap,
					    L2_SIZE / PAGE_SIZE);
					pvh = pa_to_pvh(tpte & ~ATTR_MASK);
					TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
					pvh->pv_gen++;
					if (TAILQ_EMPTY(&pvh->pv_list)) {
						for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
							if ((mt->aflags & PGA_WRITEABLE) != 0 &&
							    TAILQ_EMPTY(&mt->md.pv_list))
								vm_page_aflag_clear(mt, PGA_WRITEABLE);
					}
					ml3 = pmap_remove_pt_page(pmap,
					    pv->pv_va);
					if (ml3 != NULL) {
						pmap_resident_count_dec(pmap, 1);
						KASSERT(ml3->wire_count == NL3PG,
						    ("pmap_remove_pages: l3 page wire count error"));
						ml3->wire_count = 0;
						pmap_add_delayed_free_list(ml3,
						    &free, FALSE);
						atomic_subtract_int(
						    &vm_cnt.v_wire_count, 1);
					}
					break;
				case 2:
					pmap_resident_count_dec(pmap, 1);
					TAILQ_REMOVE(&m->md.pv_list, pv,
					    pv_next);
					m->md.pv_gen++;
					if ((m->aflags & PGA_WRITEABLE) != 0 &&
					    TAILQ_EMPTY(&m->md.pv_list) &&
					    (m->flags & PG_FICTITIOUS) == 0) {
						pvh = pa_to_pvh(
						    VM_PAGE_TO_PHYS(m));
						if (TAILQ_EMPTY(&pvh->pv_list))
							vm_page_aflag_clear(m,
							    PGA_WRITEABLE);
					}
					break;
				}
				pmap_unuse_l3(pmap, pv->pv_va,
				    pmap_load(pde), &free);
				freed++;
			}
		}
		PV_STAT(atomic_add_long(&pv_entry_frees, freed));
		PV_STAT(atomic_add_int(&pv_entry_spare, freed));
		PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
		if (allfree) {
			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
			free_pv_chunk(pc);
		}
	}
	pmap_invalidate_all(pmap);
	if (lock != NULL)
		rw_wunlock(lock);
	PMAP_UNLOCK(pmap);
	pmap_free_zero_pages(&free);
}

/*
 * This is used to check if a page has been accessed or modified.  As we
 * don't have a bit to see if it has been modified, we have to assume it
 * has been if the page is read/write.
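 *
 * An illustrative aside: pmap_page_test_mappings() below builds a single
 * mask/value pair and applies it to each mapping's PTE.  A sketch with
 * stand-in bit values (not the real arm64 encodings):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define ATTR_AP_RW_BIT	(1ul << 7)	// stand-in: 0 = RW
 *	#define ATTR_AP_RW	0ul
 *	#define ATTR_AF		(1ul << 10)	// stand-in: access flag
 *
 *	static int
 *	test_pte(uint64_t pte, int modified, int accessed)
 *	{
 *		uint64_t mask = 0, value = 0;
 *
 *		if (modified) {		// writable => assume dirty
 *			mask |= ATTR_AP_RW_BIT;
 *			value |= ATTR_AP_RW;
 *		}
 *		if (accessed) {
 *			mask |= ATTR_AF;
 *			value |= ATTR_AF;
 *		}
 *		return ((pte & mask) == value);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		printf("%d\n", test_pte(ATTR_AF, 1, 1));	// prints: 1
 *		return (0);
 *	}
 *
 * The real code also folds ATTR_DESCR_MASK into the accessed test so
 * only valid page/block entries match.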
*/ static boolean_t pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified) { struct rwlock *lock; pv_entry_t pv; struct md_page *pvh; pt_entry_t *pte, mask, value; pmap_t pmap; int lvl, md_gen, pvh_gen; boolean_t rv; rv = FALSE; lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_rlock(lock); restart: TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { md_gen = m->md.pv_gen; rw_runlock(lock); PMAP_LOCK(pmap); rw_rlock(lock); if (md_gen != m->md.pv_gen) { PMAP_UNLOCK(pmap); goto restart; } } pte = pmap_pte(pmap, pv->pv_va, &lvl); KASSERT(lvl == 3, ("pmap_page_test_mappings: Invalid level %d", lvl)); mask = 0; value = 0; if (modified) { mask |= ATTR_AP_RW_BIT; value |= ATTR_AP(ATTR_AP_RW); } if (accessed) { mask |= ATTR_AF | ATTR_DESCR_MASK; value |= ATTR_AF | L3_PAGE; } rv = (pmap_load(pte) & mask) == value; PMAP_UNLOCK(pmap); if (rv) goto out; } if ((m->flags & PG_FICTITIOUS) == 0) { pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { md_gen = m->md.pv_gen; pvh_gen = pvh->pv_gen; rw_runlock(lock); PMAP_LOCK(pmap); rw_rlock(lock); if (md_gen != m->md.pv_gen || pvh_gen != pvh->pv_gen) { PMAP_UNLOCK(pmap); goto restart; } } pte = pmap_pte(pmap, pv->pv_va, &lvl); KASSERT(lvl == 2, ("pmap_page_test_mappings: Invalid level %d", lvl)); mask = 0; value = 0; if (modified) { mask |= ATTR_AP_RW_BIT; value |= ATTR_AP(ATTR_AP_RW); } if (accessed) { mask |= ATTR_AF | ATTR_DESCR_MASK; value |= ATTR_AF | L2_BLOCK; } rv = (pmap_load(pte) & mask) == value; PMAP_UNLOCK(pmap); if (rv) goto out; } } out: rw_runlock(lock); return (rv); } /* * pmap_is_modified: * * Return whether or not the specified physical page was modified * in any physical maps. */ boolean_t pmap_is_modified(vm_page_t m) { KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_is_modified: page %p is not managed", m)); /* * If the page is not exclusive busied, then PGA_WRITEABLE cannot be * concurrently set while the object is locked. Thus, if PGA_WRITEABLE * is clear, no PTEs can have PG_M set. */ VM_OBJECT_ASSERT_WLOCKED(m->object); if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) return (FALSE); return (pmap_page_test_mappings(m, FALSE, TRUE)); } /* * pmap_is_prefaultable: * * Return whether or not the specified virtual address is eligible * for prefault. */ boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) { pt_entry_t *pte; boolean_t rv; int lvl; rv = FALSE; PMAP_LOCK(pmap); pte = pmap_pte(pmap, addr, &lvl); if (pte != NULL && pmap_load(pte) != 0) { rv = TRUE; } PMAP_UNLOCK(pmap); return (rv); } /* * pmap_is_referenced: * * Return whether or not the specified physical page was referenced * in any physical maps. */ boolean_t pmap_is_referenced(vm_page_t m) { KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_is_referenced: page %p is not managed", m)); return (pmap_page_test_mappings(m, TRUE, FALSE)); } /* * Clear the write and modified bits in each of the given page's mappings. */ void pmap_remove_write(vm_page_t m) { struct md_page *pvh; pmap_t pmap; struct rwlock *lock; pv_entry_t next_pv, pv; pt_entry_t oldpte, *pte; vm_offset_t va; int lvl, md_gen, pvh_gen; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_remove_write: page %p is not managed", m)); /* * If the page is not exclusive busied, then PGA_WRITEABLE cannot be * set by another thread while the object is locked. Thus, * if PGA_WRITEABLE is clear, no page table entries need updating. 
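 *
 * An illustrative aside: the write-removal loop below downgrades a PTE
 * with compare-and-set so a concurrent hardware or software update is
 * never lost.  A C11 sketch of the same retry pattern:
 *
 *	#include <stdatomic.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define ATTR_AP_RO	(1ul << 7)	// stand-in read-only bit
 *
 *	static _Atomic uint64_t pte = 0x40000000;
 *
 *	int
 *	main(void)
 *	{
 *		uint64_t old = atomic_load(&pte);
 *
 *		// on failure 'old' is refreshed and we retry
 *		while (!atomic_compare_exchange_weak(&pte, &old,
 *		    old | ATTR_AP_RO))
 *			;
 *		printf("%#lx\n", (unsigned long)atomic_load(&pte));
 *		return (0);
 *	}
 *
 * In the kernel this is the atomic_cmpset_long() loop; on success the
 * page is dirtied if ATTR_AF was set and the TLB entry is invalidated.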
*/ VM_OBJECT_ASSERT_WLOCKED(m->object); if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) return; lock = VM_PAGE_TO_PV_LIST_LOCK(m); pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(VM_PAGE_TO_PHYS(m)); retry_pv_loop: rw_wlock(lock); TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { pvh_gen = pvh->pv_gen; rw_wunlock(lock); PMAP_LOCK(pmap); rw_wlock(lock); if (pvh_gen != pvh->pv_gen) { PMAP_UNLOCK(pmap); rw_wunlock(lock); goto retry_pv_loop; } } va = pv->pv_va; pte = pmap_pte(pmap, pv->pv_va, &lvl); if ((pmap_load(pte) & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) pmap_demote_l2_locked(pmap, pte, va & ~L2_OFFSET, &lock); KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m), ("inconsistent pv lock %p %p for page %p", lock, VM_PAGE_TO_PV_LIST_LOCK(m), m)); PMAP_UNLOCK(pmap); } TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { pvh_gen = pvh->pv_gen; md_gen = m->md.pv_gen; rw_wunlock(lock); PMAP_LOCK(pmap); rw_wlock(lock); if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { PMAP_UNLOCK(pmap); rw_wunlock(lock); goto retry_pv_loop; } } pte = pmap_pte(pmap, pv->pv_va, &lvl); retry: oldpte = pmap_load(pte); if ((oldpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) { if (!atomic_cmpset_long(pte, oldpte, oldpte | ATTR_AP(ATTR_AP_RO))) goto retry; if ((oldpte & ATTR_AF) != 0) vm_page_dirty(m); pmap_invalidate_page(pmap, pv->pv_va); } PMAP_UNLOCK(pmap); } rw_wunlock(lock); vm_page_aflag_clear(m, PGA_WRITEABLE); } static __inline boolean_t safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte) { return (FALSE); } /* * pmap_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. * It is not necessary for every reference bit to be cleared, but it * is necessary that 0 only be returned when there are truly no * reference bits set. * * As an optimization, update the page's dirty field if a modified bit is * found while counting reference bits. This opportunistic update can be * performed at low cost and can eliminate the need for some future calls * to pmap_is_modified(). However, since this function stops after * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some * dirty pages. Those dirty pages will only be detected by a future call * to pmap_is_modified(). */ int pmap_ts_referenced(vm_page_t m) { struct md_page *pvh; pv_entry_t pv, pvf; pmap_t pmap; struct rwlock *lock; pd_entry_t *pde, tpde; pt_entry_t *pte, tpte; pt_entry_t *l3; vm_offset_t va; vm_paddr_t pa; int cleared, md_gen, not_cleared, lvl, pvh_gen; struct spglist free; bool demoted; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_ts_referenced: page %p is not managed", m)); SLIST_INIT(&free); cleared = 0; pa = VM_PAGE_TO_PHYS(m); lock = PHYS_TO_PV_LIST_LOCK(pa); pvh = (m->flags & PG_FICTITIOUS) != 0 ? 
&pv_dummy : pa_to_pvh(pa); rw_wlock(lock); retry: not_cleared = 0; if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL) goto small_mappings; pv = pvf; do { if (pvf == NULL) pvf = pv; pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { pvh_gen = pvh->pv_gen; rw_wunlock(lock); PMAP_LOCK(pmap); rw_wlock(lock); if (pvh_gen != pvh->pv_gen) { PMAP_UNLOCK(pmap); goto retry; } } va = pv->pv_va; pde = pmap_pde(pmap, pv->pv_va, &lvl); KASSERT(pde != NULL, ("pmap_ts_referenced: no l1 table found")); KASSERT(lvl == 1, ("pmap_ts_referenced: invalid pde level %d", lvl)); tpde = pmap_load(pde); KASSERT((tpde & ATTR_DESCR_MASK) == L1_TABLE, ("pmap_ts_referenced: found an invalid l1 table")); pte = pmap_l1_to_l2(pde, pv->pv_va); tpte = pmap_load(pte); if (pmap_page_dirty(tpte)) { /* * Although "tpte" is mapping a 2MB page, because * this function is called at a 4KB page granularity, * we only update the 4KB page under test. */ vm_page_dirty(m); } if ((tpte & ATTR_AF) != 0) { /* * Since this reference bit is shared by 512 4KB * pages, it should not be cleared every time it is * tested. Apply a simple "hash" function on the * physical page number, the virtual superpage number, * and the pmap address to select one 4KB page out of * the 512 on which testing the reference bit will * result in clearing that reference bit. This * function is designed to avoid the selection of the * same 4KB page for every 2MB page mapping. * * On demotion, a mapping that hasn't been referenced * is simply destroyed. To avoid the possibility of a * subsequent page fault on a demoted wired mapping, * always leave its reference bit set. Moreover, * since the superpage is wired, the current state of * its reference bit won't affect page replacement. */ if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> L2_SHIFT) ^ (uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 && (tpte & ATTR_SW_WIRED) == 0) { if (safe_to_clear_referenced(pmap, tpte)) { /* * TODO: We don't handle the access * flag at all. We need to be able * to set it in the exception handler. */ panic("ARM64TODO: " "safe_to_clear_referenced\n"); } else if (pmap_demote_l2_locked(pmap, pte, pv->pv_va, &lock) != NULL) { demoted = true; va += VM_PAGE_TO_PHYS(m) - (tpte & ~ATTR_MASK); l3 = pmap_l2_to_l3(pte, va); pmap_remove_l3(pmap, l3, va, pmap_load(pte), NULL, &lock); } else demoted = true; if (demoted) { /* * The superpage mapping was removed * entirely and therefore 'pv' is no * longer valid. */ if (pvf == pv) pvf = NULL; pv = NULL; } cleared++; KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m), ("inconsistent pv lock %p %p for page %p", lock, VM_PAGE_TO_PV_LIST_LOCK(m), m)); } else not_cleared++; } PMAP_UNLOCK(pmap); /* Rotate the PV list if it has more than one entry. 
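 *
 * An illustrative aside: the reference-bit hash above picks exactly one
 * of the 512 4KB pages under each 2MB mapping, and varies the winner
 * across mappings.  A quick check of that property:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define PAGE_SHIFT	12
 *	#define L2_SHIFT	21
 *	#define Ln_ENTRIES	512
 *
 *	static int
 *	chosen(uint64_t pa, uint64_t va, uintptr_t pmap)
 *	{
 *		return ((((pa >> PAGE_SHIFT) ^ (va >> L2_SHIFT) ^ pmap) &
 *		    (Ln_ENTRIES - 1)) == 0);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		int hits = 0;
 *
 *		for (int i = 0; i < Ln_ENTRIES; i++)
 *			hits += chosen((uint64_t)i << PAGE_SHIFT,
 *			    0x40000000, 0x1234000);
 *		printf("%d\n", hits);	// prints: 1
 *		return (0);
 *	}
 *
 * Only the winning page has its reference bit cleared (by demotion), so
 * the bit shared by the other 511 pages is preserved.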
*/ if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) { TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); pvh->pv_gen++; } if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX) goto out; } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf); small_mappings: if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL) goto out; pv = pvf; do { if (pvf == NULL) pvf = pv; pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { pvh_gen = pvh->pv_gen; md_gen = m->md.pv_gen; rw_wunlock(lock); PMAP_LOCK(pmap); rw_wlock(lock); if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { PMAP_UNLOCK(pmap); goto retry; } } pde = pmap_pde(pmap, pv->pv_va, &lvl); KASSERT(pde != NULL, ("pmap_ts_referenced: no l2 table found")); KASSERT(lvl == 2, ("pmap_ts_referenced: invalid pde level %d", lvl)); tpde = pmap_load(pde); KASSERT((tpde & ATTR_DESCR_MASK) == L2_TABLE, ("pmap_ts_referenced: found an invalid l2 table")); pte = pmap_l2_to_l3(pde, pv->pv_va); tpte = pmap_load(pte); if (pmap_page_dirty(tpte)) vm_page_dirty(m); if ((tpte & ATTR_AF) != 0) { if (safe_to_clear_referenced(pmap, tpte)) { /* * TODO: We don't handle the access flag * at all. We need to be able to set it in * the exception handler. */ panic("ARM64TODO: safe_to_clear_referenced\n"); } else if ((tpte & ATTR_SW_WIRED) == 0) { /* * Wired pages cannot be paged out so * doing accessed bit emulation for * them is wasted effort. We do the * hard work for unwired pages only. */ pmap_remove_l3(pmap, pte, pv->pv_va, tpde, &free, &lock); pmap_invalidate_page(pmap, pv->pv_va); cleared++; if (pvf == pv) pvf = NULL; pv = NULL; KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m), ("inconsistent pv lock %p %p for page %p", lock, VM_PAGE_TO_PV_LIST_LOCK(m), m)); } else not_cleared++; } PMAP_UNLOCK(pmap); /* Rotate the PV list if it has more than one entry. */ if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) { TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; } } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared + not_cleared < PMAP_TS_REFERENCED_MAX); out: rw_wunlock(lock); pmap_free_zero_pages(&free); return (cleared + not_cleared); } /* * Apply the given advice to the specified range of addresses within the * given pmap. Depending on the advice, clear the referenced and/or * modified flags in each mapping and set the mapped page's dirty field. */ void pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice) { } /* * Clear the modify bits on the specified physical page. */ void pmap_clear_modify(vm_page_t m) { KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_clear_modify: page %p is not managed", m)); VM_OBJECT_ASSERT_WLOCKED(m->object); KASSERT(!vm_page_xbusied(m), ("pmap_clear_modify: page %p is exclusive busied", m)); /* * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set. * If the object containing the page is locked and the page is not * exclusive busied, then PGA_WRITEABLE cannot be concurrently set. */ if ((m->aflags & PGA_WRITEABLE) == 0) return; /* ARM64TODO: We lack support for tracking if a page is modified */ } void * pmap_mapbios(vm_paddr_t pa, vm_size_t size) { return ((void *)PHYS_TO_DMAP(pa)); } void pmap_unmapbios(vm_paddr_t pa, vm_size_t size) { } /* * Sets the memory attribute for the specified page. */ void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) { m->md.pv_memattr = ma; /* * If "m" is a normal page, update its direct mapping. 
This update
 * can be relied upon to perform any cache operations that are
 * required for data coherence.
 */
	if ((m->flags & PG_FICTITIOUS) == 0 &&
	    pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
	    m->md.pv_memattr) != 0)
		panic("memory attribute change on the direct map failed");
}

/*
 * Changes the specified virtual address range's memory type to that given by
 * the parameter "mode".  The specified virtual address range must be
 * completely contained within either the direct map or the kernel map.  If
 * the virtual address range is contained within the kernel map, then the
 * memory type for each of the corresponding ranges of the direct map is also
 * changed.  (The corresponding ranges of the direct map are those ranges that
 * map the same physical pages as the specified virtual address range.)  These
 * changes to the direct map are necessary because Intel describes the
 * behavior of their processors as "undefined" if two or more mappings to the
 * same physical page have different memory types.
 *
 * Returns zero if the change completed successfully, and either EINVAL or
 * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
 * of the virtual address range was not mapped, and ENOMEM is returned if
 * there was insufficient memory available to complete the change.  In the
 * latter case, the memory type may have been changed on some part of the
 * virtual address range or the direct map.
 */
static int
pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{
	int error;

	PMAP_LOCK(kernel_pmap);
	error = pmap_change_attr_locked(va, size, mode);
	PMAP_UNLOCK(kernel_pmap);
	return (error);
}

static int
pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
{
	vm_offset_t base, offset, tmpva;
	pt_entry_t l3, *pte, *newpte;
	int lvl;

	PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = round_page(offset + size);

	if (!VIRT_IN_DMAP(base))
		return (EINVAL);

	for (tmpva = base; tmpva < base + size; ) {
		pte = pmap_pte(kernel_pmap, tmpva, &lvl);
		if (pte == NULL)
			return (EINVAL);

		if ((pmap_load(pte) & ATTR_IDX_MASK) == ATTR_IDX(mode)) {
			/*
			 * We already have the correct attribute,
			 * ignore this entry.
			 */
			switch (lvl) {
			default:
				panic("Invalid DMAP table level: %d\n", lvl);
			case 1:
				tmpva = (tmpva & ~L1_OFFSET) + L1_SIZE;
				break;
			case 2:
				tmpva = (tmpva & ~L2_OFFSET) + L2_SIZE;
				break;
			case 3:
				tmpva += PAGE_SIZE;
				break;
			}
		} else {
			/*
			 * Split the entry to a level 3 table, then
			 * set the new attribute.
			 */
			switch (lvl) {
			default:
				panic("Invalid DMAP table level: %d\n", lvl);
			case 1:
				newpte = pmap_demote_l1(kernel_pmap, pte,
				    tmpva & ~L1_OFFSET);
				if (newpte == NULL)
					return (EINVAL);
				pte = pmap_l1_to_l2(pte, tmpva);
				/* FALLTHROUGH */
			case 2:
				newpte = pmap_demote_l2(kernel_pmap, pte,
				    tmpva & ~L2_OFFSET);
				if (newpte == NULL)
					return (EINVAL);
				pte = pmap_l2_to_l3(pte, tmpva);
				/* FALLTHROUGH */
			case 3:
				/* Update the entry */
				l3 = pmap_load(pte);
				l3 &= ~ATTR_IDX_MASK;
				l3 |= ATTR_IDX(mode);
				if (mode == DEVICE_MEMORY)
					l3 |= ATTR_XN;

				pmap_update_entry(kernel_pmap, pte, l3, tmpva,
				    PAGE_SIZE);

				/*
				 * If moving to a non-cacheable entry, flush
				 * the cache.
				 */
				if (mode == VM_MEMATTR_UNCACHEABLE)
					cpu_dcache_wbinv_range(tmpva, L3_SIZE);

				break;
			}
			tmpva += PAGE_SIZE;
		}
	}

	return (0);
}

/*
 * Create an L2 table to map all addresses within an L1 mapping.
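 *
 * An illustrative aside: pmap_change_attr_locked() above advances by the
 * size of whatever entry it matched, so an already-correct 1GB or 2MB
 * block is skipped in one step:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define L1_SIZE	(1ul << 30)
 *	#define L2_SIZE	(1ul << 21)
 *	#define L3_SIZE	(1ul << 12)
 *
 *	static uint64_t
 *	skip_level(uint64_t tmpva, int lvl)
 *	{
 *		switch (lvl) {
 *		case 1:
 *			return ((tmpva & ~(L1_SIZE - 1)) + L1_SIZE);
 *		case 2:
 *			return ((tmpva & ~(L2_SIZE - 1)) + L2_SIZE);
 *		default:
 *			return (tmpva + L3_SIZE);
 *		}
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		// prints 0x40200000: the next 2MB boundary
 *		printf("%#lx\n",
 *		    (unsigned long)skip_level(0x40012345, 2));
 *		return (0);
 *	}
 *
 * Entries that need a different attribute are instead demoted down to
 * 4KB granularity, as pmap_demote_l1() below shows.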
 */
static pt_entry_t *
pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va)
{
	pt_entry_t *l2, newl2, oldl1;
	vm_offset_t tmpl1;
	vm_paddr_t l2phys, phys;
	vm_page_t ml2;
	int i;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	oldl1 = pmap_load(l1);
	KASSERT((oldl1 & ATTR_DESCR_MASK) == L1_BLOCK,
	    ("pmap_demote_l1: Demoting a non-block entry"));
	KASSERT((va & L1_OFFSET) == 0,
	    ("pmap_demote_l1: Invalid virtual address %#lx", va));
	KASSERT((oldl1 & ATTR_SW_MANAGED) == 0,
	    ("pmap_demote_l1: Level 1 table shouldn't be managed"));

	tmpl1 = 0;
	if (va <= (vm_offset_t)l1 && va + L1_SIZE > (vm_offset_t)l1) {
		tmpl1 = kva_alloc(PAGE_SIZE);
		if (tmpl1 == 0)
			return (NULL);
	}

	if ((ml2 = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
		CTR2(KTR_PMAP, "pmap_demote_l1: failure for va %#lx"
		    " in pmap %p", va, pmap);
		return (NULL);
	}

	l2phys = VM_PAGE_TO_PHYS(ml2);
	l2 = (pt_entry_t *)PHYS_TO_DMAP(l2phys);

	/* Address the range points at */
	phys = oldl1 & ~ATTR_MASK;
	/* The attributes from the old l1 entry to be copied */
	newl2 = oldl1 & ATTR_MASK;

	/* Create the new entries */
	for (i = 0; i < Ln_ENTRIES; i++) {
		l2[i] = newl2 | phys;
		phys += L2_SIZE;
	}
	cpu_dcache_wb_range((vm_offset_t)l2, PAGE_SIZE);
	KASSERT(l2[0] == ((oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK),
	    ("Invalid l2 page (%lx != %lx)", l2[0],
	    (oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK));

	if (tmpl1 != 0) {
		pmap_kenter(tmpl1, PAGE_SIZE,
		    DMAP_TO_PHYS((vm_offset_t)l1) & ~L3_OFFSET, CACHED_MEMORY);
		l1 = (pt_entry_t *)(tmpl1 + ((vm_offset_t)l1 & PAGE_MASK));
	}

	pmap_update_entry(pmap, l1, l2phys | L1_TABLE, va, PAGE_SIZE);

	if (tmpl1 != 0) {
		pmap_kremove(tmpl1);
		kva_free(tmpl1, PAGE_SIZE);
	}

	return (l2);
}

/*
 * Create an L3 table to map all addresses within an L2 mapping.
 */
static pt_entry_t *
pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
    struct rwlock **lockp)
{
	pt_entry_t *l3, newl3, oldl2;
	vm_offset_t tmpl2;
	vm_paddr_t l3phys, phys;
	vm_page_t ml3;
	int i;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	l3 = NULL;
	oldl2 = pmap_load(l2);
	KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK,
	    ("pmap_demote_l2: Demoting a non-block entry"));
	KASSERT((va & L2_OFFSET) == 0,
	    ("pmap_demote_l2: Invalid virtual address %#lx", va));

	tmpl2 = 0;
	if (va <= (vm_offset_t)l2 && va + L2_SIZE > (vm_offset_t)l2) {
		tmpl2 = kva_alloc(PAGE_SIZE);
		if (tmpl2 == 0)
			return (NULL);
	}

	if ((ml3 = pmap_remove_pt_page(pmap, va)) == NULL) {
		ml3 = vm_page_alloc(NULL, pmap_l2_pindex(va),
		    (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
		if (ml3 == NULL) {
			CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx"
			    " in pmap %p", va, pmap);
			goto fail;
		}
		if (va < VM_MAXUSER_ADDRESS)
			pmap_resident_count_inc(pmap, 1);
	}

	l3phys = VM_PAGE_TO_PHYS(ml3);
	l3 = (pt_entry_t *)PHYS_TO_DMAP(l3phys);

	/* Address the range points at */
	phys = oldl2 & ~ATTR_MASK;
	/* The attributes from the old l2 entry to be copied */
	newl3 = (oldl2 & (ATTR_MASK & ~ATTR_DESCR_MASK)) | L3_PAGE;

	/*
	 * If the page table page is new, initialize it.
	 */
	if (ml3->wire_count == 1) {
		for (i = 0; i < Ln_ENTRIES; i++) {
			l3[i] = newl3 | phys;
			phys += L3_SIZE;
		}
		cpu_dcache_wb_range((vm_offset_t)l3, PAGE_SIZE);
	}
	KASSERT(l3[0] == ((oldl2 & ~ATTR_DESCR_MASK) | L3_PAGE),
	    ("Invalid l3 page (%lx != %lx)", l3[0],
	    (oldl2 & ~ATTR_DESCR_MASK) | L3_PAGE));

	/*
	 * Map the temporary page so we don't lose access to the l2 table.
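 *
 * An illustrative aside: the demotion fill loops above re-describe one
 * large block with 512 smaller entries carrying the same attributes.  A
 * userspace sketch of the L1 -> L2 case (the mask is a stand-in):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define Ln_ENTRIES	512
 *	#define L2_SIZE		(1ul << 21)
 *	#define ATTR_MASK	0xfff0000000000ffful	// stand-in mask
 *
 *	static void
 *	fill_l2(uint64_t *l2, uint64_t oldl1)
 *	{
 *		uint64_t phys = oldl1 & ~ATTR_MASK;	// block base
 *		uint64_t attr = oldl1 & ATTR_MASK;	// copied attrs
 *
 *		for (int i = 0; i < Ln_ENTRIES; i++) {
 *			l2[i] = attr | phys;
 *			phys += L2_SIZE;
 *		}
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		static uint64_t l2[Ln_ENTRIES];
 *
 *		fill_l2(l2, 0x80000000ul | 0x705);
 *		printf("%#lx %#lx\n", (unsigned long)l2[0],
 *		    (unsigned long)l2[1]);	// base, then base + 2MB
 *		return (0);
 *	}
 *
 * The real code then writes the new table's address over the old block
 * entry with pmap_update_entry(), a break-before-make replacement.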
 */
	if (tmpl2 != 0) {
		pmap_kenter(tmpl2, PAGE_SIZE,
		    DMAP_TO_PHYS((vm_offset_t)l2) & ~L3_OFFSET, CACHED_MEMORY);
		l2 = (pt_entry_t *)(tmpl2 + ((vm_offset_t)l2 & PAGE_MASK));
	}

	/*
	 * The spare PV entries must be reserved prior to demoting the
	 * mapping, that is, prior to changing the PDE.  Otherwise, the state
	 * of the L2 and the PV lists will be inconsistent, which can result
	 * in reclaim_pv_chunk() attempting to remove a PV entry from the
	 * wrong PV list and pmap_pv_demote_l2() failing to find the expected
	 * PV entry for the 2MB page mapping that is being demoted.
	 */
	if ((oldl2 & ATTR_SW_MANAGED) != 0)
		reserve_pv_entries(pmap, Ln_ENTRIES - 1, lockp);

	pmap_update_entry(pmap, l2, l3phys | L2_TABLE, va, PAGE_SIZE);

	/*
	 * Demote the PV entry.
	 */
	if ((oldl2 & ATTR_SW_MANAGED) != 0)
		pmap_pv_demote_l2(pmap, va, oldl2 & ~ATTR_MASK, lockp);

	atomic_add_long(&pmap_l2_demotions, 1);
	CTR3(KTR_PMAP, "pmap_demote_l2: success for va %#lx"
	    " in pmap %p %lx", va, pmap, l3[0]);

fail:
	if (tmpl2 != 0) {
		pmap_kremove(tmpl2);
		kva_free(tmpl2, PAGE_SIZE);
	}

	return (l3);
}

static pt_entry_t *
pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
{
	struct rwlock *lock;
	pt_entry_t *l3;

	lock = NULL;
	l3 = pmap_demote_l2_locked(pmap, l2, va, &lock);
	if (lock != NULL)
		rw_wunlock(lock);

	return (l3);
}

/*
 * perform the pmap work for mincore
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
	pd_entry_t *l1p, l1;
	pd_entry_t *l2p, l2;
	pt_entry_t *l3p, l3;
	vm_paddr_t pa;
	bool managed;
	int val;

	PMAP_LOCK(pmap);
retry:
	pa = 0;
	val = 0;
	managed = false;

	l1p = pmap_l1(pmap, addr);
	if (l1p == NULL) /* No l1 */
		goto done;

	l1 = pmap_load(l1p);
	if ((l1 & ATTR_DESCR_MASK) == L1_INVAL)
		goto done;

	if ((l1 & ATTR_DESCR_MASK) == L1_BLOCK) {
		pa = (l1 & ~ATTR_MASK) | (addr & L1_OFFSET);
		managed = (l1 & ATTR_SW_MANAGED) == ATTR_SW_MANAGED;
		val = MINCORE_SUPER | MINCORE_INCORE;
		if (pmap_page_dirty(l1))
			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
		if ((l1 & ATTR_AF) == ATTR_AF)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
		goto done;
	}

	l2p = pmap_l1_to_l2(l1p, addr);
	if (l2p == NULL) /* No l2 */
		goto done;

	l2 = pmap_load(l2p);
	if ((l2 & ATTR_DESCR_MASK) == L2_INVAL)
		goto done;

	if ((l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
		pa = (l2 & ~ATTR_MASK) | (addr & L2_OFFSET);
		managed = (l2 & ATTR_SW_MANAGED) == ATTR_SW_MANAGED;
		val = MINCORE_SUPER | MINCORE_INCORE;
		if (pmap_page_dirty(l2))
			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
		if ((l2 & ATTR_AF) == ATTR_AF)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
		goto done;
	}

	l3p = pmap_l2_to_l3(l2p, addr);
	if (l3p == NULL) /* No l3 */
		goto done;

	l3 = pmap_load(l3p);
	if ((l3 & ATTR_DESCR_MASK) == L3_INVAL)
		goto done;

	if ((l3 & ATTR_DESCR_MASK) == L3_PAGE) {
		pa = (l3 & ~ATTR_MASK) | (addr & L3_OFFSET);
		managed = (l3 & ATTR_SW_MANAGED) == ATTR_SW_MANAGED;
		val = MINCORE_INCORE;
		if (pmap_page_dirty(l3))
			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
		if ((l3 & ATTR_AF) == ATTR_AF)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
	}

done:
	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change.
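 *
 * An illustrative aside: the value built above combines residency,
 * superpage level, and the dirty/accessed attributes.  A sketch of that
 * synthesis; the flag values are quoted from sys/mman.h as a
 * convenience, so treat them as illustrative here:
 *
 *	#include <stdio.h>
 *
 *	#define MINCORE_INCORE		 0x1
 *	#define MINCORE_REFERENCED	 0x2
 *	#define MINCORE_MODIFIED	 0x4
 *	#define MINCORE_REFERENCED_OTHER 0x8
 *	#define MINCORE_MODIFIED_OTHER	0x10
 *	#define MINCORE_SUPER		0x20
 *
 *	static int
 *	mincore_val(int super, int dirty, int accessed)
 *	{
 *		int val = MINCORE_INCORE | (super ? MINCORE_SUPER : 0);
 *
 *		if (dirty)
 *			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
 *		if (accessed)
 *			val |= MINCORE_REFERENCED |
 *			    MINCORE_REFERENCED_OTHER;
 *		return (val);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		printf("%#x\n", mincore_val(1, 0, 1));	// prints: 0x2b
 *		return (0);
 *	}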
*/ if (vm_page_pa_tryrelock(pmap, pa, locked_pa)) goto retry; } else PA_UNLOCK_COND(*locked_pa); PMAP_UNLOCK(pmap); return (val); } void pmap_activate(struct thread *td) { pmap_t pmap; critical_enter(); pmap = vmspace_pmap(td->td_proc->p_vmspace); td->td_pcb->pcb_l0addr = vtophys(pmap->pm_l0); __asm __volatile("msr ttbr0_el1, %0" : : "r"(td->td_pcb->pcb_l0addr)); pmap_invalidate_all(pmap); critical_exit(); } void pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz) { if (va >= VM_MIN_KERNEL_ADDRESS) { cpu_icache_sync_range(va, sz); } else { u_int len, offset; vm_paddr_t pa; /* Find the length of data in this page to flush */ offset = va & PAGE_MASK; len = imin(PAGE_SIZE - offset, sz); while (sz != 0) { /* Extract the physical address & find it in the DMAP */ pa = pmap_extract(pmap, va); if (pa != 0) cpu_icache_sync_range(PHYS_TO_DMAP(pa), len); /* Move to the next page */ sz -= len; va += len; /* Set the length for the next iteration */ len = imin(PAGE_SIZE, sz); } } } int pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far) { #ifdef SMP uint64_t par; #endif switch (ESR_ELx_EXCEPTION(esr)) { case EXCP_DATA_ABORT_L: case EXCP_DATA_ABORT: break; default: return (KERN_FAILURE); } #ifdef SMP PMAP_LOCK(pmap); switch (esr & ISS_DATA_DFSC_MASK) { case ISS_DATA_DFSC_TF_L0: case ISS_DATA_DFSC_TF_L1: case ISS_DATA_DFSC_TF_L2: case ISS_DATA_DFSC_TF_L3: /* Ask the MMU to check the address */ if (pmap == kernel_pmap) par = arm64_address_translate_s1e1r(far); else par = arm64_address_translate_s1e0r(far); /* * If the translation was successful the address was invalid * due to a break-before-make sequence. We can unlock and * return success to the trap handler. */ if (PAR_SUCCESS(par)) { PMAP_UNLOCK(pmap); return (KERN_SUCCESS); } break; default: break; } PMAP_UNLOCK(pmap); #endif return (KERN_FAILURE); } /* * Increase the starting virtual address of the given mapping if a * different alignment might result in more superpage mappings. */ void pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size) { vm_offset_t superpage_offset; if (size < L2_SIZE) return; if (object != NULL && (object->flags & OBJ_COLORED) != 0) offset += ptoa(object->pg_color); superpage_offset = offset & L2_OFFSET; if (size - ((L2_SIZE - superpage_offset) & L2_OFFSET) < L2_SIZE || (*addr & L2_OFFSET) == superpage_offset) return; if ((*addr & L2_OFFSET) < superpage_offset) *addr = (*addr & ~L2_OFFSET) + superpage_offset; else *addr = ((*addr + L2_OFFSET) & ~L2_OFFSET) + superpage_offset; } /** * Get the kernel virtual address of a set of physical pages. If there are * physical addresses not covered by the DMAP perform a transient mapping * that will be removed when calling pmap_unmap_io_transient. * * \param page The pages the caller wishes to obtain the virtual * address on the kernel memory map. * \param vaddr On return contains the kernel virtual memory address * of the pages passed in the page parameter. * \param count Number of pages passed in. * \param can_fault TRUE if the thread using the mapped pages can take * page faults, FALSE otherwise. * * \returns TRUE if the caller must call pmap_unmap_io_transient when * finished or FALSE otherwise. * */ boolean_t pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count, boolean_t can_fault) { vm_paddr_t paddr; boolean_t needs_mapping; int error, i; /* * Allocate any KVA space that we need, this is done in a separate * loop to prevent calling vmem_alloc while pinned. 
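	 * (vmem_alloc() is called with M_WAITOK and may sleep, which is not
	 * permitted once sched_pin() has been called below.)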
*/ needs_mapping = FALSE; for (i = 0; i < count; i++) { paddr = VM_PAGE_TO_PHYS(page[i]); if (__predict_false(!PHYS_IN_DMAP(paddr))) { error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK, &vaddr[i]); KASSERT(error == 0, ("vmem_alloc failed: %d", error)); needs_mapping = TRUE; } else { vaddr[i] = PHYS_TO_DMAP(paddr); } } /* Exit early if everything is covered by the DMAP */ if (!needs_mapping) return (FALSE); if (!can_fault) sched_pin(); for (i = 0; i < count; i++) { paddr = VM_PAGE_TO_PHYS(page[i]); if (!PHYS_IN_DMAP(paddr)) { panic( "pmap_map_io_transient: TODO: Map out of DMAP data"); } } return (needs_mapping); } void pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count, boolean_t can_fault) { vm_paddr_t paddr; int i; if (!can_fault) sched_unpin(); for (i = 0; i < count; i++) { paddr = VM_PAGE_TO_PHYS(page[i]); if (!PHYS_IN_DMAP(paddr)) { panic("ARM64TODO: pmap_unmap_io_transient: Unmap data"); } } } Index: projects/clang500-import/sys/compat/linuxkpi/common/src/linux_compat.c =================================================================== --- projects/clang500-import/sys/compat/linuxkpi/common/src/linux_compat.c (revision 319548) +++ projects/clang500-import/sys/compat/linuxkpi/common/src/linux_compat.c (revision 319549) @@ -1,2030 +1,2040 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2017 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #endif SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW, 0, "LinuxKPI parameters"); MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat"); #include /* Undo Linux compat changes. 
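 * The LinuxKPI headers #define names such as RB_ROOT, file and cdev;
 * undefine them here so that this file can use the native FreeBSD
 * definitions, and restore an RB_ROOT() compatible with sys/tree.h.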
*/ #undef RB_ROOT #undef file #undef cdev #define RB_ROOT(head) (head)->rbh_root static struct vm_area_struct *linux_cdev_handle_find(void *handle); struct kobject linux_class_root; struct device linux_root_device; struct class linux_class_misc; struct list_head pci_drivers; struct list_head pci_devices; spinlock_t pci_lock; unsigned long linux_timer_hz_mask; int panic_cmp(struct rb_node *one, struct rb_node *two) { panic("no cmp"); } RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args) { va_list tmp_va; int len; char *old; char *name; char dummy; old = kobj->name; if (old && fmt == NULL) return (0); /* compute length of string */ va_copy(tmp_va, args); len = vsnprintf(&dummy, 0, fmt, tmp_va); va_end(tmp_va); /* account for zero termination */ len++; /* check for error */ if (len < 1) return (-EINVAL); /* allocate memory for string */ name = kzalloc(len, GFP_KERNEL); if (name == NULL) return (-ENOMEM); vsnprintf(name, len, fmt, args); kobj->name = name; /* free old string */ kfree(old); /* filter new string */ for (; *name != '\0'; name++) if (*name == '/') *name = '!'; return (0); } int kobject_set_name(struct kobject *kobj, const char *fmt, ...) { va_list args; int error; va_start(args, fmt); error = kobject_set_name_vargs(kobj, fmt, args); va_end(args); return (error); } static int kobject_add_complete(struct kobject *kobj, struct kobject *parent) { const struct kobj_type *t; int error; kobj->parent = parent; error = sysfs_create_dir(kobj); if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) { struct attribute **attr; t = kobj->ktype; for (attr = t->default_attrs; *attr != NULL; attr++) { error = sysfs_create_file(kobj, *attr); if (error) break; } if (error) sysfs_remove_dir(kobj); } return (error); } int kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) 
{ va_list args; int error; va_start(args, fmt); error = kobject_set_name_vargs(kobj, fmt, args); va_end(args); if (error) return (error); return kobject_add_complete(kobj, parent); } void linux_kobject_release(struct kref *kref) { struct kobject *kobj; char *name; kobj = container_of(kref, struct kobject, kref); sysfs_remove_dir(kobj); name = kobj->name; if (kobj->ktype && kobj->ktype->release) kobj->ktype->release(kobj); kfree(name); } static void linux_kobject_kfree(struct kobject *kobj) { kfree(kobj); } static void linux_kobject_kfree_name(struct kobject *kobj) { if (kobj) { kfree(kobj->name); } } const struct kobj_type linux_kfree_type = { .release = linux_kobject_kfree }; static void linux_device_release(struct device *dev) { pr_debug("linux_device_release: %s\n", dev_name(dev)); kfree(dev); } static ssize_t linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct class_attribute *dattr; ssize_t error; dattr = container_of(attr, struct class_attribute, attr); error = -EIO; if (dattr->show) error = dattr->show(container_of(kobj, struct class, kobj), dattr, buf); return (error); } static ssize_t linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct class_attribute *dattr; ssize_t error; dattr = container_of(attr, struct class_attribute, attr); error = -EIO; if (dattr->store) error = dattr->store(container_of(kobj, struct class, kobj), dattr, buf, count); return (error); } static void linux_class_release(struct kobject *kobj) { struct class *class; class = container_of(kobj, struct class, kobj); if (class->class_release) class->class_release(class); } static const struct sysfs_ops linux_class_sysfs = { .show = linux_class_show, .store = linux_class_store, }; const struct kobj_type linux_class_ktype = { .release = linux_class_release, .sysfs_ops = &linux_class_sysfs }; static void linux_dev_release(struct kobject *kobj) { struct device *dev; dev = container_of(kobj, struct device, kobj); /* This is the precedence defined by linux. */ if (dev->release) dev->release(dev); else if (dev->class && dev->class->dev_release) dev->class->dev_release(dev); } static ssize_t linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct device_attribute *dattr; ssize_t error; dattr = container_of(attr, struct device_attribute, attr); error = -EIO; if (dattr->show) error = dattr->show(container_of(kobj, struct device, kobj), dattr, buf); return (error); } static ssize_t linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct device_attribute *dattr; ssize_t error; dattr = container_of(attr, struct device_attribute, attr); error = -EIO; if (dattr->store) error = dattr->store(container_of(kobj, struct device, kobj), dattr, buf, count); return (error); } static const struct sysfs_ops linux_dev_sysfs = { .show = linux_dev_show, .store = linux_dev_store, }; const struct kobj_type linux_dev_ktype = { .release = linux_dev_release, .sysfs_ops = &linux_dev_sysfs }; struct device * device_create(struct class *class, struct device *parent, dev_t devt, void *drvdata, const char *fmt, ...) 
{ struct device *dev; va_list args; dev = kzalloc(sizeof(*dev), M_WAITOK); dev->parent = parent; dev->class = class; dev->devt = devt; dev->driver_data = drvdata; dev->release = linux_device_release; va_start(args, fmt); kobject_set_name_vargs(&dev->kobj, fmt, args); va_end(args); device_register(dev); return (dev); } int kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, struct kobject *parent, const char *fmt, ...) { va_list args; int error; kobject_init(kobj, ktype); kobj->ktype = ktype; kobj->parent = parent; kobj->name = NULL; va_start(args, fmt); error = kobject_set_name_vargs(kobj, fmt, args); va_end(args); if (error) return (error); return kobject_add_complete(kobj, parent); } static void linux_file_dtor(void *cdp) { struct linux_file *filp; linux_set_current(curthread); filp = cdp; filp->f_op->release(filp->f_vnode, filp); vdrop(filp->f_vnode); kfree(filp); } static void linux_kq_lock(void *arg) { spinlock_t *s = arg; spin_lock(s); } static void linux_kq_unlock(void *arg) { spinlock_t *s = arg; spin_unlock(s); } static void linux_kq_lock_owned(void *arg) { #ifdef INVARIANTS spinlock_t *s = arg; mtx_assert(&s->m, MA_OWNED); #endif } static void linux_kq_lock_unowned(void *arg) { #ifdef INVARIANTS spinlock_t *s = arg; mtx_assert(&s->m, MA_NOTOWNED); #endif } static void -linux_dev_kqfilter_poll(struct linux_file *); +linux_dev_kqfilter_poll(struct linux_file *, int); struct linux_file * linux_file_alloc(void) { struct linux_file *filp; filp = kzalloc(sizeof(*filp), GFP_KERNEL); /* set initial refcount */ filp->f_count = 1; /* setup fields needed by kqueue support */ spin_lock_init(&filp->f_kqlock); knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock, linux_kq_lock, linux_kq_unlock, linux_kq_lock_owned, linux_kq_lock_unowned); return (filp); } void linux_file_free(struct linux_file *filp) { if (filp->_file == NULL) { kfree(filp); } else { /* * The close method of the character device or file * will free the linux_file structure: */ _fdrop(filp->_file, curthread); } } static int linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) { struct vm_area_struct *vmap; struct vm_fault vmf; int err; linux_set_current(curthread); /* get VM area structure */ vmap = linux_cdev_handle_find(vm_obj->handle); MPASS(vmap != NULL); MPASS(vmap->vm_private_data == vm_obj->handle); /* fill out VM fault structure */ vmf.virtual_address = (void *)((uintptr_t)pidx << PAGE_SHIFT); vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0; vmf.pgoff = 0; vmf.page = NULL; VM_OBJECT_WUNLOCK(vm_obj); down_write(&vmap->vm_mm->mmap_sem); if (unlikely(vmap->vm_ops == NULL)) { err = VM_FAULT_SIGBUS; } else { vmap->vm_pfn_count = 0; vmap->vm_pfn_pcount = &vmap->vm_pfn_count; vmap->vm_obj = vm_obj; err = vmap->vm_ops->fault(vmap, &vmf); while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { kern_yield(PRI_USER); err = vmap->vm_ops->fault(vmap, &vmf); } } /* translate return code */ switch (err) { case VM_FAULT_OOM: err = VM_PAGER_AGAIN; break; case VM_FAULT_SIGBUS: err = VM_PAGER_BAD; break; case VM_FAULT_NOPAGE: /* * By contract the fault handler will return having * busied all the pages itself. If pidx is already * found in the object, it will simply xbusy the first * page and return with vm_pfn_count set to 1. 
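		 * The busied range is recorded in vm_pfn_first/vm_pfn_count
		 * through the vm_pfn_pcount pointer set up above, and is
		 * translated below into the [*first, *last] page indices
		 * that the cdev_pg_populate() interface expects.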
*/ *first = vmap->vm_pfn_first; *last = *first + vmap->vm_pfn_count - 1; err = VM_PAGER_OK; break; default: err = VM_PAGER_ERROR; break; } up_write(&vmap->vm_mm->mmap_sem); VM_OBJECT_WLOCK(vm_obj); return (err); } static struct rwlock linux_vma_lock; static TAILQ_HEAD(, vm_area_struct) linux_vma_head = TAILQ_HEAD_INITIALIZER(linux_vma_head); static void linux_cdev_handle_free(struct vm_area_struct *vmap) { /* Drop reference on vm_file */ if (vmap->vm_file != NULL) fput(vmap->vm_file); /* Drop reference on mm_struct */ mmput(vmap->vm_mm); kfree(vmap); } static struct vm_area_struct * linux_cdev_handle_insert(void *handle, struct vm_area_struct *vmap) { struct vm_area_struct *ptr; rw_wlock(&linux_vma_lock); TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { if (ptr->vm_private_data == handle) { rw_wunlock(&linux_vma_lock); linux_cdev_handle_free(vmap); return (NULL); } } TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); rw_wunlock(&linux_vma_lock); return (vmap); } static void linux_cdev_handle_remove(struct vm_area_struct *vmap) { rw_wlock(&linux_vma_lock); TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); rw_wunlock(&linux_vma_lock); } static struct vm_area_struct * linux_cdev_handle_find(void *handle) { struct vm_area_struct *vmap; rw_rlock(&linux_vma_lock); TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { if (vmap->vm_private_data == handle) break; } rw_runlock(&linux_vma_lock); return (vmap); } static int linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred, u_short *color) { MPASS(linux_cdev_handle_find(handle) != NULL); *color = 0; return (0); } static void linux_cdev_pager_dtor(void *handle) { const struct vm_operations_struct *vm_ops; struct vm_area_struct *vmap; vmap = linux_cdev_handle_find(handle); MPASS(vmap != NULL); /* * Remove handle before calling close operation to prevent * other threads from reusing the handle pointer. 
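	 * Once removed, linux_cdev_handle_find() can no longer return this
	 * vmap, so it is safe to call the close operation and free it.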
*/ linux_cdev_handle_remove(vmap); down_write(&vmap->vm_mm->mmap_sem); vm_ops = vmap->vm_ops; if (likely(vm_ops != NULL)) vm_ops->close(vmap); up_write(&vmap->vm_mm->mmap_sem); linux_cdev_handle_free(vmap); } static struct cdev_pager_ops linux_cdev_pager_ops = { .cdev_pg_populate = linux_cdev_pager_populate, .cdev_pg_ctor = linux_cdev_pager_ctor, .cdev_pg_dtor = linux_cdev_pager_dtor }; static int linux_dev_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { struct linux_cdev *ldev; struct linux_file *filp; struct file *file; int error; file = td->td_fpop; ldev = dev->si_drv1; if (ldev == NULL) return (ENODEV); filp = linux_file_alloc(); filp->f_dentry = &filp->f_dentry_store; filp->f_op = ldev->ops; filp->f_flags = file->f_flag; vhold(file->f_vnode); filp->f_vnode = file->f_vnode; filp->_file = file; linux_set_current(td); if (filp->f_op->open) { error = -filp->f_op->open(file->f_vnode, filp); if (error) { vdrop(filp->f_vnode); kfree(filp); goto done; } } error = devfs_set_cdevpriv(filp, linux_file_dtor); if (error) { filp->f_op->release(file->f_vnode, filp); vdrop(filp->f_vnode); kfree(filp); } done: return (error); } static int linux_dev_close(struct cdev *dev, int fflag, int devtype, struct thread *td) { struct linux_file *filp; struct file *file; int error; file = td->td_fpop; if (dev->si_drv1 == NULL) return (0); if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) return (error); filp->f_flags = file->f_flag; devfs_clear_cdevpriv(); return (0); } #define LINUX_IOCTL_MIN_PTR 0x10000UL #define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) static inline int linux_remap_address(void **uaddr, size_t len) { uintptr_t uaddr_val = (uintptr_t)(*uaddr); if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && uaddr_val < LINUX_IOCTL_MAX_PTR)) { struct task_struct *pts = current; if (pts == NULL) { *uaddr = NULL; return (1); } /* compute data offset */ uaddr_val -= LINUX_IOCTL_MIN_PTR; /* check that length is within bounds */ if ((len > IOCPARM_MAX) || (uaddr_val + len) > pts->bsd_ioctl_len) { *uaddr = NULL; return (1); } /* re-add kernel buffer address */ uaddr_val += (uintptr_t)pts->bsd_ioctl_data; /* update address location */ *uaddr = (void *)uaddr_val; return (1); } return (0); } int linux_copyin(const void *uaddr, void *kaddr, size_t len) { if (linux_remap_address(__DECONST(void **, &uaddr), len)) { if (uaddr == NULL) return (-EFAULT); memcpy(kaddr, uaddr, len); return (0); } return (-copyin(uaddr, kaddr, len)); } int linux_copyout(const void *kaddr, void *uaddr, size_t len) { if (linux_remap_address(&uaddr, len)) { if (uaddr == NULL) return (-EFAULT); memcpy(uaddr, kaddr, len); return (0); } return (-copyout(kaddr, uaddr, len)); } size_t linux_clear_user(void *_uaddr, size_t _len) { uint8_t *uaddr = _uaddr; size_t len = _len; /* make sure uaddr is aligned before going into the fast loop */ while (((uintptr_t)uaddr & 7) != 0 && len > 7) { if (subyte(uaddr, 0)) return (_len); uaddr++; len--; } /* zero 8 bytes at a time */ while (len > 7) { #ifdef __LP64__ if (suword64(uaddr, 0)) return (_len); #else if (suword32(uaddr, 0)) return (_len); if (suword32(uaddr + 4, 0)) return (_len); #endif uaddr += 8; len -= 8; } /* zero fill end, if any */ while (len > 0) { if (subyte(uaddr, 0)) return (_len); uaddr++; len--; } return (0); } int linux_access_ok(int rw, const void *uaddr, size_t len) { uintptr_t saddr; uintptr_t eaddr; /* get start and end address */ saddr = (uintptr_t)uaddr; eaddr = (uintptr_t)uaddr + len; /* verify addresses are valid for userspace */ return ((saddr == 
eaddr) || (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS)); } static int linux_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct linux_file *filp; struct file *file; unsigned size; int error; file = td->td_fpop; if (dev->si_drv1 == NULL) return (ENXIO); if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) return (error); filp->f_flags = file->f_flag; + /* the LinuxKPI supports blocking and non-blocking I/O */ + if (cmd == FIONBIO || cmd == FIOASYNC) + return (0); + linux_set_current(td); size = IOCPARM_LEN(cmd); /* refer to logic in sys_ioctl() */ if (size > 0) { /* * Setup hint for linux_copyin() and linux_copyout(). * * Background: Linux code expects a user-space address * while FreeBSD supplies a kernel-space address. */ current->bsd_ioctl_data = data; current->bsd_ioctl_len = size; data = (void *)LINUX_IOCTL_MIN_PTR; } else { /* fetch user-space pointer */ data = *(void **)data; } if (filp->f_op->unlocked_ioctl) error = -filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data); else error = ENOTTY; if (size > 0) { current->bsd_ioctl_data = NULL; current->bsd_ioctl_len = 0; } - if (error == EWOULDBLOCK) - linux_dev_kqfilter_poll(filp); - else if (error == ERESTARTSYS) + if (error == EWOULDBLOCK) { + /* update kqfilter status, if any */ + linux_dev_kqfilter_poll(filp, + LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); + } else if (error == ERESTARTSYS) error = ERESTART; return (error); } static int linux_dev_read(struct cdev *dev, struct uio *uio, int ioflag) { struct linux_file *filp; struct thread *td; struct file *file; ssize_t bytes; int error; td = curthread; file = td->td_fpop; if (dev->si_drv1 == NULL) return (ENXIO); if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) return (error); filp->f_flags = file->f_flag; /* XXX no support for I/O vectors currently */ if (uio->uio_iovcnt != 1) return (EOPNOTSUPP); linux_set_current(td); if (filp->f_op->read) { bytes = filp->f_op->read(filp, uio->uio_iov->iov_base, uio->uio_iov->iov_len, &uio->uio_offset); if (bytes >= 0) { uio->uio_iov->iov_base = ((uint8_t *)uio->uio_iov->iov_base) + bytes; uio->uio_iov->iov_len -= bytes; uio->uio_resid -= bytes; } else { error = -bytes; - if (error == EWOULDBLOCK) - linux_dev_kqfilter_poll(filp); - else if (error == ERESTARTSYS) + if (error == ERESTARTSYS) error = ERESTART; } } else error = ENXIO; + /* update kqfilter status, if any */ + linux_dev_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); + return (error); } static int linux_dev_write(struct cdev *dev, struct uio *uio, int ioflag) { struct linux_file *filp; struct thread *td; struct file *file; ssize_t bytes; int error; td = curthread; file = td->td_fpop; if (dev->si_drv1 == NULL) return (ENXIO); if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) return (error); filp->f_flags = file->f_flag; /* XXX no support for I/O vectors currently */ if (uio->uio_iovcnt != 1) return (EOPNOTSUPP); linux_set_current(td); if (filp->f_op->write) { bytes = filp->f_op->write(filp, uio->uio_iov->iov_base, uio->uio_iov->iov_len, &uio->uio_offset); if (bytes >= 0) { uio->uio_iov->iov_base = ((uint8_t *)uio->uio_iov->iov_base) + bytes; uio->uio_iov->iov_len -= bytes; uio->uio_resid -= bytes; } else { error = -bytes; - if (error == EWOULDBLOCK) - linux_dev_kqfilter_poll(filp); - else if (error == ERESTARTSYS) + if (error == ERESTARTSYS) error = ERESTART; } } else error = ENXIO; + /* update kqfilter status, if any */ + linux_dev_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); + return (error); } static int linux_dev_poll(struct 
cdev *dev, int events, struct thread *td) { struct linux_file *filp; struct file *file; int revents; if (dev->si_drv1 == NULL) goto error; if (devfs_get_cdevpriv((void **)&filp) != 0) goto error; file = td->td_fpop; filp->f_flags = file->f_flag; linux_set_current(td); if (filp->f_op->poll != NULL) { selrecord(td, &filp->f_selinfo); revents = filp->f_op->poll(filp, NULL) & events; } else revents = 0; return (revents); error: return (events & (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM)); } void linux_poll_wakeup(struct linux_file *filp) { /* this function should be NULL-safe */ if (filp == NULL) return; selwakeup(&filp->f_selinfo); spin_lock(&filp->f_kqlock); filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | LINUX_KQ_FLAG_NEED_WRITE; /* make sure the "knote" gets woken up */ KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); spin_unlock(&filp->f_kqlock); } static void linux_dev_kqfilter_detach(struct knote *kn) { struct linux_file *filp = kn->kn_hook; spin_lock(&filp->f_kqlock); knlist_remove(&filp->f_selinfo.si_note, kn, 1); spin_unlock(&filp->f_kqlock); } static int linux_dev_kqfilter_read_event(struct knote *kn, long hint) { struct linux_file *filp = kn->kn_hook; mtx_assert(&filp->f_kqlock.m, MA_OWNED); return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0); } static int linux_dev_kqfilter_write_event(struct knote *kn, long hint) { struct linux_file *filp = kn->kn_hook; mtx_assert(&filp->f_kqlock.m, MA_OWNED); return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0); } static struct filterops linux_dev_kqfiltops_read = { .f_isfd = 1, .f_detach = linux_dev_kqfilter_detach, .f_event = linux_dev_kqfilter_read_event, }; static struct filterops linux_dev_kqfiltops_write = { .f_isfd = 1, .f_detach = linux_dev_kqfilter_detach, .f_event = linux_dev_kqfilter_write_event, }; static void -linux_dev_kqfilter_poll(struct linux_file *filp) +linux_dev_kqfilter_poll(struct linux_file *filp, int kqflags) { int temp; - spin_lock(&filp->f_kqlock); - temp = (filp->f_kqflags & (LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE)); - filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | LINUX_KQ_FLAG_NEED_WRITE); - spin_unlock(&filp->f_kqlock); - - if (temp != 0) { + if (filp->f_kqflags & kqflags) { /* get the latest polling state */ temp = filp->f_op->poll(filp, NULL); + spin_lock(&filp->f_kqlock); + /* clear kqflags */ + filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | + LINUX_KQ_FLAG_NEED_WRITE); + /* update kqflags */ if (temp & (POLLIN | POLLOUT)) { - spin_lock(&filp->f_kqlock); if (temp & POLLIN) filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ; if (temp & POLLOUT) filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE; /* make sure the "knote" gets woken up */ KNOTE_LOCKED(&filp->f_selinfo.si_note, 0); - spin_unlock(&filp->f_kqlock); } + spin_unlock(&filp->f_kqlock); } } static int linux_dev_kqfilter(struct cdev *dev, struct knote *kn) { struct linux_file *filp; struct file *file; struct thread *td; int error; td = curthread; file = td->td_fpop; if (dev->si_drv1 == NULL) return (ENXIO); if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) return (error); filp->f_flags = file->f_flag; if (filp->f_op->poll == NULL) return (EINVAL); spin_lock(&filp->f_kqlock); switch (kn->kn_filter) { case EVFILT_READ: filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ; kn->kn_fop = &linux_dev_kqfiltops_read; kn->kn_hook = filp; knlist_add(&filp->f_selinfo.si_note, kn, 1); break; case EVFILT_WRITE: filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE; kn->kn_fop = &linux_dev_kqfiltops_write; kn->kn_hook = filp; knlist_add(&filp->f_selinfo.si_note, kn, 1); break; 
default: error = EINVAL; break; } spin_unlock(&filp->f_kqlock); if (error == 0) { linux_set_current(td); - linux_dev_kqfilter_poll(filp); + + /* update kqfilter status, if any */ + linux_dev_kqfilter_poll(filp, + LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); } return (error); } static int linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, int nprot) { struct vm_area_struct *vmap; struct mm_struct *mm; struct linux_file *filp; struct thread *td; struct file *file; vm_memattr_t attr; int error; td = curthread; file = td->td_fpop; if (dev->si_drv1 == NULL) return (ENODEV); if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) return (error); filp->f_flags = file->f_flag; if (filp->f_op->mmap == NULL) return (ENODEV); linux_set_current(td); /* * The same VM object might be shared by multiple processes * and the mm_struct is usually freed when a process exits. * * The atomic reference below makes sure the mm_struct is * available as long as the vmap is in the linux_vma_head. */ mm = current->mm; if (atomic_inc_not_zero(&mm->mm_users) == 0) return (EINVAL); vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); vmap->vm_start = 0; vmap->vm_end = size; vmap->vm_pgoff = *offset / PAGE_SIZE; vmap->vm_pfn = 0; vmap->vm_flags = vmap->vm_page_prot = nprot; vmap->vm_ops = NULL; vmap->vm_file = get_file(filp); vmap->vm_mm = mm; if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { error = EINTR; } else { error = -filp->f_op->mmap(filp, vmap); up_write(&vmap->vm_mm->mmap_sem); } if (error != 0) { linux_cdev_handle_free(vmap); return (error); } attr = pgprot2cachemode(vmap->vm_page_prot); if (vmap->vm_ops != NULL) { void *vm_private_data; if (vmap->vm_ops->fault == NULL || vmap->vm_ops->open == NULL || vmap->vm_ops->close == NULL || vmap->vm_private_data == NULL) { linux_cdev_handle_free(vmap); return (EINVAL); } vm_private_data = vmap->vm_private_data; vmap = linux_cdev_handle_insert(vm_private_data, vmap); *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, &linux_cdev_pager_ops, size, nprot, *offset, curthread->td_ucred); if (*object == NULL) { linux_cdev_handle_remove(vmap); linux_cdev_handle_free(vmap); return (EINVAL); } } else { struct sglist *sg; sg = sglist_alloc(1, M_WAITOK); sglist_append_phys(sg, (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, nprot, 0, curthread->td_ucred); linux_cdev_handle_free(vmap); if (*object == NULL) { sglist_free(sg); return (EINVAL); } } if (attr != VM_MEMATTR_DEFAULT) { VM_OBJECT_WLOCK(*object); vm_object_set_memattr(*object, attr); VM_OBJECT_WUNLOCK(*object); } *offset = 0; return (0); } struct cdevsw linuxcdevsw = { .d_version = D_VERSION, .d_flags = D_TRACKCLOSE, .d_open = linux_dev_open, .d_close = linux_dev_close, .d_read = linux_dev_read, .d_write = linux_dev_write, .d_ioctl = linux_dev_ioctl, .d_mmap_single = linux_dev_mmap_single, .d_poll = linux_dev_poll, .d_kqfilter = linux_dev_kqfilter, .d_name = "lkpidev", }; static int linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct linux_file *filp; ssize_t bytes; int error; error = 0; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; /* XXX no support for I/O vectors currently */ if (uio->uio_iovcnt != 1) return (EOPNOTSUPP); linux_set_current(td); if (filp->f_op->read) { bytes = filp->f_op->read(filp, uio->uio_iov->iov_base, uio->uio_iov->iov_len, &uio->uio_offset); if (bytes >= 0) { 
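			/* consume the read bytes from the sole I/O vector */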
uio->uio_iov->iov_base = ((uint8_t *)uio->uio_iov->iov_base) + bytes; uio->uio_iov->iov_len -= bytes; uio->uio_resid -= bytes; } else error = -bytes; } else error = ENXIO; return (error); } static int linux_file_poll(struct file *file, int events, struct ucred *active_cred, struct thread *td) { struct linux_file *filp; int revents; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; linux_set_current(td); if (filp->f_op->poll != NULL) { selrecord(td, &filp->f_selinfo); revents = filp->f_op->poll(filp, NULL) & events; } else revents = 0; return (revents); } static int linux_file_close(struct file *file, struct thread *td) { struct linux_file *filp; int error; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; linux_set_current(td); error = -filp->f_op->release(NULL, filp); funsetown(&filp->f_sigio); kfree(filp); return (error); } static int linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, struct thread *td) { struct linux_file *filp; int error; filp = (struct linux_file *)fp->f_data; filp->f_flags = fp->f_flag; error = 0; linux_set_current(td); switch (cmd) { case FIONBIO: break; case FIOASYNC: if (filp->f_op->fasync == NULL) break; error = filp->f_op->fasync(0, filp, fp->f_flag & FASYNC); break; case FIOSETOWN: error = fsetown(*(int *)data, &filp->f_sigio); if (error == 0) error = filp->f_op->fasync(0, filp, fp->f_flag & FASYNC); break; case FIOGETOWN: *(int *)data = fgetown(&filp->f_sigio); break; default: error = ENOTTY; break; } return (error); } static int linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, struct thread *td) { return (EOPNOTSUPP); } static int linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) { return (0); } struct fileops linuxfileops = { .fo_read = linux_file_read, .fo_write = invfo_rdwr, .fo_truncate = invfo_truncate, .fo_kqfilter = invfo_kqfilter, .fo_stat = linux_file_stat, .fo_fill_kinfo = linux_file_fill_kinfo, .fo_poll = linux_file_poll, .fo_close = linux_file_close, .fo_ioctl = linux_file_ioctl, .fo_chmod = invfo_chmod, .fo_chown = invfo_chown, .fo_sendfile = invfo_sendfile, }; /* * Hash of vmmap addresses. This is infrequently accessed and does not * need to be particularly large. This is done because we must store the * caller's idea of the map size to properly unmap. 
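 * Lookups hash the mapping's page frame number into one of
 * VMMAP_HASH_SIZE buckets; see the VM_HASH() macro below.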
*/ struct vmmap { LIST_ENTRY(vmmap) vm_next; void *vm_addr; unsigned long vm_size; }; struct vmmaphd { struct vmmap *lh_first; }; #define VMMAP_HASH_SIZE 64 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; static struct mtx vmmaplock; static void vmmap_add(void *addr, unsigned long size) { struct vmmap *vmmap; vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); mtx_lock(&vmmaplock); vmmap->vm_size = size; vmmap->vm_addr = addr; LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); mtx_unlock(&vmmaplock); } static struct vmmap * vmmap_remove(void *addr) { struct vmmap *vmmap; mtx_lock(&vmmaplock); LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) if (vmmap->vm_addr == addr) break; if (vmmap) LIST_REMOVE(vmmap, vm_next); mtx_unlock(&vmmaplock); return (vmmap); } #if defined(__i386__) || defined(__amd64__) void * _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) { void *addr; addr = pmap_mapdev_attr(phys_addr, size, attr); if (addr == NULL) return (NULL); vmmap_add(addr, size); return (addr); } #endif void iounmap(void *addr) { struct vmmap *vmmap; vmmap = vmmap_remove(addr); if (vmmap == NULL) return; #if defined(__i386__) || defined(__amd64__) pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); #endif kfree(vmmap); } void * vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) { vm_offset_t off; size_t size; size = count * PAGE_SIZE; off = kva_alloc(size); if (off == 0) return (NULL); vmmap_add((void *)off, size); pmap_qenter(off, pages, count); return ((void *)off); } void vunmap(void *addr) { struct vmmap *vmmap; vmmap = vmmap_remove(addr); if (vmmap == NULL) return; pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); kva_free((vm_offset_t)addr, vmmap->vm_size); kfree(vmmap); } char * kvasprintf(gfp_t gfp, const char *fmt, va_list ap) { unsigned int len; char *p; va_list aq; va_copy(aq, ap); len = vsnprintf(NULL, 0, fmt, aq); va_end(aq); p = kmalloc(len + 1, gfp); if (p != NULL) vsnprintf(p, len + 1, fmt, ap); return (p); } char * kasprintf(gfp_t gfp, const char *fmt, ...) 
{ va_list ap; char *p; va_start(ap, fmt); p = kvasprintf(gfp, fmt, ap); va_end(ap); return (p); } static void linux_timer_callback_wrapper(void *context) { struct timer_list *timer; linux_set_current(curthread); timer = context; timer->function(timer->data); } void mod_timer(struct timer_list *timer, unsigned long expires) { timer->expires = expires; callout_reset(&timer->timer_callout, linux_timer_jiffies_until(expires), &linux_timer_callback_wrapper, timer); } void add_timer(struct timer_list *timer) { callout_reset(&timer->timer_callout, linux_timer_jiffies_until(timer->expires), &linux_timer_callback_wrapper, timer); } void add_timer_on(struct timer_list *timer, int cpu) { callout_reset_on(&timer->timer_callout, linux_timer_jiffies_until(timer->expires), &linux_timer_callback_wrapper, timer, cpu); } static void linux_timer_init(void *arg) { /* * Compute an internal HZ value which can divide 2**32 to * avoid timer rounding problems when the tick value wraps * around 2**32: */ linux_timer_hz_mask = 1; while (linux_timer_hz_mask < (unsigned long)hz) linux_timer_hz_mask *= 2; linux_timer_hz_mask--; } SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); void linux_complete_common(struct completion *c, int all) { int wakeup_swapper; sleepq_lock(c); c->done++; if (all) wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); else wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); sleepq_release(c); if (wakeup_swapper) kick_proc0(); } /* * Indefinite wait for done != 0 with or without signals. */ long linux_wait_for_common(struct completion *c, int flags) { long error; if (SCHEDULER_STOPPED()) return (0); DROP_GIANT(); if (flags != 0) flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; else flags = SLEEPQ_SLEEP; error = 0; for (;;) { sleepq_lock(c); if (c->done) break; sleepq_add(c, NULL, "completion", flags, 0); if (flags & SLEEPQ_INTERRUPTIBLE) { if (sleepq_wait_sig(c, 0) != 0) { error = -ERESTARTSYS; goto intr; } } else sleepq_wait(c, 0); } c->done--; sleepq_release(c); intr: PICKUP_GIANT(); return (error); } /* * Time limited wait for done != 0 with or without signals. */ long linux_wait_for_timeout_common(struct completion *c, long timeout, int flags) { long end = jiffies + timeout, error; int ret; if (SCHEDULER_STOPPED()) return (0); DROP_GIANT(); if (flags != 0) flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; else flags = SLEEPQ_SLEEP; error = 0; ret = 0; for (;;) { sleepq_lock(c); if (c->done) break; sleepq_add(c, NULL, "completion", flags, 0); sleepq_set_timeout(c, linux_timer_jiffies_until(end)); if (flags & SLEEPQ_INTERRUPTIBLE) ret = sleepq_timedwait_sig(c, 0); else ret = sleepq_timedwait(c, 0); if (ret != 0) { /* check for timeout or signal */ if (ret == EWOULDBLOCK) error = 0; else error = -ERESTARTSYS; goto intr; } } c->done--; sleepq_release(c); intr: PICKUP_GIANT(); /* return how many jiffies are left */ return (ret != 0 ? 
error : linux_timer_jiffies_until(end)); } int linux_try_wait_for_completion(struct completion *c) { int isdone; isdone = 1; sleepq_lock(c); if (c->done) c->done--; else isdone = 0; sleepq_release(c); return (isdone); } int linux_completion_done(struct completion *c) { int isdone; isdone = 1; sleepq_lock(c); if (c->done == 0) isdone = 0; sleepq_release(c); return (isdone); } static void linux_cdev_release(struct kobject *kobj) { struct linux_cdev *cdev; struct kobject *parent; cdev = container_of(kobj, struct linux_cdev, kobj); parent = kobj->parent; if (cdev->cdev) destroy_dev(cdev->cdev); kfree(cdev); kobject_put(parent); } static void linux_cdev_static_release(struct kobject *kobj) { struct linux_cdev *cdev; struct kobject *parent; cdev = container_of(kobj, struct linux_cdev, kobj); parent = kobj->parent; if (cdev->cdev) destroy_dev(cdev->cdev); kobject_put(parent); } const struct kobj_type linux_cdev_ktype = { .release = linux_cdev_release, }; const struct kobj_type linux_cdev_static_ktype = { .release = linux_cdev_static_release, }; static void linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate) { struct notifier_block *nb; nb = arg; if (linkstate == LINK_STATE_UP) nb->notifier_call(nb, NETDEV_UP, ifp); else nb->notifier_call(nb, NETDEV_DOWN, ifp); } static void linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; nb = arg; nb->notifier_call(nb, NETDEV_REGISTER, ifp); } static void linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; nb = arg; nb->notifier_call(nb, NETDEV_UNREGISTER, ifp); } static void linux_handle_iflladdr_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; nb = arg; nb->notifier_call(nb, NETDEV_CHANGEADDR, ifp); } static void linux_handle_ifaddr_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; nb = arg; nb->notifier_call(nb, NETDEV_CHANGEIFADDR, ifp); } int register_netdevice_notifier(struct notifier_block *nb) { nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER( ifnet_link_event, linux_handle_ifnet_link_event, nb, 0); nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER( ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0); nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER( ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0); nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER( iflladdr_event, linux_handle_iflladdr_event, nb, 0); return (0); } int register_inetaddr_notifier(struct notifier_block *nb) { nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER( ifaddr_event, linux_handle_ifaddr_event, nb, 0); return (0); } int unregister_netdevice_notifier(struct notifier_block *nb) { EVENTHANDLER_DEREGISTER(ifnet_link_event, nb->tags[NETDEV_UP]); EVENTHANDLER_DEREGISTER(ifnet_arrival_event, nb->tags[NETDEV_REGISTER]); EVENTHANDLER_DEREGISTER(ifnet_departure_event, nb->tags[NETDEV_UNREGISTER]); EVENTHANDLER_DEREGISTER(iflladdr_event, nb->tags[NETDEV_CHANGEADDR]); return (0); } int unregister_inetaddr_notifier(struct notifier_block *nb) { EVENTHANDLER_DEREGISTER(ifaddr_event, nb->tags[NETDEV_CHANGEIFADDR]); return (0); } struct list_sort_thunk { int (*cmp)(void *, struct list_head *, struct list_head *); void *priv; }; static inline int linux_le_cmp(void *priv, const void *d1, const void *d2) { struct list_head *le1, *le2; struct list_sort_thunk *thunk; thunk = priv; le1 = *(__DECONST(struct list_head **, d1)); le2 = *(__DECONST(struct list_head **, d2)); return ((thunk->cmp)(thunk->priv, le1, le2)); } void list_sort(void 
*priv, struct list_head *head, int (*cmp)(void *priv, struct list_head *a, struct list_head *b)) { struct list_sort_thunk thunk; struct list_head **ar, *le; size_t count, i; count = 0; list_for_each(le, head) count++; ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK); i = 0; list_for_each(le, head) ar[i++] = le; thunk.cmp = cmp; thunk.priv = priv; qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp); INIT_LIST_HEAD(head); for (i = 0; i < count; i++) list_add_tail(ar[i], head); free(ar, M_KMALLOC); } void linux_irq_handler(void *ent) { struct irq_ent *irqe; linux_set_current(curthread); irqe = ent; irqe->handler(irqe->irq, irqe->arg); } #if defined(__i386__) || defined(__amd64__) int linux_wbinvd_on_all_cpus(void) { pmap_invalidate_cache(); return (0); } #endif int linux_on_each_cpu(void callback(void *), void *data) { smp_rendezvous(smp_no_rendezvous_barrier, callback, smp_no_rendezvous_barrier, data); return (0); } int linux_in_atomic(void) { return ((curthread->td_pflags & TDP_NOFAULTING) != 0); } struct linux_cdev * linux_find_cdev(const char *name, unsigned major, unsigned minor) { int unit = MKDEV(major, minor); struct cdev *cdev; dev_lock(); LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) { struct linux_cdev *ldev = cdev->si_drv1; if (dev2unit(cdev) == unit && strcmp(kobject_name(&ldev->kobj), name) == 0) { break; } } dev_unlock(); return (cdev != NULL ? cdev->si_drv1 : NULL); } int __register_chrdev(unsigned int major, unsigned int baseminor, unsigned int count, const char *name, const struct file_operations *fops) { struct linux_cdev *cdev; int ret = 0; int i; for (i = baseminor; i < baseminor + count; i++) { cdev = cdev_alloc(); cdev_init(cdev, fops); kobject_set_name(&cdev->kobj, name); ret = cdev_add(cdev, makedev(major, i), 1); if (ret != 0) break; } return (ret); } int __register_chrdev_p(unsigned int major, unsigned int baseminor, unsigned int count, const char *name, const struct file_operations *fops, uid_t uid, gid_t gid, int mode) { struct linux_cdev *cdev; int ret = 0; int i; for (i = baseminor; i < baseminor + count; i++) { cdev = cdev_alloc(); cdev_init(cdev, fops); kobject_set_name(&cdev->kobj, name); ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode); if (ret != 0) break; } return (ret); } void __unregister_chrdev(unsigned int major, unsigned int baseminor, unsigned int count, const char *name) { struct linux_cdev *cdevp; int i; for (i = baseminor; i < baseminor + count; i++) { cdevp = linux_find_cdev(name, major, i); if (cdevp != NULL) cdev_del(cdevp); } } #if defined(__i386__) || defined(__amd64__) bool linux_cpu_has_clflush; #endif static void linux_compat_init(void *arg) { struct sysctl_oid *rootoid; int i; #if defined(__i386__) || defined(__amd64__) linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH); #endif rw_init(&linux_vma_lock, "lkpi-vma-lock"); rootoid = SYSCTL_ADD_ROOT_NODE(NULL, OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys"); kobject_init(&linux_class_root, &linux_class_ktype); kobject_set_name(&linux_class_root, "class"); linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class"); kobject_init(&linux_root_device.kobj, &linux_dev_ktype); kobject_set_name(&linux_root_device.kobj, "device"); linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL, "device"); linux_root_device.bsddev = root_bus; linux_class_misc.name = "misc"; class_register(&linux_class_misc); 
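	/* Set up the LinuxKPI PCI emulation lists, their lock, and the vmmap hash. */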
INIT_LIST_HEAD(&pci_drivers); INIT_LIST_HEAD(&pci_devices); spin_lock_init(&pci_lock); mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF); for (i = 0; i < VMMAP_HASH_SIZE; i++) LIST_INIT(&vmmaphead[i]); } SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL); static void linux_compat_uninit(void *arg) { linux_kobject_kfree_name(&linux_class_root); linux_kobject_kfree_name(&linux_root_device.kobj); linux_kobject_kfree_name(&linux_class_misc.kobj); rw_destroy(&linux_vma_lock); } SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL); /* * NOTE: Linux frequently uses "unsigned long" for pointer to integer * conversion and vice versa, where in FreeBSD "uintptr_t" would be * used. Assert these types have the same size, else some parts of the * LinuxKPI may not work like expected: */ CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t)); Index: projects/clang500-import/sys/conf/files.amd64 =================================================================== --- projects/clang500-import/sys/conf/files.amd64 (revision 319548) +++ projects/clang500-import/sys/conf/files.amd64 (revision 319549) @@ -1,715 +1,715 @@ # This file tells config what files go into building a kernel, # files marked standard are always included. # # $FreeBSD$ # # The long compile-with and dependency lines are required because of # limitations in config: backslash-newline doesn't work in strings, and # dependency lines other than the first are silently ignored. # # cloudabi32_vdso.o optional compat_cloudabi32 \ dependency "$S/contrib/cloudabi/cloudabi_vdso_i686_on_64bit.S" \ compile-with "${CC} -x assembler-with-cpp -m32 -shared -nostdinc -nostdlib -Wl,-T$S/compat/cloudabi/cloudabi_vdso.lds $S/contrib/cloudabi/cloudabi_vdso_i686_on_64bit.S -o ${.TARGET}" \ no-obj no-implicit-rule \ clean "cloudabi32_vdso.o" # cloudabi32_vdso_blob.o optional compat_cloudabi32 \ dependency "cloudabi32_vdso.o" \ compile-with "${OBJCOPY} --input-target binary --output-target elf64-x86-64-freebsd --binary-architecture i386 cloudabi32_vdso.o ${.TARGET}" \ no-implicit-rule \ clean "cloudabi32_vdso_blob.o" # cloudabi64_vdso.o optional compat_cloudabi64 \ dependency "$S/contrib/cloudabi/cloudabi_vdso_x86_64.S" \ compile-with "${CC} -x assembler-with-cpp -shared -nostdinc -nostdlib -Wl,-T$S/compat/cloudabi/cloudabi_vdso.lds $S/contrib/cloudabi/cloudabi_vdso_x86_64.S -o ${.TARGET}" \ no-obj no-implicit-rule \ clean "cloudabi64_vdso.o" # cloudabi64_vdso_blob.o optional compat_cloudabi64 \ dependency "cloudabi64_vdso.o" \ compile-with "${OBJCOPY} --input-target binary --output-target elf64-x86-64-freebsd --binary-architecture i386 cloudabi64_vdso.o ${.TARGET}" \ no-implicit-rule \ clean "cloudabi64_vdso_blob.o" # linux32_genassym.o optional compat_linux32 \ dependency "$S/amd64/linux32/linux32_genassym.c" \ compile-with "${CC} ${CFLAGS:N-flto:N-fno-common} -c ${.IMPSRC}" \ no-obj no-implicit-rule \ clean "linux32_genassym.o" # linux32_assym.h optional compat_linux32 \ dependency "$S/kern/genassym.sh linux32_genassym.o" \ compile-with "sh $S/kern/genassym.sh linux32_genassym.o > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "linux32_assym.h" # linux32_locore.o optional compat_linux32 \ dependency "linux32_assym.h $S/amd64/linux32/linux32_locore.s" \ - compile-with "${CC} -x assembler-with-cpp -DLOCORE -m32 -shared -s -pipe -I. 
-I$S -Werror -Wall -fno-common -nostdinc -nostdlib -Wl,-T$S/amd64/linux32/linux32_vdso.lds.s -Wl,-soname=linux32_vdso.so,--eh-frame-hdr,-fPIC,-warn-common ${.IMPSRC} -o ${.TARGET}" \ + compile-with "${CC} -x assembler-with-cpp -DLOCORE -m32 -shared -s -pipe -I. -I$S -Werror -Wall -fPIC -fno-common -nostdinc -nostdlib -Wl,-T$S/amd64/linux32/linux32_vdso.lds.s -Wl,-soname=linux32_vdso.so,--eh-frame-hdr,-warn-common ${.IMPSRC} -o ${.TARGET}" \ no-obj no-implicit-rule \ clean "linux32_locore.o" # linux32_vdso.so optional compat_linux32 \ dependency "linux32_locore.o" \ compile-with "${OBJCOPY} --input-target binary --output-target elf64-x86-64-freebsd --binary-architecture i386 linux32_locore.o ${.TARGET}" \ no-implicit-rule \ clean "linux32_vdso.so" # ia32_genassym.o standard \ dependency "$S/compat/ia32/ia32_genassym.c" \ compile-with "${CC} ${CFLAGS:N-flto:N-fno-common} -c ${.IMPSRC}" \ no-obj no-implicit-rule \ clean "ia32_genassym.o" # ia32_assym.h standard \ dependency "$S/kern/genassym.sh ia32_genassym.o" \ compile-with "env NM='${NM}' NMFLAGS='${NMFLAGS}' sh $S/kern/genassym.sh ia32_genassym.o > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "ia32_assym.h" # font.h optional sc_dflt_font \ compile-with "uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x16.fnt && file2c 'static u_char dflt_font_16[16*256] = {' '};' < ${SC_DFLT_FONT}-8x16 > font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x14.fnt && file2c 'static u_char dflt_font_14[14*256] = {' '};' < ${SC_DFLT_FONT}-8x14 >> font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x8.fnt && file2c 'static u_char dflt_font_8[8*256] = {' '};' < ${SC_DFLT_FONT}-8x8 >> font.h" \ no-obj no-implicit-rule before-depend \ clean "font.h ${SC_DFLT_FONT}-8x14 ${SC_DFLT_FONT}-8x16 ${SC_DFLT_FONT}-8x8" # atkbdmap.h optional atkbd_dflt_keymap \ compile-with "kbdcontrol -P ${S:S/sys$/share/}/vt/keymaps -P ${S:S/sys$/share/}/syscons/keymaps -L ${ATKBD_DFLT_KEYMAP} | sed -e 's/^static keymap_t.* = /static keymap_t key_map = /' -e 's/^static accentmap_t.* = /static accentmap_t accent_map = /' > atkbdmap.h" \ no-obj no-implicit-rule before-depend \ clean "atkbdmap.h" # ukbdmap.h optional ukbd_dflt_keymap \ compile-with "kbdcontrol -P ${S:S/sys$/share/}/vt/keymaps -P ${S:S/sys$/share/}/syscons/keymaps -L ${UKBD_DFLT_KEYMAP} | sed -e 's/^static keymap_t.* = /static keymap_t key_map = /' -e 's/^static accentmap_t.* = /static accentmap_t accent_map = /' > ukbdmap.h" \ no-obj no-implicit-rule before-depend \ clean "ukbdmap.h" # hpt27xx_lib.o optional hpt27xx \ dependency "$S/dev/hpt27xx/amd64-elf.hpt27xx_lib.o.uu" \ compile-with "uudecode < $S/dev/hpt27xx/amd64-elf.hpt27xx_lib.o.uu" \ no-implicit-rule # hptmvraid.o optional hptmv \ dependency "$S/dev/hptmv/amd64-elf.raid.o.uu" \ compile-with "uudecode < $S/dev/hptmv/amd64-elf.raid.o.uu" \ no-implicit-rule # hptnr_lib.o optional hptnr \ dependency "$S/dev/hptnr/amd64-elf.hptnr_lib.o.uu" \ compile-with "uudecode < $S/dev/hptnr/amd64-elf.hptnr_lib.o.uu" \ no-implicit-rule # hptrr_lib.o optional hptrr \ dependency "$S/dev/hptrr/amd64-elf.hptrr_lib.o.uu" \ compile-with "uudecode < $S/dev/hptrr/amd64-elf.hptrr_lib.o.uu" \ no-implicit-rule # amd64/acpica/acpi_machdep.c optional acpi acpi_wakecode.o optional acpi \ dependency "$S/amd64/acpica/acpi_wakecode.S assym.s" \ compile-with "${NORMAL_S}" \ no-obj no-implicit-rule before-depend \ clean "acpi_wakecode.o" acpi_wakecode.bin optional acpi \ dependency "acpi_wakecode.o" \ compile-with "${OBJCOPY} -S -O binary acpi_wakecode.o 
${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "acpi_wakecode.bin" acpi_wakecode.h optional acpi \ dependency "acpi_wakecode.bin" \ compile-with "file2c -sx 'static char wakecode[] = {' '};' < acpi_wakecode.bin > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "acpi_wakecode.h" acpi_wakedata.h optional acpi \ dependency "acpi_wakecode.o" \ compile-with '${NM} -n --defined-only acpi_wakecode.o | while read offset dummy what; do echo "#define $${what} 0x$${offset}"; done > ${.TARGET}' \ no-obj no-implicit-rule before-depend \ clean "acpi_wakedata.h" # #amd64/amd64/apic_vector.S standard amd64/amd64/atomic.c standard amd64/amd64/bios.c standard amd64/amd64/bpf_jit_machdep.c optional bpf_jitter amd64/amd64/cpu_switch.S standard amd64/amd64/db_disasm.c optional ddb amd64/amd64/db_interface.c optional ddb amd64/amd64/db_trace.c optional ddb amd64/amd64/efirt.c optional efirt amd64/amd64/elf_machdep.c standard amd64/amd64/exception.S standard amd64/amd64/fpu.c standard amd64/amd64/gdb_machdep.c optional gdb amd64/amd64/in_cksum.c optional inet | inet6 amd64/amd64/initcpu.c standard amd64/amd64/io.c optional io amd64/amd64/locore.S standard no-obj amd64/amd64/xen-locore.S optional xenhvm amd64/amd64/machdep.c standard amd64/amd64/mem.c optional mem amd64/amd64/minidump_machdep.c standard amd64/amd64/mp_machdep.c optional smp amd64/amd64/mpboot.S optional smp amd64/amd64/pmap.c standard amd64/amd64/prof_machdep.c optional profiling-routine amd64/amd64/ptrace_machdep.c standard amd64/amd64/sigtramp.S standard amd64/amd64/support.S standard amd64/amd64/sys_machdep.c standard amd64/amd64/trap.c standard amd64/amd64/uio_machdep.c standard amd64/amd64/uma_machdep.c standard amd64/amd64/vm_machdep.c standard amd64/cloudabi32/cloudabi32_sysvec.c optional compat_cloudabi32 amd64/cloudabi64/cloudabi64_sysvec.c optional compat_cloudabi64 amd64/pci/pci_cfgreg.c optional pci cddl/contrib/opensolaris/common/atomic/amd64/opensolaris_atomic.S optional zfs | dtrace compile-with "${ZFS_S}" cddl/dev/dtrace/amd64/dtrace_asm.S optional dtrace compile-with "${DTRACE_S}" cddl/dev/dtrace/amd64/dtrace_subr.c optional dtrace compile-with "${DTRACE_C}" cddl/dev/fbt/x86/fbt_isa.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}" cddl/dev/dtrace/x86/dis_tables.c optional dtrace_fbt | dtraceall compile-with "${DTRACE_C}" cddl/dev/dtrace/x86/instr_size.c optional dtrace_fbt | dtraceall compile-with "${DTRACE_C}" crypto/aesni/aeskeys_amd64.S optional aesni crypto/aesni/aesni.c optional aesni aesni_ghash.o optional aesni \ dependency "$S/crypto/aesni/aesni_ghash.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${NO_WCAST_QUAL} ${PROF} -mmmx -msse -msse4 -maes -mpclmul ${.IMPSRC}" \ no-implicit-rule \ clean "aesni_ghash.o" aesni_wrap.o optional aesni \ dependency "$S/crypto/aesni/aesni_wrap.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${NO_WCAST_QUAL} ${PROF} -mmmx -msse -msse4 -maes ${.IMPSRC}" \ no-implicit-rule \ clean "aesni_wrap.o" crypto/blowfish/bf_enc.c optional crypto | ipsec | ipsec_support crypto/des/des_enc.c optional crypto | ipsec | \ ipsec_support | netsmb crypto/via/padlock.c optional padlock crypto/via/padlock_cipher.c optional padlock crypto/via/padlock_hash.c optional padlock dev/acpica/acpi_if.m standard dev/acpica/acpi_hpet.c optional acpi dev/acpica/acpi_pci.c optional acpi pci dev/acpica/acpi_pci_link.c optional acpi pci dev/acpica/acpi_pcib.c optional acpi pci dev/acpica/acpi_pcib_acpi.c optional acpi pci 
dev/acpica/acpi_pcib_pci.c optional acpi pci dev/acpica/acpi_timer.c optional acpi dev/acpi_support/acpi_wmi_if.m standard dev/agp/agp_amd64.c optional agp dev/agp/agp_i810.c optional agp dev/agp/agp_via.c optional agp dev/amdsbwd/amdsbwd.c optional amdsbwd dev/amdtemp/amdtemp.c optional amdtemp dev/arcmsr/arcmsr.c optional arcmsr pci dev/asmc/asmc.c optional asmc isa dev/atkbdc/atkbd.c optional atkbd atkbdc dev/atkbdc/atkbd_atkbdc.c optional atkbd atkbdc dev/atkbdc/atkbdc.c optional atkbdc dev/atkbdc/atkbdc_isa.c optional atkbdc isa dev/atkbdc/atkbdc_subr.c optional atkbdc dev/atkbdc/psm.c optional psm atkbdc dev/bxe/bxe.c optional bxe pci dev/bxe/bxe_stats.c optional bxe pci dev/bxe/bxe_debug.c optional bxe pci dev/bxe/ecore_sp.c optional bxe pci dev/bxe/bxe_elink.c optional bxe pci dev/bxe/57710_init_values.c optional bxe pci dev/bxe/57711_init_values.c optional bxe pci dev/bxe/57712_init_values.c optional bxe pci dev/coretemp/coretemp.c optional coretemp dev/cpuctl/cpuctl.c optional cpuctl dev/dpms/dpms.c optional dpms # There are no systems with isa slots, so all ed isa entries should go.. dev/ed/if_ed_3c503.c optional ed isa ed_3c503 dev/ed/if_ed_isa.c optional ed isa dev/ed/if_ed_wd80x3.c optional ed isa dev/ed/if_ed_hpp.c optional ed isa ed_hpp dev/ed/if_ed_sic.c optional ed isa ed_sic dev/fb/fb.c optional fb | vga dev/fb/s3_pci.c optional s3pci dev/fb/vesa.c optional vga vesa dev/fb/vga.c optional vga dev/ichwd/ichwd.c optional ichwd dev/if_ndis/if_ndis.c optional ndis dev/if_ndis/if_ndis_pccard.c optional ndis pccard dev/if_ndis/if_ndis_pci.c optional ndis cardbus | ndis pci dev/if_ndis/if_ndis_usb.c optional ndis usb dev/intel/spi.c optional intelspi dev/io/iodev.c optional io dev/ioat/ioat.c optional ioat pci dev/ioat/ioat_test.c optional ioat pci dev/ipmi/ipmi.c optional ipmi dev/ipmi/ipmi_acpi.c optional ipmi acpi dev/ipmi/ipmi_isa.c optional ipmi isa dev/ipmi/ipmi_kcs.c optional ipmi dev/ipmi/ipmi_smic.c optional ipmi dev/ipmi/ipmi_smbus.c optional ipmi smbus dev/ipmi/ipmi_smbios.c optional ipmi dev/ipmi/ipmi_ssif.c optional ipmi smbus dev/ipmi/ipmi_pci.c optional ipmi pci dev/ipmi/ipmi_linux.c optional ipmi compat_linux32 dev/ixl/if_ixl.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_pf_main.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_pf_qmgr.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_pf_iov.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_pf_i2c.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_iw.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/if_ixlv.c optional ixlv pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixlvc.c optional ixlv pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_txrx.c optional ixl pci | ixlv pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_osdep.c optional ixl pci | ixlv pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_lan_hmc.c optional ixl pci | ixlv pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_hmc.c optional ixl pci | ixlv pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_common.c optional ixl pci | ixlv pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_nvm.c optional ixl pci | ixlv pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_adminq.c optional ixl pci | ixlv pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/fdc/fdc.c optional fdc dev/fdc/fdc_acpi.c optional fdc dev/fdc/fdc_isa.c optional fdc isa dev/fdc/fdc_pccard.c 
optional fdc pccard dev/gpio/bytgpio.c optional bytgpio dev/hpt27xx/hpt27xx_os_bsd.c optional hpt27xx dev/hpt27xx/hpt27xx_osm_bsd.c optional hpt27xx dev/hpt27xx/hpt27xx_config.c optional hpt27xx dev/hptmv/entry.c optional hptmv dev/hptmv/mv.c optional hptmv dev/hptmv/gui_lib.c optional hptmv dev/hptmv/hptproc.c optional hptmv dev/hptmv/ioctl.c optional hptmv dev/hptnr/hptnr_os_bsd.c optional hptnr dev/hptnr/hptnr_osm_bsd.c optional hptnr dev/hptnr/hptnr_config.c optional hptnr dev/hptrr/hptrr_os_bsd.c optional hptrr dev/hptrr/hptrr_osm_bsd.c optional hptrr dev/hptrr/hptrr_config.c optional hptrr dev/hwpmc/hwpmc_amd.c optional hwpmc dev/hwpmc/hwpmc_intel.c optional hwpmc dev/hwpmc/hwpmc_core.c optional hwpmc dev/hwpmc/hwpmc_uncore.c optional hwpmc dev/hwpmc/hwpmc_piv.c optional hwpmc dev/hwpmc/hwpmc_tsc.c optional hwpmc dev/hwpmc/hwpmc_x86.c optional hwpmc dev/hyperv/input/hv_kbd.c optional hyperv dev/hyperv/input/hv_kbdc.c optional hyperv dev/hyperv/pcib/vmbus_pcib.c optional hyperv pci dev/hyperv/netvsc/hn_nvs.c optional hyperv dev/hyperv/netvsc/hn_rndis.c optional hyperv dev/hyperv/netvsc/if_hn.c optional hyperv dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c optional hyperv dev/hyperv/utilities/hv_kvp.c optional hyperv dev/hyperv/utilities/hv_snapshot.c optional hyperv dev/hyperv/utilities/vmbus_heartbeat.c optional hyperv dev/hyperv/utilities/vmbus_ic.c optional hyperv dev/hyperv/utilities/vmbus_shutdown.c optional hyperv dev/hyperv/utilities/vmbus_timesync.c optional hyperv dev/hyperv/vmbus/hyperv.c optional hyperv dev/hyperv/vmbus/hyperv_busdma.c optional hyperv dev/hyperv/vmbus/vmbus.c optional hyperv pci dev/hyperv/vmbus/vmbus_br.c optional hyperv dev/hyperv/vmbus/vmbus_chan.c optional hyperv dev/hyperv/vmbus/vmbus_et.c optional hyperv dev/hyperv/vmbus/vmbus_if.m optional hyperv dev/hyperv/vmbus/vmbus_res.c optional hyperv dev/hyperv/vmbus/vmbus_xact.c optional hyperv dev/hyperv/vmbus/amd64/hyperv_machdep.c optional hyperv dev/hyperv/vmbus/amd64/vmbus_vector.S optional hyperv dev/nctgpio/nctgpio.c optional nctgpio dev/nfe/if_nfe.c optional nfe pci dev/ntb/if_ntb/if_ntb.c optional if_ntb dev/ntb/ntb_transport.c optional if_ntb dev/ntb/ntb.c optional if_ntb | ntb_hw dev/ntb/ntb_if.m optional if_ntb | ntb_hw dev/ntb/ntb_hw/ntb_hw.c optional ntb_hw dev/nvd/nvd.c optional nvd nvme dev/nvme/nvme.c optional nvme dev/nvme/nvme_ctrlr.c optional nvme dev/nvme/nvme_ctrlr_cmd.c optional nvme dev/nvme/nvme_ns.c optional nvme dev/nvme/nvme_ns_cmd.c optional nvme dev/nvme/nvme_qpair.c optional nvme dev/nvme/nvme_sim.c optional nvme scbus !nvd dev/nvme/nvme_sysctl.c optional nvme dev/nvme/nvme_test.c optional nvme dev/nvme/nvme_util.c optional nvme dev/nvram/nvram.c optional nvram isa dev/random/ivy.c optional rdrand_rng dev/random/nehemiah.c optional padlock_rng dev/qlxge/qls_dbg.c optional qlxge pci dev/qlxge/qls_dump.c optional qlxge pci dev/qlxge/qls_hw.c optional qlxge pci dev/qlxge/qls_ioctl.c optional qlxge pci dev/qlxge/qls_isr.c optional qlxge pci dev/qlxge/qls_os.c optional qlxge pci dev/qlxgb/qla_dbg.c optional qlxgb pci dev/qlxgb/qla_hw.c optional qlxgb pci dev/qlxgb/qla_ioctl.c optional qlxgb pci dev/qlxgb/qla_isr.c optional qlxgb pci dev/qlxgb/qla_misc.c optional qlxgb pci dev/qlxgb/qla_os.c optional qlxgb pci dev/qlxgbe/ql_dbg.c optional qlxgbe pci dev/qlxgbe/ql_hw.c optional qlxgbe pci dev/qlxgbe/ql_ioctl.c optional qlxgbe pci dev/qlxgbe/ql_isr.c optional qlxgbe pci dev/qlxgbe/ql_misc.c optional qlxgbe pci dev/qlxgbe/ql_os.c optional qlxgbe pci dev/qlxgbe/ql_reset.c optional qlxgbe pci 
dev/qlnx/qlnxe/ecore_cxt.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_dbg_fw_funcs.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_dcbx.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_dev.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_hw.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_init_fw_funcs.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_init_ops.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_int.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_l2.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_mcp.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_sp_commands.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_spq.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/qlnx_ioctl.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/qlnx_os.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/sfxge/common/ef10_ev.c optional sfxge pci dev/sfxge/common/ef10_filter.c optional sfxge pci dev/sfxge/common/ef10_intr.c optional sfxge pci dev/sfxge/common/ef10_mac.c optional sfxge pci dev/sfxge/common/ef10_mcdi.c optional sfxge pci dev/sfxge/common/ef10_nic.c optional sfxge pci dev/sfxge/common/ef10_nvram.c optional sfxge pci dev/sfxge/common/ef10_phy.c optional sfxge pci dev/sfxge/common/ef10_rx.c optional sfxge pci dev/sfxge/common/ef10_tx.c optional sfxge pci dev/sfxge/common/ef10_vpd.c optional sfxge pci dev/sfxge/common/efx_bootcfg.c optional sfxge pci dev/sfxge/common/efx_crc32.c optional sfxge pci dev/sfxge/common/efx_ev.c optional sfxge pci dev/sfxge/common/efx_filter.c optional sfxge pci dev/sfxge/common/efx_hash.c optional sfxge pci dev/sfxge/common/efx_intr.c optional sfxge pci dev/sfxge/common/efx_lic.c optional sfxge pci dev/sfxge/common/efx_mac.c optional sfxge pci dev/sfxge/common/efx_mcdi.c optional sfxge pci dev/sfxge/common/efx_mon.c optional sfxge pci dev/sfxge/common/efx_nic.c optional sfxge pci dev/sfxge/common/efx_nvram.c optional sfxge pci dev/sfxge/common/efx_phy.c optional sfxge pci dev/sfxge/common/efx_port.c optional sfxge pci dev/sfxge/common/efx_rx.c optional sfxge pci dev/sfxge/common/efx_sram.c optional sfxge pci dev/sfxge/common/efx_tx.c optional sfxge pci dev/sfxge/common/efx_vpd.c optional sfxge pci dev/sfxge/common/hunt_nic.c optional sfxge pci dev/sfxge/common/mcdi_mon.c optional sfxge pci dev/sfxge/common/medford_nic.c optional sfxge pci dev/sfxge/common/siena_mac.c optional sfxge pci dev/sfxge/common/siena_mcdi.c optional sfxge pci dev/sfxge/common/siena_nic.c optional sfxge pci dev/sfxge/common/siena_nvram.c optional sfxge pci dev/sfxge/common/siena_phy.c optional sfxge pci dev/sfxge/common/siena_sram.c optional sfxge pci dev/sfxge/common/siena_vpd.c optional sfxge pci dev/sfxge/sfxge.c optional sfxge pci dev/sfxge/sfxge_dma.c optional sfxge pci dev/sfxge/sfxge_ev.c optional sfxge pci dev/sfxge/sfxge_intr.c optional sfxge pci dev/sfxge/sfxge_mcdi.c optional sfxge pci dev/sfxge/sfxge_nvram.c optional sfxge pci dev/sfxge/sfxge_port.c optional sfxge pci dev/sfxge/sfxge_rx.c optional sfxge pci dev/sfxge/sfxge_tx.c optional sfxge pci dev/sio/sio.c optional sio dev/sio/sio_isa.c optional sio isa dev/sio/sio_pccard.c optional sio pccard dev/sio/sio_pci.c optional sio pci dev/sio/sio_puc.c optional sio puc dev/speaker/spkr.c optional speaker 
dev/syscons/apm/apm_saver.c optional apm_saver apm dev/syscons/scterm-teken.c optional sc dev/syscons/scvesactl.c optional sc vga vesa dev/syscons/scvgarndr.c optional sc vga dev/syscons/scvtb.c optional sc dev/tpm/tpm.c optional tpm dev/tpm/tpm_acpi.c optional tpm acpi dev/tpm/tpm_isa.c optional tpm isa dev/uart/uart_cpu_x86.c optional uart dev/viawd/viawd.c optional viawd dev/vmware/vmxnet3/if_vmx.c optional vmx dev/wbwd/wbwd.c optional wbwd dev/wpi/if_wpi.c optional wpi dev/xen/pci/xen_acpi_pci.c optional xenhvm dev/xen/pci/xen_pci.c optional xenhvm dev/isci/isci.c optional isci dev/isci/isci_controller.c optional isci dev/isci/isci_domain.c optional isci dev/isci/isci_interrupt.c optional isci dev/isci/isci_io_request.c optional isci dev/isci/isci_logger.c optional isci dev/isci/isci_oem_parameters.c optional isci dev/isci/isci_remote_device.c optional isci dev/isci/isci_sysctl.c optional isci dev/isci/isci_task_request.c optional isci dev/isci/isci_timer.c optional isci dev/isci/scil/sati.c optional isci dev/isci/scil/sati_abort_task_set.c optional isci dev/isci/scil/sati_atapi.c optional isci dev/isci/scil/sati_device.c optional isci dev/isci/scil/sati_inquiry.c optional isci dev/isci/scil/sati_log_sense.c optional isci dev/isci/scil/sati_lun_reset.c optional isci dev/isci/scil/sati_mode_pages.c optional isci dev/isci/scil/sati_mode_select.c optional isci dev/isci/scil/sati_mode_sense.c optional isci dev/isci/scil/sati_mode_sense_10.c optional isci dev/isci/scil/sati_mode_sense_6.c optional isci dev/isci/scil/sati_move.c optional isci dev/isci/scil/sati_passthrough.c optional isci dev/isci/scil/sati_read.c optional isci dev/isci/scil/sati_read_buffer.c optional isci dev/isci/scil/sati_read_capacity.c optional isci dev/isci/scil/sati_reassign_blocks.c optional isci dev/isci/scil/sati_report_luns.c optional isci dev/isci/scil/sati_request_sense.c optional isci dev/isci/scil/sati_start_stop_unit.c optional isci dev/isci/scil/sati_synchronize_cache.c optional isci dev/isci/scil/sati_test_unit_ready.c optional isci dev/isci/scil/sati_unmap.c optional isci dev/isci/scil/sati_util.c optional isci dev/isci/scil/sati_verify.c optional isci dev/isci/scil/sati_write.c optional isci dev/isci/scil/sati_write_and_verify.c optional isci dev/isci/scil/sati_write_buffer.c optional isci dev/isci/scil/sati_write_long.c optional isci dev/isci/scil/sci_abstract_list.c optional isci dev/isci/scil/sci_base_controller.c optional isci dev/isci/scil/sci_base_domain.c optional isci dev/isci/scil/sci_base_iterator.c optional isci dev/isci/scil/sci_base_library.c optional isci dev/isci/scil/sci_base_logger.c optional isci dev/isci/scil/sci_base_memory_descriptor_list.c optional isci dev/isci/scil/sci_base_memory_descriptor_list_decorator.c optional isci dev/isci/scil/sci_base_object.c optional isci dev/isci/scil/sci_base_observer.c optional isci dev/isci/scil/sci_base_phy.c optional isci dev/isci/scil/sci_base_port.c optional isci dev/isci/scil/sci_base_remote_device.c optional isci dev/isci/scil/sci_base_request.c optional isci dev/isci/scil/sci_base_state_machine.c optional isci dev/isci/scil/sci_base_state_machine_logger.c optional isci dev/isci/scil/sci_base_state_machine_observer.c optional isci dev/isci/scil/sci_base_subject.c optional isci dev/isci/scil/sci_util.c optional isci dev/isci/scil/scic_sds_controller.c optional isci dev/isci/scil/scic_sds_library.c optional isci dev/isci/scil/scic_sds_pci.c optional isci dev/isci/scil/scic_sds_phy.c optional isci dev/isci/scil/scic_sds_port.c optional isci 
dev/isci/scil/scic_sds_port_configuration_agent.c optional isci dev/isci/scil/scic_sds_remote_device.c optional isci dev/isci/scil/scic_sds_remote_node_context.c optional isci dev/isci/scil/scic_sds_remote_node_table.c optional isci dev/isci/scil/scic_sds_request.c optional isci dev/isci/scil/scic_sds_sgpio.c optional isci dev/isci/scil/scic_sds_smp_remote_device.c optional isci dev/isci/scil/scic_sds_smp_request.c optional isci dev/isci/scil/scic_sds_ssp_request.c optional isci dev/isci/scil/scic_sds_stp_packet_request.c optional isci dev/isci/scil/scic_sds_stp_remote_device.c optional isci dev/isci/scil/scic_sds_stp_request.c optional isci dev/isci/scil/scic_sds_unsolicited_frame_control.c optional isci dev/isci/scil/scif_sas_controller.c optional isci dev/isci/scil/scif_sas_controller_state_handlers.c optional isci dev/isci/scil/scif_sas_controller_states.c optional isci dev/isci/scil/scif_sas_domain.c optional isci dev/isci/scil/scif_sas_domain_state_handlers.c optional isci dev/isci/scil/scif_sas_domain_states.c optional isci dev/isci/scil/scif_sas_high_priority_request_queue.c optional isci dev/isci/scil/scif_sas_internal_io_request.c optional isci dev/isci/scil/scif_sas_io_request.c optional isci dev/isci/scil/scif_sas_io_request_state_handlers.c optional isci dev/isci/scil/scif_sas_io_request_states.c optional isci dev/isci/scil/scif_sas_library.c optional isci dev/isci/scil/scif_sas_remote_device.c optional isci dev/isci/scil/scif_sas_remote_device_ready_substate_handlers.c optional isci dev/isci/scil/scif_sas_remote_device_ready_substates.c optional isci dev/isci/scil/scif_sas_remote_device_starting_substate_handlers.c optional isci dev/isci/scil/scif_sas_remote_device_starting_substates.c optional isci dev/isci/scil/scif_sas_remote_device_state_handlers.c optional isci dev/isci/scil/scif_sas_remote_device_states.c optional isci dev/isci/scil/scif_sas_request.c optional isci dev/isci/scil/scif_sas_smp_activity_clear_affiliation.c optional isci dev/isci/scil/scif_sas_smp_io_request.c optional isci dev/isci/scil/scif_sas_smp_phy.c optional isci dev/isci/scil/scif_sas_smp_remote_device.c optional isci dev/isci/scil/scif_sas_stp_io_request.c optional isci dev/isci/scil/scif_sas_stp_remote_device.c optional isci dev/isci/scil/scif_sas_stp_task_request.c optional isci dev/isci/scil/scif_sas_task_request.c optional isci dev/isci/scil/scif_sas_task_request_state_handlers.c optional isci dev/isci/scil/scif_sas_task_request_states.c optional isci dev/isci/scil/scif_sas_timer.c optional isci isa/syscons_isa.c optional sc isa/vga_isa.c optional vga kern/kern_clocksource.c standard kern/link_elf_obj.c standard libkern/x86/crc32_sse42.c standard libkern/memmove.c standard libkern/memset.c standard # # IA32 binary support # #amd64/ia32/ia32_exception.S optional compat_freebsd32 amd64/ia32/ia32_reg.c optional compat_freebsd32 amd64/ia32/ia32_signal.c optional compat_freebsd32 amd64/ia32/ia32_sigtramp.S optional compat_freebsd32 amd64/ia32/ia32_syscall.c optional compat_freebsd32 amd64/ia32/ia32_misc.c optional compat_freebsd32 compat/ia32/ia32_sysvec.c optional compat_freebsd32 compat/linprocfs/linprocfs.c optional linprocfs compat/linsysfs/linsysfs.c optional linsysfs # # Linux/i386 binary support # amd64/linux32/linux32_dummy.c optional compat_linux32 amd64/linux32/linux32_machdep.c optional compat_linux32 amd64/linux32/linux32_support.s optional compat_linux32 \ dependency "linux32_assym.h" amd64/linux32/linux32_sysent.c optional compat_linux32 amd64/linux32/linux32_sysvec.c optional 
compat_linux32 compat/linux/linux_emul.c optional compat_linux32 compat/linux/linux_file.c optional compat_linux32 compat/linux/linux_fork.c optional compat_linux32 compat/linux/linux_futex.c optional compat_linux32 compat/linux/linux_getcwd.c optional compat_linux32 compat/linux/linux_ioctl.c optional compat_linux32 compat/linux/linux_ipc.c optional compat_linux32 compat/linux/linux_mib.c optional compat_linux32 compat/linux/linux_misc.c optional compat_linux32 compat/linux/linux_mmap.c optional compat_linux32 compat/linux/linux_signal.c optional compat_linux32 compat/linux/linux_socket.c optional compat_linux32 compat/linux/linux_stats.c optional compat_linux32 compat/linux/linux_sysctl.c optional compat_linux32 compat/linux/linux_time.c optional compat_linux32 compat/linux/linux_timer.c optional compat_linux32 compat/linux/linux_uid16.c optional compat_linux32 compat/linux/linux_util.c optional compat_linux32 compat/linux/linux_vdso.c optional compat_linux32 compat/linux/linux_common.c optional compat_linux32 compat/linux/linux_event.c optional compat_linux32 compat/linux/linux.c optional compat_linux32 dev/amr/amr_linux.c optional compat_linux32 amr dev/mfi/mfi_linux.c optional compat_linux32 mfi # # Windows NDIS driver support # compat/ndis/kern_ndis.c optional ndisapi pci compat/ndis/kern_windrv.c optional ndisapi pci compat/ndis/subr_hal.c optional ndisapi pci compat/ndis/subr_ndis.c optional ndisapi pci compat/ndis/subr_ntoskrnl.c optional ndisapi pci compat/ndis/subr_pe.c optional ndisapi pci compat/ndis/subr_usbd.c optional ndisapi pci compat/ndis/winx64_wrap.S optional ndisapi pci # # x86 real mode BIOS emulator, required by dpms/pci/vesa # compat/x86bios/x86bios.c optional x86bios | dpms | pci | vesa contrib/x86emu/x86emu.c optional x86bios | dpms | pci | vesa # # bvm console # dev/bvm/bvm_console.c optional bvmconsole dev/bvm/bvm_dbg.c optional bvmdebug # # x86 shared code between IA32 and AMD64 architectures # x86/acpica/OsdEnvironment.c optional acpi x86/acpica/acpi_apm.c optional acpi x86/acpica/acpi_wakeup.c optional acpi x86/acpica/madt.c optional acpi x86/acpica/srat.c optional acpi x86/bios/smbios.c optional smbios x86/bios/vpd.c optional vpd x86/cpufreq/powernow.c optional cpufreq x86/cpufreq/est.c optional cpufreq x86/cpufreq/hwpstate.c optional cpufreq x86/cpufreq/p4tcc.c optional cpufreq x86/iommu/busdma_dmar.c optional acpi acpi_dmar pci x86/iommu/intel_ctx.c optional acpi acpi_dmar pci x86/iommu/intel_drv.c optional acpi acpi_dmar pci x86/iommu/intel_fault.c optional acpi acpi_dmar pci x86/iommu/intel_gas.c optional acpi acpi_dmar pci x86/iommu/intel_idpgtbl.c optional acpi acpi_dmar pci x86/iommu/intel_intrmap.c optional acpi acpi_dmar pci x86/iommu/intel_qi.c optional acpi acpi_dmar pci x86/iommu/intel_quirks.c optional acpi acpi_dmar pci x86/iommu/intel_utils.c optional acpi acpi_dmar pci x86/isa/atpic.c optional atpic isa x86/isa/atrtc.c standard x86/isa/clock.c standard x86/isa/elcr.c optional atpic isa | mptable x86/isa/isa.c standard x86/isa/isa_dma.c standard x86/isa/nmi.c standard x86/isa/orm.c optional isa x86/pci/pci_bus.c optional pci x86/pci/qpi.c optional pci x86/x86/autoconf.c standard x86/x86/bus_machdep.c standard x86/x86/busdma_bounce.c standard x86/x86/busdma_machdep.c standard x86/x86/cpu_machdep.c standard x86/x86/dump_machdep.c standard x86/x86/fdt_machdep.c optional fdt x86/x86/identcpu.c standard x86/x86/intr_machdep.c standard x86/x86/io_apic.c standard x86/x86/legacy.c standard x86/x86/local_apic.c standard x86/x86/mca.c standard 
x86/x86/x86_mem.c optional mem x86/x86/mptable.c optional mptable x86/x86/mptable_pci.c optional mptable pci x86/x86/mp_x86.c optional smp x86/x86/mp_watchdog.c optional mp_watchdog smp x86/x86/msi.c optional pci x86/x86/nexus.c standard x86/x86/pvclock.c standard x86/x86/stack_machdep.c optional ddb | stack x86/x86/tsc.c standard x86/x86/delay.c standard x86/xen/hvm.c optional xenhvm x86/xen/xen_intr.c optional xenhvm x86/xen/pv.c optional xenhvm x86/xen/pvcpu_enum.c optional xenhvm x86/xen/xen_apic.c optional xenhvm x86/xen/xenpv.c optional xenhvm x86/xen/xen_nexus.c optional xenhvm x86/xen/xen_msi.c optional xenhvm x86/xen/xen_pci_bus.c optional xenhvm Index: projects/clang500-import/sys/conf/files.i386 =================================================================== --- projects/clang500-import/sys/conf/files.i386 (revision 319548) +++ projects/clang500-import/sys/conf/files.i386 (revision 319549) @@ -1,617 +1,617 @@ # This file tells config what files go into building a kernel, # files marked standard are always included. # # $FreeBSD$ # # The long compile-with and dependency lines are required because of # limitations in config: backslash-newline doesn't work in strings, and # dependency lines other than the first are silently ignored. # cloudabi32_vdso.o optional compat_cloudabi32 \ dependency "$S/contrib/cloudabi/cloudabi_vdso_i686.S" \ compile-with "${CC} -x assembler-with-cpp -shared -nostdinc -nostdlib -Wl,-T$S/compat/cloudabi/cloudabi_vdso.lds $S/contrib/cloudabi/cloudabi_vdso_i686.S -o ${.TARGET}" \ no-obj no-implicit-rule \ clean "cloudabi32_vdso.o" # cloudabi32_vdso_blob.o optional compat_cloudabi32 \ dependency "cloudabi32_vdso.o" \ compile-with "${OBJCOPY} --input-target binary --output-target elf32-i386-freebsd --binary-architecture i386 cloudabi32_vdso.o ${.TARGET}" \ no-implicit-rule \ clean "cloudabi32_vdso_blob.o" # linux_genassym.o optional compat_linux \ dependency "$S/i386/linux/linux_genassym.c" \ compile-with "${CC} ${CFLAGS:N-flto:N-fno-common} -c ${.IMPSRC}" \ no-obj no-implicit-rule \ clean "linux_genassym.o" # linux_assym.h optional compat_linux \ dependency "$S/kern/genassym.sh linux_genassym.o" \ compile-with "sh $S/kern/genassym.sh linux_genassym.o > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "linux_assym.h" # linux_locore.o optional compat_linux \ dependency "linux_assym.h $S/i386/linux/linux_locore.s" \ - compile-with "${CC} -x assembler-with-cpp -DLOCORE -shared -s -pipe -I. -I$S -Werror -Wall -fno-common -nostdinc -nostdlib -Wl,-T$S/i386/linux/linux_vdso.lds.s -Wl,-soname=linux_vdso.so,--eh-frame-hdr,-fPIC,-warn-common ${.IMPSRC} -o ${.TARGET}" \ + compile-with "${CC} -x assembler-with-cpp -DLOCORE -shared -s -pipe -I. 
-I$S -Werror -Wall -fPIC -fno-common -nostdinc -nostdlib -Wl,-T$S/i386/linux/linux_vdso.lds.s -Wl,-soname=linux_vdso.so,--eh-frame-hdr,-warn-common ${.IMPSRC} -o ${.TARGET}" \ no-obj no-implicit-rule \ clean "linux_locore.o" # linux_vdso.so optional compat_linux \ dependency "linux_locore.o" \ compile-with "${OBJCOPY} --input-target binary --output-target elf32-i386-freebsd --binary-architecture i386 linux_locore.o ${.TARGET}" \ no-implicit-rule \ clean "linux_vdso.so" # font.h optional sc_dflt_font \ compile-with "uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x16.fnt && file2c 'static u_char dflt_font_16[16*256] = {' '};' < ${SC_DFLT_FONT}-8x16 > font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x14.fnt && file2c 'static u_char dflt_font_14[14*256] = {' '};' < ${SC_DFLT_FONT}-8x14 >> font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x8.fnt && file2c 'static u_char dflt_font_8[8*256] = {' '};' < ${SC_DFLT_FONT}-8x8 >> font.h" \ no-obj no-implicit-rule before-depend \ clean "font.h ${SC_DFLT_FONT}-8x14 ${SC_DFLT_FONT}-8x16 ${SC_DFLT_FONT}-8x8" # atkbdmap.h optional atkbd_dflt_keymap \ compile-with "kbdcontrol -P ${S:S/sys$/share/}/vt/keymaps -P ${S:S/sys$/share/}/syscons/keymaps -L ${ATKBD_DFLT_KEYMAP} | sed -e 's/^static keymap_t.* = /static keymap_t key_map = /' -e 's/^static accentmap_t.* = /static accentmap_t accent_map = /' > atkbdmap.h" \ no-obj no-implicit-rule before-depend \ clean "atkbdmap.h" # ukbdmap.h optional ukbd_dflt_keymap \ compile-with "kbdcontrol -P ${S:S/sys$/share/}/vt/keymaps -P ${S:S/sys$/share/}/syscons/keymaps -L ${UKBD_DFLT_KEYMAP} | sed -e 's/^static keymap_t.* = /static keymap_t key_map = /' -e 's/^static accentmap_t.* = /static accentmap_t accent_map = /' > ukbdmap.h" \ no-obj no-implicit-rule before-depend \ clean "ukbdmap.h" # hpt27xx_lib.o optional hpt27xx \ dependency "$S/dev/hpt27xx/i386-elf.hpt27xx_lib.o.uu" \ compile-with "uudecode < $S/dev/hpt27xx/i386-elf.hpt27xx_lib.o.uu" \ no-implicit-rule # hptmvraid.o optional hptmv \ dependency "$S/dev/hptmv/i386-elf.raid.o.uu" \ compile-with "uudecode < $S/dev/hptmv/i386-elf.raid.o.uu" \ no-implicit-rule # hptnr_lib.o optional hptnr \ dependency "$S/dev/hptnr/i386-elf.hptnr_lib.o.uu" \ compile-with "uudecode < $S/dev/hptnr/i386-elf.hptnr_lib.o.uu" \ no-implicit-rule # hptrr_lib.o optional hptrr \ dependency "$S/dev/hptrr/i386-elf.hptrr_lib.o.uu" \ compile-with "uudecode < $S/dev/hptrr/i386-elf.hptrr_lib.o.uu" \ no-implicit-rule # cddl/contrib/opensolaris/common/atomic/i386/opensolaris_atomic.S optional zfs | dtrace compile-with "${ZFS_S}" cddl/dev/dtrace/i386/dtrace_asm.S optional dtrace compile-with "${DTRACE_S}" cddl/dev/dtrace/i386/dtrace_subr.c optional dtrace compile-with "${DTRACE_C}" cddl/dev/fbt/x86/fbt_isa.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}" cddl/dev/dtrace/x86/dis_tables.c optional dtrace_fbt | dtraceall compile-with "${DTRACE_C}" cddl/dev/dtrace/x86/instr_size.c optional dtrace_fbt | dtraceall compile-with "${DTRACE_C}" compat/linprocfs/linprocfs.c optional linprocfs compat/linsysfs/linsysfs.c optional linsysfs compat/linux/linux_event.c optional compat_linux compat/linux/linux_emul.c optional compat_linux compat/linux/linux_file.c optional compat_linux compat/linux/linux_fork.c optional compat_linux compat/linux/linux_futex.c optional compat_linux compat/linux/linux_getcwd.c optional compat_linux compat/linux/linux_ioctl.c optional compat_linux compat/linux/linux_ipc.c optional compat_linux compat/linux/linux_mib.c optional compat_linux 
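The cloudabi32_vdso_blob.o and linux_vdso.so rules above share a two-step embedding pattern: the vDSO is first built as a standalone shared object, then re-wrapped with ${OBJCOPY} --input-target binary so that the raw image can be linked into the kernel as data. A minimal sketch of how such a wrapped blob is typically consumed, assuming stock GNU objcopy symbol naming for binary inputs; the symbol names and the helper below are illustrative, not the actual consumers in the CloudABI/Linux compat code:

#include <sys/types.h>

/*
 * GNU objcopy with --input-target binary defines start/end/size symbols
 * derived from the input file name, e.g. for "linux_locore.o" it emits
 * _binary_linux_locore_o_start and friends (assumed convention; the
 * real consumer symbols are not shown in this diff).
 */
extern char _binary_linux_locore_o_start[];
extern char _binary_linux_locore_o_end[];

static size_t
linux_vdso_image_size(void)
{
	/* Length in bytes of the embedded vDSO image. */
	return ((size_t)(_binary_linux_locore_o_end -
	    _binary_linux_locore_o_start));
}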
compat/linux/linux_misc.c optional compat_linux compat/linux/linux_mmap.c optional compat_linux compat/linux/linux_signal.c optional compat_linux compat/linux/linux_socket.c optional compat_linux compat/linux/linux_stats.c optional compat_linux compat/linux/linux_sysctl.c optional compat_linux compat/linux/linux_time.c optional compat_linux compat/linux/linux_timer.c optional compat_linux compat/linux/linux_uid16.c optional compat_linux compat/linux/linux_util.c optional compat_linux compat/linux/linux_vdso.c optional compat_linux compat/linux/linux.c optional compat_linux compat/ndis/kern_ndis.c optional ndisapi pci compat/ndis/kern_windrv.c optional ndisapi pci compat/ndis/subr_hal.c optional ndisapi pci compat/ndis/subr_ndis.c optional ndisapi pci compat/ndis/subr_ntoskrnl.c optional ndisapi pci compat/ndis/subr_pe.c optional ndisapi pci compat/ndis/subr_usbd.c optional ndisapi pci compat/ndis/winx32_wrap.S optional ndisapi pci bf_enc.o optional crypto | ipsec | ipsec_support \ dependency "$S/crypto/blowfish/arch/i386/bf_enc.S $S/crypto/blowfish/arch/i386/bf_enc_586.S $S/crypto/blowfish/arch/i386/bf_enc_686.S" \ compile-with "${CC} -c -I$S/crypto/blowfish/arch/i386 ${ASM_CFLAGS} ${WERROR} ${.IMPSRC}" \ no-implicit-rule crypto/aesni/aeskeys_i386.S optional aesni crypto/aesni/aesni.c optional aesni aesni_ghash.o optional aesni \ dependency "$S/crypto/aesni/aesni_ghash.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${NO_WCAST_QUAL} ${PROF} -mmmx -msse -msse4 -maes -mpclmul ${.IMPSRC}" \ no-implicit-rule \ clean "aesni_ghash.o" aesni_wrap.o optional aesni \ dependency "$S/crypto/aesni/aesni_wrap.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${NO_WCAST_QUAL} ${PROF} -mmmx -msse -msse4 -maes ${.IMPSRC}" \ no-implicit-rule \ clean "aesni_wrap.o" crypto/des/arch/i386/des_enc.S optional crypto | ipsec | ipsec_support | netsmb crypto/via/padlock.c optional padlock crypto/via/padlock_cipher.c optional padlock crypto/via/padlock_hash.c optional padlock dev/acpica/acpi_pci.c optional acpi pci dev/acpica/acpi_pci_link.c optional acpi pci dev/acpica/acpi_pcib.c optional acpi pci dev/acpica/acpi_pcib_acpi.c optional acpi pci dev/acpica/acpi_pcib_pci.c optional acpi pci dev/advansys/adv_isa.c optional adv isa dev/agp/agp_ali.c optional agp dev/agp/agp_amd.c optional agp dev/agp/agp_amd64.c optional agp dev/agp/agp_ati.c optional agp dev/agp/agp_i810.c optional agp dev/agp/agp_intel.c optional agp dev/agp/agp_nvidia.c optional agp dev/agp/agp_sis.c optional agp dev/agp/agp_via.c optional agp dev/aic/aic_isa.c optional aic isa dev/amdsbwd/amdsbwd.c optional amdsbwd dev/amdtemp/amdtemp.c optional amdtemp dev/arcmsr/arcmsr.c optional arcmsr pci dev/asmc/asmc.c optional asmc isa dev/atkbdc/atkbd.c optional atkbd atkbdc dev/atkbdc/atkbd_atkbdc.c optional atkbd atkbdc dev/atkbdc/atkbdc.c optional atkbdc dev/atkbdc/atkbdc_isa.c optional atkbdc isa dev/atkbdc/atkbdc_subr.c optional atkbdc dev/atkbdc/psm.c optional psm atkbdc dev/bxe/bxe.c optional bxe pci dev/bxe/bxe_stats.c optional bxe pci dev/bxe/bxe_debug.c optional bxe pci dev/bxe/ecore_sp.c optional bxe pci dev/bxe/bxe_elink.c optional bxe pci dev/bxe/57710_init_values.c optional bxe pci dev/bxe/57711_init_values.c optional bxe pci dev/bxe/57712_init_values.c optional bxe pci dev/ce/ceddk.c optional ce dev/ce/if_ce.c optional ce dev/ce/tau32-ddk.c optional ce \ compile-with "${NORMAL_C} ${NO_WCONSTANT_CONVERSION}" dev/cm/if_cm_isa.c optional cm isa dev/coretemp/coretemp.c optional coretemp dev/cp/cpddk.c 
optional cp dev/cp/if_cp.c optional cp dev/cpuctl/cpuctl.c optional cpuctl dev/ctau/ctau.c optional ctau dev/ctau/ctddk.c optional ctau dev/ctau/if_ct.c optional ctau dev/cx/csigma.c optional cx dev/cx/cxddk.c optional cx dev/cx/if_cx.c optional cx dev/dpms/dpms.c optional dpms dev/ed/if_ed_3c503.c optional ed isa ed_3c503 dev/ed/if_ed_isa.c optional ed isa dev/ed/if_ed_wd80x3.c optional ed isa dev/ed/if_ed_hpp.c optional ed isa ed_hpp dev/ed/if_ed_sic.c optional ed isa ed_sic dev/fb/fb.c optional fb | vga dev/fb/s3_pci.c optional s3pci dev/fb/vesa.c optional vga vesa dev/fb/vga.c optional vga dev/fdc/fdc.c optional fdc dev/fdc/fdc_acpi.c optional fdc dev/fdc/fdc_isa.c optional fdc isa dev/fdc/fdc_pccard.c optional fdc pccard dev/fe/if_fe_isa.c optional fe isa dev/glxiic/glxiic.c optional glxiic dev/glxsb/glxsb.c optional glxsb dev/glxsb/glxsb_hash.c optional glxsb dev/gpio/bytgpio.c optional bytgpio dev/hpt27xx/hpt27xx_os_bsd.c optional hpt27xx dev/hpt27xx/hpt27xx_osm_bsd.c optional hpt27xx dev/hpt27xx/hpt27xx_config.c optional hpt27xx dev/hptmv/entry.c optional hptmv dev/hptmv/mv.c optional hptmv dev/hptmv/gui_lib.c optional hptmv dev/hptmv/hptproc.c optional hptmv dev/hptmv/ioctl.c optional hptmv dev/hptnr/hptnr_os_bsd.c optional hptnr dev/hptnr/hptnr_osm_bsd.c optional hptnr dev/hptnr/hptnr_config.c optional hptnr dev/hptrr/hptrr_os_bsd.c optional hptrr dev/hptrr/hptrr_osm_bsd.c optional hptrr dev/hptrr/hptrr_config.c optional hptrr dev/hwpmc/hwpmc_amd.c optional hwpmc dev/hwpmc/hwpmc_intel.c optional hwpmc dev/hwpmc/hwpmc_core.c optional hwpmc dev/hwpmc/hwpmc_uncore.c optional hwpmc dev/hwpmc/hwpmc_pentium.c optional hwpmc dev/hwpmc/hwpmc_piv.c optional hwpmc dev/hwpmc/hwpmc_ppro.c optional hwpmc dev/hwpmc/hwpmc_tsc.c optional hwpmc dev/hwpmc/hwpmc_x86.c optional hwpmc dev/hyperv/pcib/vmbus_pcib.c optional hyperv pci dev/hyperv/netvsc/hn_nvs.c optional hyperv dev/hyperv/netvsc/hn_rndis.c optional hyperv dev/hyperv/netvsc/if_hn.c optional hyperv dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c optional hyperv dev/hyperv/utilities/hv_kvp.c optional hyperv dev/hyperv/utilities/hv_snapshot.c optional hyperv dev/hyperv/utilities/vmbus_heartbeat.c optional hyperv dev/hyperv/utilities/vmbus_ic.c optional hyperv dev/hyperv/utilities/vmbus_shutdown.c optional hyperv dev/hyperv/utilities/vmbus_timesync.c optional hyperv dev/hyperv/vmbus/hyperv.c optional hyperv dev/hyperv/vmbus/hyperv_busdma.c optional hyperv dev/hyperv/vmbus/vmbus.c optional hyperv pci dev/hyperv/vmbus/vmbus_br.c optional hyperv dev/hyperv/vmbus/vmbus_chan.c optional hyperv dev/hyperv/vmbus/vmbus_et.c optional hyperv dev/hyperv/vmbus/vmbus_if.m optional hyperv dev/hyperv/vmbus/vmbus_res.c optional hyperv dev/hyperv/vmbus/vmbus_xact.c optional hyperv dev/hyperv/vmbus/i386/hyperv_machdep.c optional hyperv dev/hyperv/vmbus/i386/vmbus_vector.S optional hyperv dev/ichwd/ichwd.c optional ichwd dev/if_ndis/if_ndis.c optional ndis dev/if_ndis/if_ndis_pccard.c optional ndis pccard dev/if_ndis/if_ndis_pci.c optional ndis cardbus | ndis pci dev/if_ndis/if_ndis_usb.c optional ndis usb dev/intel/spi.c optional intelspi dev/io/iodev.c optional io dev/ipmi/ipmi.c optional ipmi dev/ipmi/ipmi_acpi.c optional ipmi acpi dev/ipmi/ipmi_isa.c optional ipmi isa dev/ipmi/ipmi_kcs.c optional ipmi dev/ipmi/ipmi_smic.c optional ipmi dev/ipmi/ipmi_smbus.c optional ipmi smbus dev/ipmi/ipmi_smbios.c optional ipmi dev/ipmi/ipmi_ssif.c optional ipmi smbus dev/ipmi/ipmi_pci.c optional ipmi pci dev/ipmi/ipmi_linux.c optional ipmi compat_linux dev/le/if_le_isa.c 
optional le isa dev/mse/mse.c optional mse dev/mse/mse_isa.c optional mse isa dev/nctgpio/nctgpio.c optional nctgpio dev/nfe/if_nfe.c optional nfe pci dev/ntb/if_ntb/if_ntb.c optional if_ntb dev/ntb/ntb_transport.c optional if_ntb dev/ntb/ntb.c optional if_ntb | ntb_hw dev/ntb/ntb_if.m optional if_ntb | ntb_hw dev/ntb/ntb_hw/ntb_hw.c optional ntb_hw dev/nvd/nvd.c optional nvd nvme dev/nvme/nvme.c optional nvme dev/nvme/nvme_ctrlr.c optional nvme dev/nvme/nvme_ctrlr_cmd.c optional nvme dev/nvme/nvme_ns.c optional nvme dev/nvme/nvme_ns_cmd.c optional nvme dev/nvme/nvme_qpair.c optional nvme dev/nvme/nvme_sysctl.c optional nvme dev/nvme/nvme_test.c optional nvme dev/nvme/nvme_util.c optional nvme dev/nvram/nvram.c optional nvram isa dev/ofw/ofwpci.c optional fdt pci dev/pcf/pcf_isa.c optional pcf dev/random/ivy.c optional rdrand_rng dev/random/nehemiah.c optional padlock_rng dev/sbni/if_sbni.c optional sbni dev/sbni/if_sbni_isa.c optional sbni isa dev/sbni/if_sbni_pci.c optional sbni pci dev/sio/sio.c optional sio dev/sio/sio_isa.c optional sio isa dev/sio/sio_pccard.c optional sio pccard dev/sio/sio_pci.c optional sio pci dev/sio/sio_puc.c optional sio puc dev/speaker/spkr.c optional speaker dev/syscons/apm/apm_saver.c optional apm_saver apm dev/syscons/scterm-teken.c optional sc dev/syscons/scvesactl.c optional sc vga vesa dev/syscons/scvgarndr.c optional sc vga dev/syscons/scvtb.c optional sc dev/tpm/tpm.c optional tpm dev/tpm/tpm_acpi.c optional tpm acpi dev/tpm/tpm_isa.c optional tpm isa dev/uart/uart_cpu_x86.c optional uart dev/viawd/viawd.c optional viawd dev/vmware/vmxnet3/if_vmx.c optional vmx dev/acpica/acpi_if.m standard dev/acpica/acpi_hpet.c optional acpi dev/acpica/acpi_timer.c optional acpi dev/acpi_support/acpi_wmi_if.m standard dev/wbwd/wbwd.c optional wbwd dev/wpi/if_wpi.c optional wpi dev/isci/isci.c optional isci dev/isci/isci_controller.c optional isci dev/isci/isci_domain.c optional isci dev/isci/isci_interrupt.c optional isci dev/isci/isci_io_request.c optional isci dev/isci/isci_logger.c optional isci dev/isci/isci_oem_parameters.c optional isci dev/isci/isci_remote_device.c optional isci dev/isci/isci_sysctl.c optional isci dev/isci/isci_task_request.c optional isci dev/isci/isci_timer.c optional isci dev/isci/scil/sati.c optional isci dev/isci/scil/sati_abort_task_set.c optional isci dev/isci/scil/sati_atapi.c optional isci dev/isci/scil/sati_device.c optional isci dev/isci/scil/sati_inquiry.c optional isci dev/isci/scil/sati_log_sense.c optional isci dev/isci/scil/sati_lun_reset.c optional isci dev/isci/scil/sati_mode_pages.c optional isci dev/isci/scil/sati_mode_select.c optional isci dev/isci/scil/sati_mode_sense.c optional isci dev/isci/scil/sati_mode_sense_10.c optional isci dev/isci/scil/sati_mode_sense_6.c optional isci dev/isci/scil/sati_move.c optional isci dev/isci/scil/sati_passthrough.c optional isci dev/isci/scil/sati_read.c optional isci dev/isci/scil/sati_read_buffer.c optional isci dev/isci/scil/sati_read_capacity.c optional isci dev/isci/scil/sati_reassign_blocks.c optional isci dev/isci/scil/sati_report_luns.c optional isci dev/isci/scil/sati_request_sense.c optional isci dev/isci/scil/sati_start_stop_unit.c optional isci dev/isci/scil/sati_synchronize_cache.c optional isci dev/isci/scil/sati_test_unit_ready.c optional isci dev/isci/scil/sati_unmap.c optional isci dev/isci/scil/sati_util.c optional isci dev/isci/scil/sati_verify.c optional isci dev/isci/scil/sati_write.c optional isci dev/isci/scil/sati_write_and_verify.c optional isci 
dev/isci/scil/sati_write_buffer.c optional isci dev/isci/scil/sati_write_long.c optional isci dev/isci/scil/sci_abstract_list.c optional isci dev/isci/scil/sci_base_controller.c optional isci dev/isci/scil/sci_base_domain.c optional isci dev/isci/scil/sci_base_iterator.c optional isci dev/isci/scil/sci_base_library.c optional isci dev/isci/scil/sci_base_logger.c optional isci dev/isci/scil/sci_base_memory_descriptor_list.c optional isci dev/isci/scil/sci_base_memory_descriptor_list_decorator.c optional isci dev/isci/scil/sci_base_object.c optional isci dev/isci/scil/sci_base_observer.c optional isci dev/isci/scil/sci_base_phy.c optional isci dev/isci/scil/sci_base_port.c optional isci dev/isci/scil/sci_base_remote_device.c optional isci dev/isci/scil/sci_base_request.c optional isci dev/isci/scil/sci_base_state_machine.c optional isci dev/isci/scil/sci_base_state_machine_logger.c optional isci dev/isci/scil/sci_base_state_machine_observer.c optional isci dev/isci/scil/sci_base_subject.c optional isci dev/isci/scil/sci_util.c optional isci dev/isci/scil/scic_sds_controller.c optional isci dev/isci/scil/scic_sds_library.c optional isci dev/isci/scil/scic_sds_pci.c optional isci dev/isci/scil/scic_sds_phy.c optional isci dev/isci/scil/scic_sds_port.c optional isci dev/isci/scil/scic_sds_port_configuration_agent.c optional isci dev/isci/scil/scic_sds_remote_device.c optional isci dev/isci/scil/scic_sds_remote_node_context.c optional isci dev/isci/scil/scic_sds_remote_node_table.c optional isci dev/isci/scil/scic_sds_request.c optional isci dev/isci/scil/scic_sds_sgpio.c optional isci dev/isci/scil/scic_sds_smp_remote_device.c optional isci dev/isci/scil/scic_sds_smp_request.c optional isci dev/isci/scil/scic_sds_ssp_request.c optional isci dev/isci/scil/scic_sds_stp_packet_request.c optional isci dev/isci/scil/scic_sds_stp_remote_device.c optional isci dev/isci/scil/scic_sds_stp_request.c optional isci dev/isci/scil/scic_sds_unsolicited_frame_control.c optional isci dev/isci/scil/scif_sas_controller.c optional isci dev/isci/scil/scif_sas_controller_state_handlers.c optional isci dev/isci/scil/scif_sas_controller_states.c optional isci dev/isci/scil/scif_sas_domain.c optional isci dev/isci/scil/scif_sas_domain_state_handlers.c optional isci dev/isci/scil/scif_sas_domain_states.c optional isci dev/isci/scil/scif_sas_high_priority_request_queue.c optional isci dev/isci/scil/scif_sas_internal_io_request.c optional isci dev/isci/scil/scif_sas_io_request.c optional isci dev/isci/scil/scif_sas_io_request_state_handlers.c optional isci dev/isci/scil/scif_sas_io_request_states.c optional isci dev/isci/scil/scif_sas_library.c optional isci dev/isci/scil/scif_sas_remote_device.c optional isci dev/isci/scil/scif_sas_remote_device_ready_substate_handlers.c optional isci dev/isci/scil/scif_sas_remote_device_ready_substates.c optional isci dev/isci/scil/scif_sas_remote_device_starting_substate_handlers.c optional isci dev/isci/scil/scif_sas_remote_device_starting_substates.c optional isci dev/isci/scil/scif_sas_remote_device_state_handlers.c optional isci dev/isci/scil/scif_sas_remote_device_states.c optional isci dev/isci/scil/scif_sas_request.c optional isci dev/isci/scil/scif_sas_smp_activity_clear_affiliation.c optional isci dev/isci/scil/scif_sas_smp_io_request.c optional isci dev/isci/scil/scif_sas_smp_phy.c optional isci dev/isci/scil/scif_sas_smp_remote_device.c optional isci dev/isci/scil/scif_sas_stp_io_request.c optional isci dev/isci/scil/scif_sas_stp_remote_device.c optional isci 
dev/isci/scil/scif_sas_stp_task_request.c optional isci dev/isci/scil/scif_sas_task_request.c optional isci dev/isci/scil/scif_sas_task_request_state_handlers.c optional isci dev/isci/scil/scif_sas_task_request_states.c optional isci dev/isci/scil/scif_sas_timer.c optional isci i386/acpica/acpi_machdep.c optional acpi acpi_wakecode.o optional acpi \ dependency "$S/i386/acpica/acpi_wakecode.S assym.s" \ compile-with "${NORMAL_S}" \ no-obj no-implicit-rule before-depend \ clean "acpi_wakecode.o" acpi_wakecode.bin optional acpi \ dependency "acpi_wakecode.o" \ compile-with "${OBJCOPY} -S -O binary acpi_wakecode.o ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "acpi_wakecode.bin" acpi_wakecode.h optional acpi \ dependency "acpi_wakecode.bin" \ compile-with "file2c -sx 'static char wakecode[] = {' '};' < acpi_wakecode.bin > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "acpi_wakecode.h" acpi_wakedata.h optional acpi \ dependency "acpi_wakecode.o" \ compile-with '${NM} -n --defined-only acpi_wakecode.o | while read offset dummy what; do echo "#define $${what} 0x$${offset}"; done > ${.TARGET}' \ no-obj no-implicit-rule before-depend \ clean "acpi_wakedata.h" # i386/bios/apm.c optional apm i386/bios/smapi.c optional smapi i386/bios/smapi_bios.S optional smapi i386/cloudabi32/cloudabi32_sysvec.c optional compat_cloudabi32 #i386/i386/apic_vector.s optional apic i386/i386/atomic.c standard \ compile-with "${CC} -c ${CFLAGS} ${DEFINED_PROF:S/^$/-fomit-frame-pointer/} ${.IMPSRC}" i386/i386/bios.c standard i386/i386/bioscall.s standard i386/i386/bpf_jit_machdep.c optional bpf_jitter i386/i386/db_disasm.c optional ddb i386/i386/db_interface.c optional ddb i386/i386/db_trace.c optional ddb i386/i386/elan-mmcr.c optional cpu_elan | cpu_soekris i386/i386/elf_machdep.c standard i386/i386/exception.s standard i386/i386/gdb_machdep.c optional gdb i386/i386/geode.c optional cpu_geode i386/i386/in_cksum.c optional inet | inet6 i386/i386/initcpu.c standard i386/i386/io.c optional io i386/i386/k6_mem.c optional mem i386/i386/locore.s standard no-obj i386/i386/longrun.c optional cpu_enable_longrun i386/i386/machdep.c standard i386/i386/mem.c optional mem i386/i386/minidump_machdep.c standard i386/i386/mp_clock.c optional smp i386/i386/mp_machdep.c optional smp i386/i386/mpboot.s optional smp i386/i386/perfmon.c optional perfmon i386/i386/pmap.c standard i386/i386/ptrace_machdep.c standard i386/i386/support.s standard i386/i386/swtch.s standard i386/i386/sys_machdep.c standard i386/i386/trap.c standard i386/i386/uio_machdep.c standard i386/i386/vm86.c standard i386/i386/vm_machdep.c standard i386/ibcs2/ibcs2_errno.c optional ibcs2 i386/ibcs2/ibcs2_fcntl.c optional ibcs2 i386/ibcs2/ibcs2_ioctl.c optional ibcs2 i386/ibcs2/ibcs2_ipc.c optional ibcs2 i386/ibcs2/ibcs2_isc.c optional ibcs2 i386/ibcs2/ibcs2_isc_sysent.c optional ibcs2 i386/ibcs2/ibcs2_misc.c optional ibcs2 i386/ibcs2/ibcs2_msg.c optional ibcs2 i386/ibcs2/ibcs2_other.c optional ibcs2 i386/ibcs2/ibcs2_signal.c optional ibcs2 i386/ibcs2/ibcs2_socksys.c optional ibcs2 i386/ibcs2/ibcs2_stat.c optional ibcs2 i386/ibcs2/ibcs2_sysent.c optional ibcs2 i386/ibcs2/ibcs2_sysi86.c optional ibcs2 i386/ibcs2/ibcs2_sysvec.c optional ibcs2 i386/ibcs2/ibcs2_util.c optional ibcs2 i386/ibcs2/ibcs2_xenix.c optional ibcs2 i386/ibcs2/ibcs2_xenix_sysent.c optional ibcs2 i386/ibcs2/imgact_coff.c optional ibcs2 i386/isa/elink.c optional ep | ie i386/isa/npx.c standard i386/isa/pmtimer.c optional pmtimer i386/isa/prof_machdep.c optional profiling-routine 
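The acpi_wakecode rules above chain four generated targets: the wakeup trampoline is assembled with ${NORMAL_S}, stripped to raw bytes by ${OBJCOPY} -S -O binary, converted into a C array by file2c, and its symbol table is turned into per-symbol offset macros by the nm -n --defined-only loop. A rough sketch of the shape of the two generated headers; the byte values, symbol name, and offset are hypothetical, and only the array and macro forms follow from the commands shown:

/* acpi_wakecode.h: the real-mode wakeup trampoline as a C byte array */
static char wakecode[] = {
	0x0f, 0x20, 0xc0, 0x66,	/* hypothetical bytes from acpi_wakecode.bin */
};

/* acpi_wakedata.h: one macro per defined symbol, in address order */
#define wakeup_hypothetical_sym 0x00000010	/* hypothetical symbol/offset */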
i386/linux/imgact_linux.c optional compat_linux i386/linux/linux_dummy.c optional compat_linux i386/linux/linux_machdep.c optional compat_linux i386/linux/linux_ptrace.c optional compat_linux i386/linux/linux_support.s optional compat_linux \ dependency "linux_assym.h" i386/linux/linux_sysent.c optional compat_linux i386/linux/linux_sysvec.c optional compat_linux i386/pci/pci_cfgreg.c optional pci i386/pci/pci_pir.c optional pci isa/syscons_isa.c optional sc isa/vga_isa.c optional vga kern/kern_clocksource.c standard kern/imgact_aout.c optional compat_aout kern/imgact_gzip.c optional gzip kern/subr_sfbuf.c standard libkern/divdi3.c standard libkern/ffsll.c standard libkern/flsll.c standard libkern/memmove.c standard libkern/memset.c standard libkern/moddi3.c standard libkern/qdivrem.c standard libkern/ucmpdi2.c standard libkern/udivdi3.c standard libkern/umoddi3.c standard libkern/x86/crc32_sse42.c standard i386/xbox/xbox.c optional xbox i386/xbox/xboxfb.c optional xboxfb dev/fb/boot_font.c optional xboxfb i386/xbox/pic16l.s optional xbox # # x86 real mode BIOS support, required by dpms/pci/vesa # compat/x86bios/x86bios.c optional x86bios | dpms | pci | vesa # # bvm console # dev/bvm/bvm_console.c optional bvmconsole dev/bvm/bvm_dbg.c optional bvmdebug # # x86 shared code between IA32 and AMD64 architectures # x86/acpica/OsdEnvironment.c optional acpi x86/acpica/acpi_apm.c optional acpi x86/acpica/acpi_wakeup.c optional acpi x86/acpica/madt.c optional acpi apic x86/acpica/srat.c optional acpi x86/bios/smbios.c optional smbios x86/bios/vpd.c optional vpd x86/cpufreq/est.c optional cpufreq x86/cpufreq/hwpstate.c optional cpufreq x86/cpufreq/p4tcc.c optional cpufreq x86/cpufreq/powernow.c optional cpufreq x86/cpufreq/smist.c optional cpufreq x86/iommu/busdma_dmar.c optional acpi acpi_dmar pci x86/iommu/intel_ctx.c optional acpi acpi_dmar pci x86/iommu/intel_drv.c optional acpi acpi_dmar pci x86/iommu/intel_fault.c optional acpi acpi_dmar pci x86/iommu/intel_gas.c optional acpi acpi_dmar pci x86/iommu/intel_idpgtbl.c optional acpi acpi_dmar pci x86/iommu/intel_intrmap.c optional acpi acpi_dmar pci x86/iommu/intel_qi.c optional acpi acpi_dmar pci x86/iommu/intel_quirks.c optional acpi acpi_dmar pci x86/iommu/intel_utils.c optional acpi acpi_dmar pci x86/isa/atpic.c optional atpic x86/isa/atrtc.c standard x86/isa/clock.c standard x86/isa/elcr.c optional atpic | apic x86/isa/isa.c optional isa x86/isa/isa_dma.c optional isa x86/isa/nmi.c standard x86/isa/orm.c optional isa x86/pci/pci_bus.c optional pci x86/pci/qpi.c optional pci x86/x86/autoconf.c standard x86/x86/bus_machdep.c standard x86/x86/busdma_bounce.c standard x86/x86/busdma_machdep.c standard x86/x86/cpu_machdep.c standard x86/x86/dump_machdep.c standard x86/x86/fdt_machdep.c optional fdt x86/x86/identcpu.c standard x86/x86/intr_machdep.c standard x86/x86/io_apic.c optional apic x86/x86/legacy.c standard x86/x86/local_apic.c optional apic x86/x86/mca.c standard x86/x86/x86_mem.c optional mem x86/x86/mptable.c optional apic x86/x86/mptable_pci.c optional apic pci x86/x86/mp_x86.c optional smp x86/x86/mp_watchdog.c optional mp_watchdog smp x86/x86/msi.c optional apic pci x86/x86/nexus.c standard x86/x86/stack_machdep.c optional ddb | stack x86/x86/tsc.c standard x86/x86/pvclock.c standard x86/x86/delay.c standard x86/xen/hvm.c optional xenhvm x86/xen/xen_intr.c optional xenhvm x86/xen/xen_apic.c optional xenhvm x86/xen/xenpv.c optional xenhvm x86/xen/xen_nexus.c optional xenhvm x86/xen/xen_msi.c optional xenhvm Index: 
projects/clang500-import/sys/dev/cxgbe/t4_sge.c =================================================================== --- projects/clang500-import/sys/dev/cxgbe/t4_sge.c (revision 319548) +++ projects/clang500-import/sys/dev/cxgbe/t4_sge.c (revision 319549) @@ -1,5334 +1,5338 @@ /*- * Copyright (c) 2011 Chelsio Communications, Inc. * All rights reserved. * Written by: Navdeep Parhar * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEV_NETMAP #include #include #include #include #include #endif #include "common/common.h" #include "common/t4_regs.h" #include "common/t4_regs_values.h" #include "common/t4_msg.h" #include "t4_l2t.h" #include "t4_mp_ring.h" #ifdef T4_PKT_TIMESTAMP #define RX_COPY_THRESHOLD (MINCLSIZE - 8) #else #define RX_COPY_THRESHOLD MINCLSIZE #endif /* * Ethernet frames are DMA'd at this byte offset into the freelist buffer. * 0-7 are valid values. */ static int fl_pktshift = 2; TUNABLE_INT("hw.cxgbe.fl_pktshift", &fl_pktshift); /* * Pad ethernet payload up to this boundary. * -1: driver should figure out a good value. * 0: disable padding. * Any power of 2 from 32 to 4096 (both inclusive) is also a valid value. */ int fl_pad = -1; TUNABLE_INT("hw.cxgbe.fl_pad", &fl_pad); /* * Status page length. * -1: driver should figure out a good value. * 64 or 128 are the only other valid values. */ static int spg_len = -1; TUNABLE_INT("hw.cxgbe.spg_len", &spg_len); /* * Congestion drops. * -1: no congestion feedback (not recommended). * 0: backpressure the channel instead of dropping packets right away. * 1: no backpressure, drop packets for the congested queue immediately. */ static int cong_drop = 0; TUNABLE_INT("hw.cxgbe.cong_drop", &cong_drop); /* * Deliver multiple frames in the same free list buffer if they fit. * -1: let the driver decide whether to enable buffer packing or not. * 0: disable buffer packing. * 1: enable buffer packing. */ static int buffer_packing = -1; TUNABLE_INT("hw.cxgbe.buffer_packing", &buffer_packing); /* * Start next frame in a packed buffer at this boundary. 
* -1: driver should figure out a good value. * T4: driver will ignore this and use the same value as fl_pad above. * T5: 16, or a power of 2 from 64 to 4096 (both inclusive) is a valid value. */ static int fl_pack = -1; TUNABLE_INT("hw.cxgbe.fl_pack", &fl_pack); /* * Allow the driver to create mbuf(s) in a cluster allocated for rx. * 0: never; always allocate mbufs from the zone_mbuf UMA zone. * 1: ok to create mbuf(s) within a cluster if there is room. */ static int allow_mbufs_in_cluster = 1; TUNABLE_INT("hw.cxgbe.allow_mbufs_in_cluster", &allow_mbufs_in_cluster); /* * Largest rx cluster size that the driver is allowed to allocate. */ static int largest_rx_cluster = MJUM16BYTES; TUNABLE_INT("hw.cxgbe.largest_rx_cluster", &largest_rx_cluster); /* * Size of cluster allocation that's most likely to succeed. The driver will * fall back to this size if it fails to allocate clusters larger than this. */ static int safest_rx_cluster = PAGE_SIZE; TUNABLE_INT("hw.cxgbe.safest_rx_cluster", &safest_rx_cluster); /* * The interrupt holdoff timers are multiplied by this value on T6+. * 1 and 3-17 (both inclusive) are legal values. */ static int tscale = 1; TUNABLE_INT("hw.cxgbe.tscale", &tscale); /* * Number of LRO entries in the lro_ctrl structure per rx queue. */ static int lro_entries = TCP_LRO_ENTRIES; TUNABLE_INT("hw.cxgbe.lro_entries", &lro_entries); /* * This enables presorting of frames before they're fed into tcp_lro_rx. */ static int lro_mbufs = 0; TUNABLE_INT("hw.cxgbe.lro_mbufs", &lro_mbufs); struct txpkts { u_int wr_type; /* type 0 or type 1 */ u_int npkt; /* # of packets in this work request */ u_int plen; /* total payload (sum of all packets) */ u_int len16; /* # of 16B pieces used by this work request */ }; /* A packet's SGL. This + m_pkthdr has all info needed for tx */ struct sgl { struct sglist sg; struct sglist_seg seg[TX_SGL_SEGS]; }; static int service_iq(struct sge_iq *, int); static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t); static int t4_eth_rx(struct sge_iq *, const struct rss_header *, struct mbuf *); static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int); static inline void init_fl(struct adapter *, struct sge_fl *, int, int, char *); static inline void init_eq(struct adapter *, struct sge_eq *, int, int, uint8_t, uint16_t, char *); static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *, bus_addr_t *, void **); static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t, void *); static int alloc_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *, int, int); static int free_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *); static void add_fl_sysctls(struct adapter *, struct sysctl_ctx_list *, struct sysctl_oid *, struct sge_fl *); static int alloc_fwq(struct adapter *); static int free_fwq(struct adapter *); static int alloc_mgmtq(struct adapter *); static int free_mgmtq(struct adapter *); static int alloc_rxq(struct vi_info *, struct sge_rxq *, int, int, struct sysctl_oid *); static int free_rxq(struct vi_info *, struct sge_rxq *); #ifdef TCP_OFFLOAD static int alloc_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *, int, int, struct sysctl_oid *); static int free_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *); #endif #ifdef DEV_NETMAP static int alloc_nm_rxq(struct vi_info *, struct sge_nm_rxq *, int, int, struct sysctl_oid *); static int free_nm_rxq(struct vi_info *, struct sge_nm_rxq *); static int alloc_nm_txq(struct vi_info *, struct sge_nm_txq *, int, 
int, struct sysctl_oid *); static int free_nm_txq(struct vi_info *, struct sge_nm_txq *); #endif static int ctrl_eq_alloc(struct adapter *, struct sge_eq *); static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *); #ifdef TCP_OFFLOAD static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *); #endif static int alloc_eq(struct adapter *, struct vi_info *, struct sge_eq *); static int free_eq(struct adapter *, struct sge_eq *); static int alloc_wrq(struct adapter *, struct vi_info *, struct sge_wrq *, struct sysctl_oid *); static int free_wrq(struct adapter *, struct sge_wrq *); static int alloc_txq(struct vi_info *, struct sge_txq *, int, struct sysctl_oid *); static int free_txq(struct vi_info *, struct sge_txq *); static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int); static inline void ring_fl_db(struct adapter *, struct sge_fl *); static int refill_fl(struct adapter *, struct sge_fl *, int); static void refill_sfl(void *); static int alloc_fl_sdesc(struct sge_fl *); static void free_fl_sdesc(struct adapter *, struct sge_fl *); static void find_best_refill_source(struct adapter *, struct sge_fl *, int); static void find_safe_refill_source(struct adapter *, struct sge_fl *); static void add_fl_to_sfl(struct adapter *, struct sge_fl *); static inline void get_pkt_gl(struct mbuf *, struct sglist *); static inline u_int txpkt_len16(u_int, u_int); static inline u_int txpkt_vm_len16(u_int, u_int); static inline u_int txpkts0_len16(u_int); static inline u_int txpkts1_len16(void); static u_int write_txpkt_wr(struct sge_txq *, struct fw_eth_tx_pkt_wr *, struct mbuf *, u_int); static u_int write_txpkt_vm_wr(struct adapter *, struct sge_txq *, struct fw_eth_tx_pkt_vm_wr *, struct mbuf *, u_int); static int try_txpkts(struct mbuf *, struct mbuf *, struct txpkts *, u_int); static int add_to_txpkts(struct mbuf *, struct txpkts *, u_int); static u_int write_txpkts_wr(struct sge_txq *, struct fw_eth_tx_pkts_wr *, struct mbuf *, const struct txpkts *, u_int); static void write_gl_to_txd(struct sge_txq *, struct mbuf *, caddr_t *, int); static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int); static inline void ring_eq_db(struct adapter *, struct sge_eq *, u_int); static inline uint16_t read_hw_cidx(struct sge_eq *); static inline u_int reclaimable_tx_desc(struct sge_eq *); static inline u_int total_available_tx_desc(struct sge_eq *); static u_int reclaim_tx_descs(struct sge_txq *, u_int); static void tx_reclaim(void *, int); static __be64 get_flit(struct sglist_seg *, int, int); static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *, struct mbuf *); static int handle_fw_msg(struct sge_iq *, const struct rss_header *, struct mbuf *); static int t4_handle_wrerr_rpl(struct adapter *, const __be64 *); static void wrq_tx_drain(void *, int); static void drain_wrq_wr_list(struct adapter *, struct sge_wrq *); static int sysctl_uint16(SYSCTL_HANDLER_ARGS); static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS); static int sysctl_tc(SYSCTL_HANDLER_ARGS); static counter_u64_t extfree_refs; static counter_u64_t extfree_rels; an_handler_t t4_an_handler; fw_msg_handler_t t4_fw_msg_handler[NUM_FW6_TYPES]; cpl_handler_t t4_cpl_handler[NUM_CPL_CMDS]; static int an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl) { #ifdef INVARIANTS panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl); #else log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n", __func__, iq, ctrl); #endif return (EDOOFUS); } int 
t4_register_an_handler(an_handler_t h) { uintptr_t *loc, new; new = h ? (uintptr_t)h : (uintptr_t)an_not_handled; loc = (uintptr_t *) &t4_an_handler; atomic_store_rel_ptr(loc, new); return (0); } static int fw_msg_not_handled(struct adapter *sc, const __be64 *rpl) { const struct cpl_fw6_msg *cpl = __containerof(rpl, struct cpl_fw6_msg, data[0]); #ifdef INVARIANTS panic("%s: fw_msg type %d", __func__, cpl->type); #else log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type); #endif return (EDOOFUS); } int t4_register_fw_msg_handler(int type, fw_msg_handler_t h) { uintptr_t *loc, new; if (type >= nitems(t4_fw_msg_handler)) return (EINVAL); /* * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL * handler dispatch table. Reject any attempt to install a handler for * this subtype. */ if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL) return (EINVAL); new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled; loc = (uintptr_t *) &t4_fw_msg_handler[type]; atomic_store_rel_ptr(loc, new); return (0); } static int cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { #ifdef INVARIANTS panic("%s: opcode 0x%02x on iq %p with payload %p", __func__, rss->opcode, iq, m); #else log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n", __func__, rss->opcode, iq, m); m_freem(m); #endif return (EDOOFUS); } int t4_register_cpl_handler(int opcode, cpl_handler_t h) { uintptr_t *loc, new; if (opcode >= nitems(t4_cpl_handler)) return (EINVAL); new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled; loc = (uintptr_t *) &t4_cpl_handler[opcode]; atomic_store_rel_ptr(loc, new); return (0); } /* * Called on MOD_LOAD. Validates and calculates the SGE tunables. */ void t4_sge_modload(void) { int i; if (fl_pktshift < 0 || fl_pktshift > 7) { printf("Invalid hw.cxgbe.fl_pktshift value (%d)," " using 2 instead.\n", fl_pktshift); fl_pktshift = 2; } if (spg_len != 64 && spg_len != 128) { int len; #if defined(__i386__) || defined(__amd64__) len = cpu_clflush_line_size > 64 ? 
128 : 64; #else len = 64; #endif if (spg_len != -1) { printf("Invalid hw.cxgbe.spg_len value (%d)," " using %d instead.\n", spg_len, len); } spg_len = len; } if (cong_drop < -1 || cong_drop > 1) { printf("Invalid hw.cxgbe.cong_drop value (%d)," " using 0 instead.\n", cong_drop); cong_drop = 0; } if (tscale != 1 && (tscale < 3 || tscale > 17)) { printf("Invalid hw.cxgbe.tscale value (%d)," " using 1 instead.\n", tscale); tscale = 1; } extfree_refs = counter_u64_alloc(M_WAITOK); extfree_rels = counter_u64_alloc(M_WAITOK); counter_u64_zero(extfree_refs); counter_u64_zero(extfree_rels); t4_an_handler = an_not_handled; for (i = 0; i < nitems(t4_fw_msg_handler); i++) t4_fw_msg_handler[i] = fw_msg_not_handled; for (i = 0; i < nitems(t4_cpl_handler); i++) t4_cpl_handler[i] = cpl_not_handled; t4_register_cpl_handler(CPL_FW4_MSG, handle_fw_msg); t4_register_cpl_handler(CPL_FW6_MSG, handle_fw_msg); t4_register_cpl_handler(CPL_SGE_EGR_UPDATE, handle_sge_egr_update); t4_register_cpl_handler(CPL_RX_PKT, t4_eth_rx); t4_register_fw_msg_handler(FW6_TYPE_CMD_RPL, t4_handle_fw_rpl); t4_register_fw_msg_handler(FW6_TYPE_WRERR_RPL, t4_handle_wrerr_rpl); } void t4_sge_modunload(void) { counter_u64_free(extfree_refs); counter_u64_free(extfree_rels); } uint64_t t4_sge_extfree_refs(void) { uint64_t refs, rels; rels = counter_u64_fetch(extfree_rels); refs = counter_u64_fetch(extfree_refs); return (refs - rels); } static inline void setup_pad_and_pack_boundaries(struct adapter *sc) { uint32_t v, m; int pad, pack, pad_shift; pad_shift = chip_id(sc) > CHELSIO_T5 ? X_T6_INGPADBOUNDARY_SHIFT : X_INGPADBOUNDARY_SHIFT; pad = fl_pad; if (fl_pad < (1 << pad_shift) || fl_pad > (1 << (pad_shift + M_INGPADBOUNDARY)) || !powerof2(fl_pad)) { /* * If there is any chance that we might use buffer packing and * the chip is a T4, then pick 64 as the pad/pack boundary. Set * it to the minimum allowed in all other cases. */ pad = is_t4(sc) && buffer_packing ? 64 : 1 << pad_shift; /* * For fl_pad = 0 we'll still write a reasonable value to the * register but all the freelists will opt out of padding. * We'll complain here only if the user tried to set it to a * value greater than 0 that was invalid. */ if (fl_pad > 0) { device_printf(sc->dev, "Invalid hw.cxgbe.fl_pad value" " (%d), using %d instead.\n", fl_pad, pad); } } m = V_INGPADBOUNDARY(M_INGPADBOUNDARY); v = V_INGPADBOUNDARY(ilog2(pad) - pad_shift); t4_set_reg_field(sc, A_SGE_CONTROL, m, v); if (is_t4(sc)) { if (fl_pack != -1 && fl_pack != pad) { /* Complain but carry on. */ device_printf(sc->dev, "hw.cxgbe.fl_pack (%d) ignored," " using %d instead.\n", fl_pack, pad); } return; } pack = fl_pack; if (fl_pack < 16 || fl_pack == 32 || fl_pack > 4096 || !powerof2(fl_pack)) { pack = max(sc->params.pci.mps, CACHE_LINE_SIZE); MPASS(powerof2(pack)); if (pack < 16) pack = 16; if (pack == 32) pack = 64; if (pack > 4096) pack = 4096; if (fl_pack != -1) { device_printf(sc->dev, "Invalid hw.cxgbe.fl_pack value" " (%d), using %d instead.\n", fl_pack, pack); } } m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY); if (pack == 16) v = V_INGPACKBOUNDARY(0); else v = V_INGPACKBOUNDARY(ilog2(pack) - 5); MPASS(!is_t4(sc)); /* T4 doesn't have SGE_CONTROL2 */ t4_set_reg_field(sc, A_SGE_CONTROL2, m, v); } /* * adap->params.vpd.cclk must be set up before this is called. 
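 *
 * us_to_core_ticks() below converts the microsecond interrupt holdoff
 * timers into core-clock ticks using cclk, and timer_max is derived from
 * it as M_TIMERVALUE0 * 1000 / cclk, which suggests cclk is in kHz.  With
 * an illustrative cclk of 62500, a 5us timer would be programmed as about
 * 5 * 62500 / 1000 = 312 ticks; an unset cclk would program garbage.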
*/ void t4_tweak_chip_settings(struct adapter *sc) { int i; uint32_t v, m; int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200}; int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk; int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */ uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); static int sge_flbuf_sizes[] = { MCLBYTES, #if MJUMPAGESIZE != MCLBYTES MJUMPAGESIZE, MJUMPAGESIZE - CL_METADATA_SIZE, MJUMPAGESIZE - 2 * MSIZE - CL_METADATA_SIZE, #endif MJUM9BYTES, MJUM16BYTES, MCLBYTES - MSIZE - CL_METADATA_SIZE, MJUM9BYTES - CL_METADATA_SIZE, MJUM16BYTES - CL_METADATA_SIZE, }; KASSERT(sc->flags & MASTER_PF, ("%s: trying to change chip settings when not master.", __func__)); m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE; v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE | V_EGRSTATUSPAGESIZE(spg_len == 128); t4_set_reg_field(sc, A_SGE_CONTROL, m, v); setup_pad_and_pack_boundaries(sc); v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) | V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) | V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) | V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) | V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) | V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) | V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) | V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10); t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, v); KASSERT(nitems(sge_flbuf_sizes) <= SGE_FLBUF_SIZES, ("%s: hw buffer size table too big", __func__)); for (i = 0; i < min(nitems(sge_flbuf_sizes), SGE_FLBUF_SIZES); i++) { t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i), sge_flbuf_sizes[i]); } v = V_THRESHOLD_0(intr_pktcount[0]) | V_THRESHOLD_1(intr_pktcount[1]) | V_THRESHOLD_2(intr_pktcount[2]) | V_THRESHOLD_3(intr_pktcount[3]); t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, v); KASSERT(intr_timer[0] <= timer_max, ("%s: not a single usable timer (%d, %d)", __func__, intr_timer[0], timer_max)); for (i = 1; i < nitems(intr_timer); i++) { KASSERT(intr_timer[i] >= intr_timer[i - 1], ("%s: timers not listed in increasing order (%d)", __func__, i)); while (intr_timer[i] > timer_max) { if (i == nitems(intr_timer) - 1) { intr_timer[i] = timer_max; break; } intr_timer[i] += intr_timer[i - 1]; intr_timer[i] /= 2; } } v = V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) | V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1])); t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, v); v = V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) | V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3])); t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, v); v = V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) | V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5])); t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v); if (chip_id(sc) >= CHELSIO_T6) { m = V_TSCALE(M_TSCALE); if (tscale == 1) v = 0; else v = V_TSCALE(tscale - 2); t4_set_reg_field(sc, A_SGE_ITP_CONTROL, m, v); } /* 4K, 16K, 64K, 256K DDP "page sizes" for TDDP */ v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6); t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v); /* * 4K, 8K, 16K, 64K DDP "page sizes" for iSCSI DDP. These have been * chosen with MAXPHYS = 128K in mind. The largest DDP buffer that we * may have to deal with is MAXPHYS + 1 page. */ v = V_HPZ0(0) | V_HPZ1(1) | V_HPZ2(2) | V_HPZ3(4); t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, v); /* We use multiple DDP page sizes both in plain-TOE and ISCSI modes. 
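 * The HPZ fields programmed above appear to encode each page size as
 * 4KB << value: 0/2/4/6 yields the 4K/16K/64K/256K TDDP sizes and 0/1/2/4
 * yields the 4K/8K/16K/64K iSCSI sizes noted in the comments above.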
*/ m = v = F_TDDPTAGTCB | F_ISCSITAGTCB; t4_set_reg_field(sc, A_ULP_RX_CTL, m, v); m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET | F_RESETDDPOFFSET; v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET; t4_set_reg_field(sc, A_TP_PARA_REG5, m, v); } /* * SGE wants the buffer to be at least 64B and then a multiple of 16. If * padding is in use, the buffer's start and end need to be aligned to the pad * boundary as well. We'll just make sure that the size is a multiple of the * boundary here, it is up to the buffer allocation code to make sure the start * of the buffer is aligned as well. */ static inline int hwsz_ok(struct adapter *sc, int hwsz) { int mask = fl_pad ? sc->params.sge.pad_boundary - 1 : 16 - 1; return (hwsz >= 64 && (hwsz & mask) == 0); } /* * XXX: driver really should be able to deal with unexpected settings. */ int t4_read_chip_settings(struct adapter *sc) { struct sge *s = &sc->sge; struct sge_params *sp = &sc->params.sge; int i, j, n, rc = 0; uint32_t m, v, r; uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); static int sw_buf_sizes[] = { /* Sorted by size */ MCLBYTES, #if MJUMPAGESIZE != MCLBYTES MJUMPAGESIZE, #endif MJUM9BYTES, MJUM16BYTES }; struct sw_zone_info *swz, *safe_swz; struct hw_buf_info *hwb; m = F_RXPKTCPLMODE; v = F_RXPKTCPLMODE; r = sc->params.sge.sge_control; if ((r & m) != v) { device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r); rc = EINVAL; } /* * If this changes then every single use of PAGE_SHIFT in the driver * needs to be carefully reviewed for PAGE_SHIFT vs sp->page_shift. */ if (sp->page_shift != PAGE_SHIFT) { device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r); rc = EINVAL; } /* Filter out unusable hw buffer sizes entirely (mark with -2). */ hwb = &s->hw_buf_info[0]; for (i = 0; i < nitems(s->hw_buf_info); i++, hwb++) { r = sc->params.sge.sge_fl_buffer_size[i]; hwb->size = r; hwb->zidx = hwsz_ok(sc, r) ? -1 : -2; hwb->next = -1; } /* * Create a sorted list in decreasing order of hw buffer sizes (and so * increasing order of spare area) for each software zone. * * If padding is enabled then the start and end of the buffer must align * to the pad boundary; if packing is enabled then they must align with * the pack boundary as well. Allocations from the cluster zones are * aligned to min(size, 4K), so the buffer starts at that alignment and * ends at hwb->size alignment. If mbuf inlining is allowed the * starting alignment will be reduced to MSIZE and the driver will * exercise appropriate caution when deciding on the best buffer layout * to use. 
 */
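/*
 * Worked example (illustrative, assuming the common 4KB MJUMPAGESIZE): the
 * 4KB software zone would carry the hw sizes 4096, 4096 - CL_METADATA_SIZE,
 * and 4096 - 2 * MSIZE - CL_METADATA_SIZE in that order: decreasing size,
 * i.e. increasing spare room for cluster metadata and inline mbufs.
 */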
*/ n = 0; /* no usable buffer size to begin with */ swz = &s->sw_zone_info[0]; safe_swz = NULL; for (i = 0; i < SW_ZONE_SIZES; i++, swz++) { int8_t head = -1, tail = -1; swz->size = sw_buf_sizes[i]; swz->zone = m_getzone(swz->size); swz->type = m_gettype(swz->size); if (swz->size < PAGE_SIZE) { MPASS(powerof2(swz->size)); if (fl_pad && (swz->size % sp->pad_boundary != 0)) continue; } if (swz->size == safest_rx_cluster) safe_swz = swz; hwb = &s->hw_buf_info[0]; for (j = 0; j < SGE_FLBUF_SIZES; j++, hwb++) { if (hwb->zidx != -1 || hwb->size > swz->size) continue; #ifdef INVARIANTS if (fl_pad) MPASS(hwb->size % sp->pad_boundary == 0); #endif hwb->zidx = i; if (head == -1) head = tail = j; else if (hwb->size < s->hw_buf_info[tail].size) { s->hw_buf_info[tail].next = j; tail = j; } else { int8_t *cur; struct hw_buf_info *t; for (cur = &head; *cur != -1; cur = &t->next) { t = &s->hw_buf_info[*cur]; if (hwb->size == t->size) { hwb->zidx = -2; break; } if (hwb->size > t->size) { hwb->next = *cur; *cur = j; break; } } } } swz->head_hwidx = head; swz->tail_hwidx = tail; if (tail != -1) { n++; if (swz->size - s->hw_buf_info[tail].size >= CL_METADATA_SIZE) sc->flags |= BUF_PACKING_OK; } } if (n == 0) { device_printf(sc->dev, "no usable SGE FL buffer size.\n"); rc = EINVAL; } s->safe_hwidx1 = -1; s->safe_hwidx2 = -1; if (safe_swz != NULL) { s->safe_hwidx1 = safe_swz->head_hwidx; for (i = safe_swz->head_hwidx; i != -1; i = hwb->next) { int spare; hwb = &s->hw_buf_info[i]; #ifdef INVARIANTS if (fl_pad) MPASS(hwb->size % sp->pad_boundary == 0); #endif spare = safe_swz->size - hwb->size; if (spare >= CL_METADATA_SIZE) { s->safe_hwidx2 = i; break; } } } if (sc->flags & IS_VF) return (0); v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6); r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ); if (r != v) { device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r); rc = EINVAL; } m = v = F_TDDPTAGTCB; r = t4_read_reg(sc, A_ULP_RX_CTL); if ((r & m) != v) { device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r); rc = EINVAL; } m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET | F_RESETDDPOFFSET; v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET; r = t4_read_reg(sc, A_TP_PARA_REG5); if ((r & m) != v) { device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r); rc = EINVAL; } t4_init_tp_params(sc); t4_read_mtu_tbl(sc, sc->params.mtus, NULL); t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd); return (rc); } int t4_create_dma_tag(struct adapter *sc) { int rc; rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->dmat); if (rc != 0) { device_printf(sc->dev, "failed to create main DMA tag: %d\n", rc); } return (rc); } void t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *children) { struct sge_params *sp = &sc->params.sge; SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "buffer_sizes", CTLTYPE_STRING | CTLFLAG_RD, &sc->sge, 0, sysctl_bufsizes, "A", "freelist buffer sizes"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD, NULL, sp->fl_pktshift, "payload DMA offset in rx buffer (bytes)"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD, NULL, sp->pad_boundary, "payload pad boundary (bytes)"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD, NULL, sp->spg_len, "status page size (bytes)"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD, NULL, cong_drop, 
"congestion drop setting"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pack", CTLFLAG_RD, NULL, sp->pack_boundary, "payload pack boundary (bytes)"); } int t4_destroy_dma_tag(struct adapter *sc) { if (sc->dmat) bus_dma_tag_destroy(sc->dmat); return (0); } /* * Allocate and initialize the firmware event queue and the management queue. * * Returns errno on failure. Resources allocated up to that point may still be * allocated. Caller is responsible for cleanup in case this function fails. */ int t4_setup_adapter_queues(struct adapter *sc) { int rc; ADAPTER_LOCK_ASSERT_NOTOWNED(sc); sysctl_ctx_init(&sc->ctx); sc->flags |= ADAP_SYSCTL_CTX; /* * Firmware event queue */ rc = alloc_fwq(sc); if (rc != 0) return (rc); /* * Management queue. This is just a control queue that uses the fwq as * its associated iq. */ if (!(sc->flags & IS_VF)) rc = alloc_mgmtq(sc); return (rc); } /* * Idempotent */ int t4_teardown_adapter_queues(struct adapter *sc) { ADAPTER_LOCK_ASSERT_NOTOWNED(sc); /* Do this before freeing the queue */ if (sc->flags & ADAP_SYSCTL_CTX) { sysctl_ctx_free(&sc->ctx); sc->flags &= ~ADAP_SYSCTL_CTX; } free_mgmtq(sc); free_fwq(sc); return (0); } static inline int first_vector(struct vi_info *vi) { struct adapter *sc = vi->pi->adapter; if (sc->intr_count == 1) return (0); return (vi->first_intr); } /* * Given an arbitrary "index," come up with an iq that can be used by other * queues (of this VI) for interrupt forwarding, SGE egress updates, etc. * The iq returned is guaranteed to be something that takes direct interrupts. */ static struct sge_iq * vi_intr_iq(struct vi_info *vi, int idx) { struct adapter *sc = vi->pi->adapter; struct sge *s = &sc->sge; struct sge_iq *iq = NULL; int nintr, i; if (sc->intr_count == 1) return (&sc->sge.fwq); nintr = vi->nintr; #ifdef DEV_NETMAP /* Do not consider any netmap-only interrupts */ if (vi->flags & INTR_RXQ && vi->nnmrxq > vi->nrxq) nintr -= vi->nnmrxq - vi->nrxq; #endif KASSERT(nintr != 0, ("%s: vi %p has no exclusive interrupts, total interrupts = %d", __func__, vi, sc->intr_count)); i = idx % nintr; if (vi->flags & INTR_RXQ) { if (i < vi->nrxq) { iq = &s->rxq[vi->first_rxq + i].iq; goto done; } i -= vi->nrxq; } #ifdef TCP_OFFLOAD if (vi->flags & INTR_OFLD_RXQ) { if (i < vi->nofldrxq) { iq = &s->ofld_rxq[vi->first_ofld_rxq + i].iq; goto done; } i -= vi->nofldrxq; } #endif panic("%s: vi %p, intr_flags 0x%lx, idx %d, total intr %d\n", __func__, vi, vi->flags & INTR_ALL, idx, nintr); done: MPASS(iq != NULL); KASSERT(iq->flags & IQ_INTR, ("%s: iq %p (vi %p, intr_flags 0x%lx, idx %d)", __func__, iq, vi, vi->flags & INTR_ALL, idx)); return (iq); } /* Maximum payload that can be delivered with a single iq descriptor */ static inline int mtu_to_max_payload(struct adapter *sc, int mtu, const int toe) { int payload; #ifdef TCP_OFFLOAD if (toe) { payload = sc->tt.rx_coalesce ? 
G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2)) : mtu; } else { #endif /* large enough even when hw VLAN extraction is disabled */ payload = sc->params.sge.fl_pktshift + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + mtu; #ifdef TCP_OFFLOAD } #endif return (payload); } int t4_setup_vi_queues(struct vi_info *vi) { int rc = 0, i, j, intr_idx, iqid; struct sge_rxq *rxq; struct sge_txq *txq; struct sge_wrq *ctrlq; #ifdef TCP_OFFLOAD struct sge_ofld_rxq *ofld_rxq; struct sge_wrq *ofld_txq; #endif #ifdef DEV_NETMAP int saved_idx; struct sge_nm_rxq *nm_rxq; struct sge_nm_txq *nm_txq; #endif char name[16]; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct ifnet *ifp = vi->ifp; struct sysctl_oid *oid = device_get_sysctl_tree(vi->dev); struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); int maxp, mtu = ifp->if_mtu; /* Interrupt vector to start from (when using multiple vectors) */ intr_idx = first_vector(vi); #ifdef DEV_NETMAP saved_idx = intr_idx; if (ifp->if_capabilities & IFCAP_NETMAP) { /* netmap is supported with direct interrupts only. */ MPASS(vi->flags & INTR_RXQ); /* * We don't have buffers to back the netmap rx queues * right now so we create the queues in a way that * doesn't set off any congestion signal in the chip. */ oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_rxq", CTLFLAG_RD, NULL, "rx queues"); for_each_nm_rxq(vi, i, nm_rxq) { rc = alloc_nm_rxq(vi, nm_rxq, intr_idx, i, oid); if (rc != 0) goto done; intr_idx++; } oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_txq", CTLFLAG_RD, NULL, "tx queues"); for_each_nm_txq(vi, i, nm_txq) { iqid = vi->first_nm_rxq + (i % vi->nnmrxq); rc = alloc_nm_txq(vi, nm_txq, iqid, i, oid); if (rc != 0) goto done; } } /* Normal rx queues and netmap rx queues share the same interrupts. */ intr_idx = saved_idx; #endif /* * First pass over all NIC and TOE rx queues: * a) initialize iq and fl * b) allocate queue iff it will take direct interrupts. */ maxp = mtu_to_max_payload(sc, mtu, 0); if (vi->flags & INTR_RXQ) { oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq", CTLFLAG_RD, NULL, "rx queues"); } for_each_rxq(vi, i, rxq) { init_iq(&rxq->iq, sc, vi->tmr_idx, vi->pktc_idx, vi->qsize_rxq); snprintf(name, sizeof(name), "%s rxq%d-fl", device_get_nameunit(vi->dev), i); init_fl(sc, &rxq->fl, vi->qsize_rxq / 8, maxp, name); if (vi->flags & INTR_RXQ) { rxq->iq.flags |= IQ_INTR; rc = alloc_rxq(vi, rxq, intr_idx, i, oid); if (rc != 0) goto done; intr_idx++; } } #ifdef DEV_NETMAP if (ifp->if_capabilities & IFCAP_NETMAP) intr_idx = saved_idx + max(vi->nrxq, vi->nnmrxq); #endif #ifdef TCP_OFFLOAD maxp = mtu_to_max_payload(sc, mtu, 1); if (vi->flags & INTR_OFLD_RXQ) { oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq", CTLFLAG_RD, NULL, "rx queues for offloaded TCP connections"); } for_each_ofld_rxq(vi, i, ofld_rxq) { init_iq(&ofld_rxq->iq, sc, vi->tmr_idx, vi->pktc_idx, vi->qsize_rxq); snprintf(name, sizeof(name), "%s ofld_rxq%d-fl", device_get_nameunit(vi->dev), i); init_fl(sc, &ofld_rxq->fl, vi->qsize_rxq / 8, maxp, name); if (vi->flags & INTR_OFLD_RXQ) { ofld_rxq->iq.flags |= IQ_INTR; rc = alloc_ofld_rxq(vi, ofld_rxq, intr_idx, i, oid); if (rc != 0) goto done; intr_idx++; } } #endif /* * Second pass over all NIC and TOE rx queues. The queues forwarding * their interrupts are allocated now. 
 */
*/ j = 0; if (!(vi->flags & INTR_RXQ)) { oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq", CTLFLAG_RD, NULL, "rx queues"); for_each_rxq(vi, i, rxq) { MPASS(!(rxq->iq.flags & IQ_INTR)); intr_idx = vi_intr_iq(vi, j)->abs_id; rc = alloc_rxq(vi, rxq, intr_idx, i, oid); if (rc != 0) goto done; j++; } } #ifdef TCP_OFFLOAD if (vi->nofldrxq != 0 && !(vi->flags & INTR_OFLD_RXQ)) { oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq", CTLFLAG_RD, NULL, "rx queues for offloaded TCP connections"); for_each_ofld_rxq(vi, i, ofld_rxq) { MPASS(!(ofld_rxq->iq.flags & IQ_INTR)); intr_idx = vi_intr_iq(vi, j)->abs_id; rc = alloc_ofld_rxq(vi, ofld_rxq, intr_idx, i, oid); if (rc != 0) goto done; j++; } } #endif /* * Now the tx queues. Only one pass needed. */ oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "txq", CTLFLAG_RD, NULL, "tx queues"); j = 0; for_each_txq(vi, i, txq) { iqid = vi_intr_iq(vi, j)->cntxt_id; snprintf(name, sizeof(name), "%s txq%d", device_get_nameunit(vi->dev), i); init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->tx_chan, iqid, name); rc = alloc_txq(vi, txq, i, oid); if (rc != 0) goto done; j++; } #ifdef TCP_OFFLOAD oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_txq", CTLFLAG_RD, NULL, "tx queues for offloaded TCP connections"); for_each_ofld_txq(vi, i, ofld_txq) { struct sysctl_oid *oid2; iqid = vi_intr_iq(vi, j)->cntxt_id; snprintf(name, sizeof(name), "%s ofld_txq%d", device_get_nameunit(vi->dev), i); init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq, pi->tx_chan, iqid, name); snprintf(name, sizeof(name), "%d", i); oid2 = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL, "offload tx queue"); rc = alloc_wrq(sc, vi, ofld_txq, oid2); if (rc != 0) goto done; j++; } #endif /* * Finally, the control queue. */ if (!IS_MAIN_VI(vi) || sc->flags & IS_VF) goto done; oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ctrlq", CTLFLAG_RD, NULL, "ctrl queue"); ctrlq = &sc->sge.ctrlq[pi->port_id]; iqid = vi_intr_iq(vi, 0)->cntxt_id; snprintf(name, sizeof(name), "%s ctrlq", device_get_nameunit(vi->dev)); init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan, iqid, name); rc = alloc_wrq(sc, vi, ctrlq, oid); done: if (rc) t4_teardown_vi_queues(vi); return (rc); } /* * Idempotent */ int t4_teardown_vi_queues(struct vi_info *vi) { int i; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct sge_rxq *rxq; struct sge_txq *txq; #ifdef TCP_OFFLOAD struct sge_ofld_rxq *ofld_rxq; struct sge_wrq *ofld_txq; #endif #ifdef DEV_NETMAP struct sge_nm_rxq *nm_rxq; struct sge_nm_txq *nm_txq; #endif /* Do this before freeing the queues */ if (vi->flags & VI_SYSCTL_CTX) { sysctl_ctx_free(&vi->ctx); vi->flags &= ~VI_SYSCTL_CTX; } #ifdef DEV_NETMAP if (vi->ifp->if_capabilities & IFCAP_NETMAP) { for_each_nm_txq(vi, i, nm_txq) { free_nm_txq(vi, nm_txq); } for_each_nm_rxq(vi, i, nm_rxq) { free_nm_rxq(vi, nm_rxq); } } #endif /* * Take down all the tx queues first, as they reference the rx queues * (for egress updates, etc.). */ if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF)) free_wrq(sc, &sc->sge.ctrlq[pi->port_id]); for_each_txq(vi, i, txq) { free_txq(vi, txq); } #ifdef TCP_OFFLOAD for_each_ofld_txq(vi, i, ofld_txq) { free_wrq(sc, ofld_txq); } #endif /* * Then take down the rx queues that forward their interrupts, as they * reference other rx queues. 
 */
*/ for_each_rxq(vi, i, rxq) { if ((rxq->iq.flags & IQ_INTR) == 0) free_rxq(vi, rxq); } #ifdef TCP_OFFLOAD for_each_ofld_rxq(vi, i, ofld_rxq) { if ((ofld_rxq->iq.flags & IQ_INTR) == 0) free_ofld_rxq(vi, ofld_rxq); } #endif /* * Then take down the rx queues that take direct interrupts. */ for_each_rxq(vi, i, rxq) { if (rxq->iq.flags & IQ_INTR) free_rxq(vi, rxq); } #ifdef TCP_OFFLOAD for_each_ofld_rxq(vi, i, ofld_rxq) { if (ofld_rxq->iq.flags & IQ_INTR) free_ofld_rxq(vi, ofld_rxq); } #endif return (0); } /* * Deals with errors and the firmware event queue. All data rx queues forward * their interrupt to the firmware event queue. */ void t4_intr_all(void *arg) { struct adapter *sc = arg; struct sge_iq *fwq = &sc->sge.fwq; t4_intr_err(arg); if (atomic_cmpset_int(&fwq->state, IQS_IDLE, IQS_BUSY)) { service_iq(fwq, 0); atomic_cmpset_int(&fwq->state, IQS_BUSY, IQS_IDLE); } } /* Deals with error interrupts */ void t4_intr_err(void *arg) { struct adapter *sc = arg; t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0); t4_slow_intr_handler(sc); } void t4_intr_evt(void *arg) { struct sge_iq *iq = arg; if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { service_iq(iq, 0); atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); } } void t4_intr(void *arg) { struct sge_iq *iq = arg; if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { service_iq(iq, 0); atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); } } void t4_vi_intr(void *arg) { struct irq *irq = arg; #ifdef DEV_NETMAP if (atomic_cmpset_int(&irq->nm_state, NM_ON, NM_BUSY)) { t4_nm_intr(irq->nm_rxq); atomic_cmpset_int(&irq->nm_state, NM_BUSY, NM_ON); } #endif if (irq->rxq != NULL) t4_intr(irq->rxq); } static inline int sort_before_lro(struct lro_ctrl *lro) { return (lro->lro_mbuf_max != 0); } /* * Deals with anything and everything on the given ingress queue. */ static int service_iq(struct sge_iq *iq, int budget) { struct sge_iq *q; struct sge_rxq *rxq = iq_to_rxq(iq); /* Use iff iq is part of rxq */ struct sge_fl *fl; /* Use iff IQ_HAS_FL */ struct adapter *sc = iq->adapter; struct iq_desc *d = &iq->desc[iq->cidx]; int ndescs = 0, limit; int rsp_type, refill; uint32_t lq; uint16_t fl_hw_cidx; struct mbuf *m0; STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql); #if defined(INET) || defined(INET6) const struct timeval lro_timeout = {0, sc->lro_timeout}; struct lro_ctrl *lro = &rxq->lro; #endif KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq)); limit = budget ? budget : iq->qsize / 16; if (iq->flags & IQ_HAS_FL) { fl = &rxq->fl; fl_hw_cidx = fl->hw_cidx; /* stable snapshot */ } else { fl = NULL; fl_hw_cidx = 0; /* to silence gcc warning */ } #if defined(INET) || defined(INET6) if (iq->flags & IQ_ADJ_CREDIT) { MPASS(sort_before_lro(lro)); iq->flags &= ~IQ_ADJ_CREDIT; if ((d->rsp.u.type_gen & F_RSPD_GEN) != iq->gen) { tcp_lro_flush_all(lro); t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(1) | V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params)); return (0); } ndescs = 1; } #else MPASS((iq->flags & IQ_ADJ_CREDIT) == 0); #endif /* * We always come back and check the descriptor ring for new indirect * interrupts and other responses after running a single handler. 
 */
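/*
 * Descriptor ownership protocol, as read from this loop: each response
 * carries a generation bit (F_RSPD_GEN) and a descriptor is software-owned
 * only while that bit matches iq->gen; iq->gen is flipped each time cidx
 * wraps at sidx, so descriptors left over from the previous lap of the
 * ring never match.
 */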
*/ for (;;) { while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) { rmb(); refill = 0; m0 = NULL; rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen); lq = be32toh(d->rsp.pldbuflen_qid); switch (rsp_type) { case X_RSPD_TYPE_FLBUF: KASSERT(iq->flags & IQ_HAS_FL, ("%s: data for an iq (%p) with no freelist", __func__, iq)); m0 = get_fl_payload(sc, fl, lq); if (__predict_false(m0 == NULL)) goto process_iql; refill = IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 2; #ifdef T4_PKT_TIMESTAMP /* * 60 bit timestamp for the payload is * *(uint64_t *)m0->m_pktdat. Note that it is * in the leading free-space in the mbuf. The * kernel can clobber it during a pullup, * m_copymdata, etc. You need to make sure that * the mbuf reaches you unmolested if you care * about the timestamp. */ *(uint64_t *)m0->m_pktdat = be64toh(ctrl->u.last_flit) & 0xfffffffffffffff; #endif /* fall through */ case X_RSPD_TYPE_CPL: KASSERT(d->rss.opcode < NUM_CPL_CMDS, ("%s: bad opcode %02x.", __func__, d->rss.opcode)); t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0); break; case X_RSPD_TYPE_INTR: /* * Interrupts should be forwarded only to queues * that are not forwarding their interrupts. * This means service_iq can recurse but only 1 * level deep. */ KASSERT(budget == 0, ("%s: budget %u, rsp_type %u", __func__, budget, rsp_type)); /* * There are 1K interrupt-capable queues (qids 0 * through 1023). A response type indicating a * forwarded interrupt with a qid >= 1K is an * iWARP async notification. */ if (lq >= 1024) { t4_an_handler(iq, &d->rsp); break; } q = sc->sge.iqmap[lq - sc->sge.iq_start - sc->sge.iq_base]; if (atomic_cmpset_int(&q->state, IQS_IDLE, IQS_BUSY)) { if (service_iq(q, q->qsize / 16) == 0) { atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE); } else { STAILQ_INSERT_TAIL(&iql, q, link); } } break; default: KASSERT(0, ("%s: illegal response type %d on iq %p", __func__, rsp_type, iq)); log(LOG_ERR, "%s: illegal response type %d on iq %p", device_get_nameunit(sc->dev), rsp_type, iq); break; } d++; if (__predict_false(++iq->cidx == iq->sidx)) { iq->cidx = 0; iq->gen ^= F_RSPD_GEN; d = &iq->desc[0]; } if (__predict_false(++ndescs == limit)) { t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) | V_INGRESSQID(iq->cntxt_id) | V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); ndescs = 0; #if defined(INET) || defined(INET6) if (iq->flags & IQ_LRO_ENABLED && !sort_before_lro(lro) && sc->lro_timeout != 0) { tcp_lro_flush_inactive(lro, &lro_timeout); } #endif if (budget) { if (iq->flags & IQ_HAS_FL) { FL_LOCK(fl); refill_fl(sc, fl, 32); FL_UNLOCK(fl); } return (EINPROGRESS); } } if (refill) { FL_LOCK(fl); refill_fl(sc, fl, 32); FL_UNLOCK(fl); fl_hw_cidx = fl->hw_cidx; } } process_iql: if (STAILQ_EMPTY(&iql)) break; /* * Process the head only, and send it to the back of the list if * it's still not done. 
 */
*/ q = STAILQ_FIRST(&iql); STAILQ_REMOVE_HEAD(&iql, link); if (service_iq(q, q->qsize / 8) == 0) atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE); else STAILQ_INSERT_TAIL(&iql, q, link); } #if defined(INET) || defined(INET6) if (iq->flags & IQ_LRO_ENABLED) { if (ndescs > 0 && lro->lro_mbuf_count > 8) { MPASS(sort_before_lro(lro)); /* hold back one credit and don't flush LRO state */ iq->flags |= IQ_ADJ_CREDIT; ndescs--; } else { tcp_lro_flush_all(lro); } } #endif t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) | V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params)); if (iq->flags & IQ_HAS_FL) { int starved; FL_LOCK(fl); starved = refill_fl(sc, fl, 64); FL_UNLOCK(fl); if (__predict_false(starved != 0)) add_fl_to_sfl(sc, fl); } return (0); } static inline int cl_has_metadata(struct sge_fl *fl, struct cluster_layout *cll) { int rc = fl->flags & FL_BUF_PACKING || cll->region1 > 0; if (rc) MPASS(cll->region3 >= CL_METADATA_SIZE); return (rc); } static inline struct cluster_metadata * cl_metadata(struct adapter *sc, struct sge_fl *fl, struct cluster_layout *cll, caddr_t cl) { if (cl_has_metadata(fl, cll)) { struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx]; return ((struct cluster_metadata *)(cl + swz->size) - 1); } return (NULL); } static void rxb_free(struct mbuf *m, void *arg1, void *arg2) { uma_zone_t zone = arg1; caddr_t cl = arg2; uma_zfree(zone, cl); counter_u64_add(extfree_rels, 1); } /* * The mbuf returned by this function could be allocated from zone_mbuf or * constructed in spare room in the cluster. * * The mbuf carries the payload in one of these ways * a) frame inside the mbuf (mbuf from zone_mbuf) * b) m_cljset (for clusters without metadata) zone_mbuf * c) m_extaddref (cluster with metadata) inline mbuf * d) m_extaddref (cluster with metadata) zone_mbuf */ static struct mbuf * get_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset, int remaining) { struct mbuf *m; struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; struct cluster_layout *cll = &sd->cll; struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx]; struct hw_buf_info *hwb = &sc->sge.hw_buf_info[cll->hwidx]; struct cluster_metadata *clm = cl_metadata(sc, fl, cll, sd->cl); int len, blen; caddr_t payload; blen = hwb->size - fl->rx_offset; /* max possible in this buf */ len = min(remaining, blen); payload = sd->cl + cll->region1 + fl->rx_offset; if (fl->flags & FL_BUF_PACKING) { const u_int l = fr_offset + len; const u_int pad = roundup2(l, fl->buf_boundary) - l; if (fl->rx_offset + len + pad < hwb->size) blen = len + pad; MPASS(fl->rx_offset + blen <= hwb->size); } else { MPASS(fl->rx_offset == 0); /* not packing */ } if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) { /* * Copy payload into a freshly allocated mbuf. */ m = fr_offset == 0 ? m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA); if (m == NULL) return (NULL); fl->mbuf_allocated++; #ifdef T4_PKT_TIMESTAMP /* Leave room for a timestamp */ m->m_data += 8; #endif /* copy data to mbuf */ bcopy(payload, mtod(m, caddr_t), len); } else if (sd->nmbuf * MSIZE < cll->region1) { /* * There's spare room in the cluster for an mbuf. Create one * and associate it with the payload that's in the cluster. */ MPASS(clm != NULL); m = (struct mbuf *)(sd->cl + sd->nmbuf * MSIZE); /* No bzero required */ if (m_init(m, M_NOWAIT, MT_DATA, fr_offset == 0 ? 
M_PKTHDR | M_NOFREE : M_NOFREE)) return (NULL); fl->mbuf_inlined++; m_extaddref(m, payload, blen, &clm->refcount, rxb_free, swz->zone, sd->cl); if (sd->nmbuf++ == 0) counter_u64_add(extfree_refs, 1); } else { /* * Grab an mbuf from zone_mbuf and associate it with the * payload in the cluster. */ m = fr_offset == 0 ? m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA); if (m == NULL) return (NULL); fl->mbuf_allocated++; if (clm != NULL) { m_extaddref(m, payload, blen, &clm->refcount, rxb_free, swz->zone, sd->cl); if (sd->nmbuf++ == 0) counter_u64_add(extfree_refs, 1); } else { m_cljset(m, sd->cl, swz->type); sd->cl = NULL; /* consumed, not a recycle candidate */ } } if (fr_offset == 0) m->m_pkthdr.len = remaining; m->m_len = len; if (fl->flags & FL_BUF_PACKING) { fl->rx_offset += blen; MPASS(fl->rx_offset <= hwb->size); if (fl->rx_offset < hwb->size) return (m); /* without advancing the cidx */ } if (__predict_false(++fl->cidx % 8 == 0)) { uint16_t cidx = fl->cidx / 8; if (__predict_false(cidx == fl->sidx)) fl->cidx = cidx = 0; fl->hw_cidx = cidx; } fl->rx_offset = 0; return (m); } static struct mbuf * get_fl_payload(struct adapter *sc, struct sge_fl *fl, uint32_t len_newbuf) { struct mbuf *m0, *m, **pnext; u_int remaining; const u_int total = G_RSPD_LEN(len_newbuf); if (__predict_false(fl->flags & FL_BUF_RESUME)) { M_ASSERTPKTHDR(fl->m0); MPASS(fl->m0->m_pkthdr.len == total); MPASS(fl->remaining < total); m0 = fl->m0; pnext = fl->pnext; remaining = fl->remaining; fl->flags &= ~FL_BUF_RESUME; goto get_segment; } if (fl->rx_offset > 0 && len_newbuf & F_RSPD_NEWBUF) { fl->rx_offset = 0; if (__predict_false(++fl->cidx % 8 == 0)) { uint16_t cidx = fl->cidx / 8; if (__predict_false(cidx == fl->sidx)) fl->cidx = cidx = 0; fl->hw_cidx = cidx; } } /* * Payload starts at rx_offset in the current hw buffer. Its length is * 'len' and it may span multiple hw buffers. 
 */
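/*
 * The frame is reassembled one hw buffer at a time.  If an mbuf allocation
 * fails partway through, the partial chain is parked in fl->m0, fl->pnext,
 * and fl->remaining with FL_BUF_RESUME set, and reassembly picks up at
 * get_segment on the next call.
 */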
*/ m0 = get_scatter_segment(sc, fl, 0, total); if (m0 == NULL) return (NULL); remaining = total - m0->m_len; pnext = &m0->m_next; while (remaining > 0) { get_segment: MPASS(fl->rx_offset == 0); m = get_scatter_segment(sc, fl, total - remaining, remaining); if (__predict_false(m == NULL)) { fl->m0 = m0; fl->pnext = pnext; fl->remaining = remaining; fl->flags |= FL_BUF_RESUME; return (NULL); } *pnext = m; pnext = &m->m_next; remaining -= m->m_len; } *pnext = NULL; M_ASSERTPKTHDR(m0); return (m0); } static int t4_eth_rx(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0) { struct sge_rxq *rxq = iq_to_rxq(iq); struct ifnet *ifp = rxq->ifp; struct adapter *sc = iq->adapter; const struct cpl_rx_pkt *cpl = (const void *)(rss + 1); #if defined(INET) || defined(INET6) struct lro_ctrl *lro = &rxq->lro; #endif static const int sw_hashtype[4][2] = { {M_HASHTYPE_NONE, M_HASHTYPE_NONE}, {M_HASHTYPE_RSS_IPV4, M_HASHTYPE_RSS_IPV6}, {M_HASHTYPE_RSS_TCP_IPV4, M_HASHTYPE_RSS_TCP_IPV6}, {M_HASHTYPE_RSS_UDP_IPV4, M_HASHTYPE_RSS_UDP_IPV6}, }; KASSERT(m0 != NULL, ("%s: no payload with opcode %02x", __func__, rss->opcode)); m0->m_pkthdr.len -= sc->params.sge.fl_pktshift; m0->m_len -= sc->params.sge.fl_pktshift; m0->m_data += sc->params.sge.fl_pktshift; m0->m_pkthdr.rcvif = ifp; M_HASHTYPE_SET(m0, sw_hashtype[rss->hash_type][rss->ipv6]); m0->m_pkthdr.flowid = be32toh(rss->hash_val); if (cpl->csum_calc && !(cpl->err_vec & sc->params.tp.err_vec_mask)) { if (ifp->if_capenable & IFCAP_RXCSUM && cpl->l2info & htobe32(F_RXF_IP)) { m0->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); rxq->rxcsum++; } else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 && cpl->l2info & htobe32(F_RXF_IP6)) { m0->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 | CSUM_PSEUDO_HDR); rxq->rxcsum++; } if (__predict_false(cpl->ip_frag)) m0->m_pkthdr.csum_data = be16toh(cpl->csum); else m0->m_pkthdr.csum_data = 0xffff; } if (cpl->vlan_ex) { m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan); m0->m_flags |= M_VLANTAG; rxq->vlan_extraction++; } #if defined(INET) || defined(INET6) if (iq->flags & IQ_LRO_ENABLED) { if (sort_before_lro(lro)) { tcp_lro_queue_mbuf(lro, m0); return (0); /* queued for sort, then LRO */ } if (tcp_lro_rx(lro, m0, 0) == 0) return (0); /* queued for LRO */ } #endif ifp->if_input(ifp, m0); return (0); } /* * Must drain the wrq or make sure that someone else will. */ static void wrq_tx_drain(void *arg, int n) { struct sge_wrq *wrq = arg; struct sge_eq *eq = &wrq->eq; EQ_LOCK(eq); if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) drain_wrq_wr_list(wrq->adapter, wrq); EQ_UNLOCK(eq); } static void drain_wrq_wr_list(struct adapter *sc, struct sge_wrq *wrq) { struct sge_eq *eq = &wrq->eq; u_int available, dbdiff; /* # of hardware descriptors */ u_int n; struct wrqe *wr; struct fw_eth_tx_pkt_wr *dst; /* any fw WR struct will do */ EQ_LOCK_ASSERT_OWNED(eq); MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs)); wr = STAILQ_FIRST(&wrq->wr_list); MPASS(wr != NULL); /* Must be called with something useful to do */ MPASS(eq->pidx == eq->dbidx); dbdiff = 0; do { eq->cidx = read_hw_cidx(eq); if (eq->pidx == eq->cidx) available = eq->sidx - 1; else available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; MPASS(wr->wrq == wrq); n = howmany(wr->wr_len, EQ_ESIZE); if (available < n) break; dst = (void *)&eq->desc[eq->pidx]; if (__predict_true(eq->sidx - eq->pidx > n)) { /* Won't wrap, won't end exactly at the status page. 
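 * In the wrap case below, first_portion = (eq->sidx - eq->pidx) * EQ_ESIZE
 * bytes land at the tail of the ring, the remainder restarts at desc[0],
 * and the new pidx is n - (eq->sidx - eq->pidx).  Illustrative numbers:
 * with sidx = 512, pidx = 510 and a WR needing n = 4 descriptors, two
 * descriptors go to slots 510-511, two wrap to slots 0-1, and pidx ends
 * up at 2.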
*/ bcopy(&wr->wr[0], dst, wr->wr_len); eq->pidx += n; } else { int first_portion = (eq->sidx - eq->pidx) * EQ_ESIZE; bcopy(&wr->wr[0], dst, first_portion); if (wr->wr_len > first_portion) { bcopy(&wr->wr[first_portion], &eq->desc[0], wr->wr_len - first_portion); } eq->pidx = n - (eq->sidx - eq->pidx); } wrq->tx_wrs_copied++; if (available < eq->sidx / 4 && atomic_cmpset_int(&eq->equiq, 0, 1)) { dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | F_FW_WR_EQUEQ); eq->equeqidx = eq->pidx; } else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) { dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); eq->equeqidx = eq->pidx; } dbdiff += n; if (dbdiff >= 16) { ring_eq_db(sc, eq, dbdiff); dbdiff = 0; } STAILQ_REMOVE_HEAD(&wrq->wr_list, link); free_wrqe(wr); MPASS(wrq->nwr_pending > 0); wrq->nwr_pending--; MPASS(wrq->ndesc_needed >= n); wrq->ndesc_needed -= n; } while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL); if (dbdiff) ring_eq_db(sc, eq, dbdiff); } /* * Doesn't fail. Holds on to work requests it can't send right away. */ void t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr) { #ifdef INVARIANTS struct sge_eq *eq = &wrq->eq; #endif EQ_LOCK_ASSERT_OWNED(eq); MPASS(wr != NULL); MPASS(wr->wr_len > 0 && wr->wr_len <= SGE_MAX_WR_LEN); MPASS((wr->wr_len & 0x7) == 0); STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link); wrq->nwr_pending++; wrq->ndesc_needed += howmany(wr->wr_len, EQ_ESIZE); if (!TAILQ_EMPTY(&wrq->incomplete_wrs)) return; /* commit_wrq_wr will drain wr_list as well. */ drain_wrq_wr_list(sc, wrq); /* Doorbell must have caught up to the pidx. */ MPASS(eq->pidx == eq->dbidx); } void t4_update_fl_bufsize(struct ifnet *ifp) { struct vi_info *vi = ifp->if_softc; struct adapter *sc = vi->pi->adapter; struct sge_rxq *rxq; #ifdef TCP_OFFLOAD struct sge_ofld_rxq *ofld_rxq; #endif struct sge_fl *fl; int i, maxp, mtu = ifp->if_mtu; maxp = mtu_to_max_payload(sc, mtu, 0); for_each_rxq(vi, i, rxq) { fl = &rxq->fl; FL_LOCK(fl); find_best_refill_source(sc, fl, maxp); FL_UNLOCK(fl); } #ifdef TCP_OFFLOAD maxp = mtu_to_max_payload(sc, mtu, 1); for_each_ofld_rxq(vi, i, ofld_rxq) { fl = &ofld_rxq->fl; FL_LOCK(fl); find_best_refill_source(sc, fl, maxp); FL_UNLOCK(fl); } #endif } static inline int mbuf_nsegs(struct mbuf *m) { M_ASSERTPKTHDR(m); KASSERT(m->m_pkthdr.l5hlen > 0, ("%s: mbuf %p missing information on # of segments.", __func__, m)); return (m->m_pkthdr.l5hlen); } static inline void set_mbuf_nsegs(struct mbuf *m, uint8_t nsegs) { M_ASSERTPKTHDR(m); m->m_pkthdr.l5hlen = nsegs; } static inline int mbuf_len16(struct mbuf *m) { int n; M_ASSERTPKTHDR(m); n = m->m_pkthdr.PH_loc.eight[0]; MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16); return (n); } static inline void set_mbuf_len16(struct mbuf *m, uint8_t len16) { M_ASSERTPKTHDR(m); m->m_pkthdr.PH_loc.eight[0] = len16; } static inline int needs_tso(struct mbuf *m) { M_ASSERTPKTHDR(m); if (m->m_pkthdr.csum_flags & CSUM_TSO) { KASSERT(m->m_pkthdr.tso_segsz > 0, ("%s: TSO requested in mbuf %p but MSS not provided", __func__, m)); return (1); } return (0); } static inline int needs_l3_csum(struct mbuf *m) { M_ASSERTPKTHDR(m); if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) return (1); return (0); } static inline int needs_l4_csum(struct mbuf *m) { M_ASSERTPKTHDR(m); if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) return (1); return (0); } static inline int needs_vlan_insertion(struct mbuf *m) { M_ASSERTPKTHDR(m); if (m->m_flags & M_VLANTAG) { KASSERT(m->m_pkthdr.ether_vtag != 0, ("%s: HWVLAN requested in 
mbuf %p but tag not provided", __func__, m)); return (1); } return (0); } static void * m_advance(struct mbuf **pm, int *poffset, int len) { struct mbuf *m = *pm; int offset = *poffset; uintptr_t p = 0; MPASS(len > 0); for (;;) { if (offset + len < m->m_len) { offset += len; p = mtod(m, uintptr_t) + offset; break; } len -= m->m_len - offset; m = m->m_next; offset = 0; MPASS(m != NULL); } *poffset = offset; *pm = m; return ((void *)p); } /* * Can deal with empty mbufs in the chain that have m_len = 0, but the chain * must have at least one mbuf that's not empty. */ static inline int count_mbuf_nsegs(struct mbuf *m) { vm_paddr_t lastb, next; vm_offset_t va; int len, nsegs; MPASS(m != NULL); nsegs = 0; lastb = 0; for (; m; m = m->m_next) { len = m->m_len; if (__predict_false(len == 0)) continue; va = mtod(m, vm_offset_t); next = pmap_kextract(va); nsegs += sglist_count(m->m_data, len); if (lastb + 1 == next) nsegs--; lastb = pmap_kextract(va + len - 1); } MPASS(nsegs > 0); return (nsegs); } /* * Analyze the mbuf to determine its tx needs. The mbuf passed in may change: * a) caller can assume it's been freed if this function returns with an error. * b) it may get defragged up if the gather list is too long for the hardware. */ int parse_pkt(struct adapter *sc, struct mbuf **mp) { struct mbuf *m0 = *mp, *m; int rc, nsegs, defragged = 0, offset; struct ether_header *eh; void *l3hdr; #if defined(INET) || defined(INET6) struct tcphdr *tcp; #endif uint16_t eh_type; M_ASSERTPKTHDR(m0); if (__predict_false(m0->m_pkthdr.len < ETHER_HDR_LEN)) { rc = EINVAL; fail: m_freem(m0); *mp = NULL; return (rc); } restart: /* * First count the number of gather list segments in the payload. * Defrag the mbuf if nsegs exceeds the hardware limit. */ M_ASSERTPKTHDR(m0); MPASS(m0->m_pkthdr.len > 0); nsegs = count_mbuf_nsegs(m0); if (nsegs > (needs_tso(m0) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS)) { if (defragged++ > 0 || (m = m_defrag(m0, M_NOWAIT)) == NULL) { rc = EFBIG; goto fail; } *mp = m0 = m; /* update caller's copy after defrag */ goto restart; } if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN)) { m0 = m_pullup(m0, m0->m_pkthdr.len); if (m0 == NULL) { /* Should have left well enough alone. */ rc = EFBIG; goto fail; } *mp = m0; /* update caller's copy after pullup */ goto restart; } set_mbuf_nsegs(m0, nsegs); if (sc->flags & IS_VF) set_mbuf_len16(m0, txpkt_vm_len16(nsegs, needs_tso(m0))); else set_mbuf_len16(m0, txpkt_len16(nsegs, needs_tso(m0))); if (!needs_tso(m0) && !(sc->flags & IS_VF && (needs_l3_csum(m0) || needs_l4_csum(m0)))) return (0); m = m0; eh = mtod(m, struct ether_header *); eh_type = ntohs(eh->ether_type); if (eh_type == ETHERTYPE_VLAN) { struct ether_vlan_header *evh = (void *)eh; eh_type = ntohs(evh->evl_proto); m0->m_pkthdr.l2hlen = sizeof(*evh); } else m0->m_pkthdr.l2hlen = sizeof(*eh); offset = 0; l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen); switch (eh_type) { #ifdef INET6 case ETHERTYPE_IPV6: { struct ip6_hdr *ip6 = l3hdr; MPASS(!needs_tso(m0) || ip6->ip6_nxt == IPPROTO_TCP); m0->m_pkthdr.l3hlen = sizeof(*ip6); break; } #endif #ifdef INET case ETHERTYPE_IP: { struct ip *ip = l3hdr; m0->m_pkthdr.l3hlen = ip->ip_hl * 4; break; } #endif default: panic("%s: ethertype 0x%04x unknown. 
if_cxgbe must be compiled" " with the same INET/INET6 options as the kernel.", __func__, eh_type); } #if defined(INET) || defined(INET6) if (needs_tso(m0)) { tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen); m0->m_pkthdr.l4hlen = tcp->th_off * 4; } #endif MPASS(m0 == *mp); return (0); } void * start_wrq_wr(struct sge_wrq *wrq, int len16, struct wrq_cookie *cookie) { struct sge_eq *eq = &wrq->eq; struct adapter *sc = wrq->adapter; int ndesc, available; struct wrqe *wr; void *w; MPASS(len16 > 0); ndesc = howmany(len16, EQ_ESIZE / 16); MPASS(ndesc > 0 && ndesc <= SGE_MAX_WR_NDESC); EQ_LOCK(eq); if (!STAILQ_EMPTY(&wrq->wr_list)) drain_wrq_wr_list(sc, wrq); if (!STAILQ_EMPTY(&wrq->wr_list)) { slowpath: EQ_UNLOCK(eq); wr = alloc_wrqe(len16 * 16, wrq); if (__predict_false(wr == NULL)) return (NULL); cookie->pidx = -1; cookie->ndesc = ndesc; return (&wr->wr); } eq->cidx = read_hw_cidx(eq); if (eq->pidx == eq->cidx) available = eq->sidx - 1; else available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; if (available < ndesc) goto slowpath; cookie->pidx = eq->pidx; cookie->ndesc = ndesc; TAILQ_INSERT_TAIL(&wrq->incomplete_wrs, cookie, link); w = &eq->desc[eq->pidx]; IDXINCR(eq->pidx, ndesc, eq->sidx); if (__predict_false(cookie->pidx + ndesc > eq->sidx)) { w = &wrq->ss[0]; wrq->ss_pidx = cookie->pidx; wrq->ss_len = len16 * 16; } EQ_UNLOCK(eq); return (w); } void commit_wrq_wr(struct sge_wrq *wrq, void *w, struct wrq_cookie *cookie) { struct sge_eq *eq = &wrq->eq; struct adapter *sc = wrq->adapter; int ndesc, pidx; struct wrq_cookie *prev, *next; if (cookie->pidx == -1) { struct wrqe *wr = __containerof(w, struct wrqe, wr); t4_wrq_tx(sc, wr); return; } ndesc = cookie->ndesc; /* Can be more than SGE_MAX_WR_NDESC here. */ pidx = cookie->pidx; MPASS(pidx >= 0 && pidx < eq->sidx); if (__predict_false(w == &wrq->ss[0])) { int n = (eq->sidx - wrq->ss_pidx) * EQ_ESIZE; MPASS(wrq->ss_len > n); /* WR had better wrap around. */ bcopy(&wrq->ss[0], &eq->desc[wrq->ss_pidx], n); bcopy(&wrq->ss[n], &eq->desc[0], wrq->ss_len - n); wrq->tx_wrs_ss++; } else wrq->tx_wrs_direct++; EQ_LOCK(eq); prev = TAILQ_PREV(cookie, wrq_incomplete_wrs, link); next = TAILQ_NEXT(cookie, link); if (prev == NULL) { MPASS(pidx == eq->dbidx); if (next == NULL || ndesc >= 16) ring_eq_db(wrq->adapter, eq, ndesc); else { MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc); next->pidx = pidx; next->ndesc += ndesc; } } else { MPASS(IDXDIFF(pidx, prev->pidx, eq->sidx) == prev->ndesc); prev->ndesc += ndesc; } TAILQ_REMOVE(&wrq->incomplete_wrs, cookie, link); if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) drain_wrq_wr_list(sc, wrq); #ifdef INVARIANTS if (TAILQ_EMPTY(&wrq->incomplete_wrs)) { /* Doorbell must have caught up to the pidx. */ MPASS(wrq->eq.pidx == wrq->eq.dbidx); } #endif EQ_UNLOCK(eq); } static u_int can_resume_eth_tx(struct mp_ring *r) { struct sge_eq *eq = r->cookie; return (total_available_tx_desc(eq) > eq->sidx / 8); } static inline int cannot_use_txpkts(struct mbuf *m) { /* maybe put a GL limit too, to avoid silliness? */ return (needs_tso(m)); } static inline int discard_tx(struct sge_eq *eq) { return ((eq->flags & (EQ_ENABLED | EQ_QFLUSH)) != EQ_ENABLED); } /* * r->items[cidx] to r->items[pidx], with a wraparound at r->size, are ready to * be consumed. Return the actual number consumed. 0 indicates a stall. 
 */
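/*
 * IDXDIFF(pidx, cidx, r->size) below is the number of ring slots from cidx
 * up to (but not including) pidx, modulo the ring size.  Illustrative
 * numbers: cidx = 1020, pidx = 4 on a 1024-entry ring means 8 packets are
 * ready to be transmitted.
 */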
*/ static u_int eth_tx(struct mp_ring *r, u_int cidx, u_int pidx) { struct sge_txq *txq = r->cookie; struct sge_eq *eq = &txq->eq; struct ifnet *ifp = txq->ifp; struct vi_info *vi = ifp->if_softc; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; u_int total, remaining; /* # of packets */ u_int available, dbdiff; /* # of hardware descriptors */ u_int n, next_cidx; struct mbuf *m0, *tail; struct txpkts txp; struct fw_eth_tx_pkts_wr *wr; /* any fw WR struct will do */ remaining = IDXDIFF(pidx, cidx, r->size); MPASS(remaining > 0); /* Must not be called without work to do. */ total = 0; TXQ_LOCK(txq); if (__predict_false(discard_tx(eq))) { while (cidx != pidx) { m0 = r->items[cidx]; m_freem(m0); if (++cidx == r->size) cidx = 0; } reclaim_tx_descs(txq, 2048); total = remaining; goto done; } /* How many hardware descriptors do we have readily available. */ if (eq->pidx == eq->cidx) available = eq->sidx - 1; else available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; dbdiff = IDXDIFF(eq->pidx, eq->dbidx, eq->sidx); while (remaining > 0) { m0 = r->items[cidx]; M_ASSERTPKTHDR(m0); MPASS(m0->m_nextpkt == NULL); if (available < SGE_MAX_WR_NDESC) { available += reclaim_tx_descs(txq, 64); if (available < howmany(mbuf_len16(m0), EQ_ESIZE / 16)) break; /* out of descriptors */ } next_cidx = cidx + 1; if (__predict_false(next_cidx == r->size)) next_cidx = 0; wr = (void *)&eq->desc[eq->pidx]; if (sc->flags & IS_VF) { total++; remaining--; ETHER_BPF_MTAP(ifp, m0); n = write_txpkt_vm_wr(sc, txq, (void *)wr, m0, available); } else if (remaining > 1 && try_txpkts(m0, r->items[next_cidx], &txp, available) == 0) { /* pkts at cidx, next_cidx should both be in txp. */ MPASS(txp.npkt == 2); tail = r->items[next_cidx]; MPASS(tail->m_nextpkt == NULL); ETHER_BPF_MTAP(ifp, m0); ETHER_BPF_MTAP(ifp, tail); m0->m_nextpkt = tail; if (__predict_false(++next_cidx == r->size)) next_cidx = 0; while (next_cidx != pidx) { if (add_to_txpkts(r->items[next_cidx], &txp, available) != 0) break; tail->m_nextpkt = r->items[next_cidx]; tail = tail->m_nextpkt; ETHER_BPF_MTAP(ifp, tail); if (__predict_false(++next_cidx == r->size)) next_cidx = 0; } n = write_txpkts_wr(txq, wr, m0, &txp, available); total += txp.npkt; remaining -= txp.npkt; } else { total++; remaining--; ETHER_BPF_MTAP(ifp, m0); n = write_txpkt_wr(txq, (void *)wr, m0, available); } MPASS(n >= 1 && n <= available && n <= SGE_MAX_WR_NDESC); available -= n; dbdiff += n; IDXINCR(eq->pidx, n, eq->sidx); if (total_available_tx_desc(eq) < eq->sidx / 4 && atomic_cmpset_int(&eq->equiq, 0, 1)) { wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | F_FW_WR_EQUEQ); eq->equeqidx = eq->pidx; } else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) { wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); eq->equeqidx = eq->pidx; } if (dbdiff >= 16 && remaining >= 4) { ring_eq_db(sc, eq, dbdiff); available += reclaim_tx_descs(txq, 4 * dbdiff); dbdiff = 0; } cidx = next_cidx; } if (dbdiff != 0) { ring_eq_db(sc, eq, dbdiff); reclaim_tx_descs(txq, 32); } done: TXQ_UNLOCK(txq); return (total); } static inline void init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx, int qsize) { KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS, ("%s: bad tmr_idx %d", __func__, tmr_idx)); KASSERT(pktc_idx < SGE_NCOUNTERS, /* -ve is ok, means don't use */ ("%s: bad pktc_idx %d", __func__, pktc_idx)); iq->flags = 0; iq->adapter = sc; iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx); iq->intr_pktc_idx = SGE_NCOUNTERS - 1; if (pktc_idx >= 0) { iq->intr_params |= F_QINTR_CNT_EN; iq->intr_pktc_idx = 
pktc_idx; } iq->qsize = roundup2(qsize, 16); /* See FW_IQ_CMD/iqsize */ iq->sidx = iq->qsize - sc->params.sge.spg_len / IQ_ESIZE; } static inline void init_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int maxp, char *name) { fl->qsize = qsize; fl->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; strlcpy(fl->lockname, name, sizeof(fl->lockname)); if (sc->flags & BUF_PACKING_OK && ((!is_t4(sc) && buffer_packing) || /* T5+: enabled unless 0 */ (is_t4(sc) && buffer_packing == 1)))/* T4: disabled unless 1 */ fl->flags |= FL_BUF_PACKING; find_best_refill_source(sc, fl, maxp); find_safe_refill_source(sc, fl); } static inline void init_eq(struct adapter *sc, struct sge_eq *eq, int eqtype, int qsize, uint8_t tx_chan, uint16_t iqid, char *name) { KASSERT(eqtype <= EQ_TYPEMASK, ("%s: bad qtype %d", __func__, eqtype)); eq->flags = eqtype & EQ_TYPEMASK; eq->tx_chan = tx_chan; eq->iqid = iqid; eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; strlcpy(eq->lockname, name, sizeof(eq->lockname)); } static int alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag, bus_dmamap_t *map, bus_addr_t *pa, void **va) { int rc; rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag); if (rc != 0) { device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc); goto done; } rc = bus_dmamem_alloc(*tag, va, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map); if (rc != 0) { device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc); goto done; } rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0); if (rc != 0) { device_printf(sc->dev, "cannot load DMA map: %d\n", rc); goto done; } done: if (rc) free_ring(sc, *tag, *map, *pa, *va); return (rc); } static int free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map, bus_addr_t pa, void *va) { if (pa) bus_dmamap_unload(tag, map); if (va) bus_dmamem_free(tag, va, map); if (tag) bus_dma_tag_destroy(tag); return (0); } /* * Allocates the ring for an ingress queue and an optional freelist. If the * freelist is specified it will be allocated and then associated with the * ingress queue. * * Returns errno on failure. Resources allocated up to that point may still be * allocated. Caller is responsible for cleanup in case this function fails. * * If the ingress queue will take interrupts directly (iq->flags & IQ_INTR) then * the intr_idx specifies the vector, starting from 0. Otherwise it specifies * the abs_id of the ingress queue to which its interrupts should be forwarded. 
 */
*/ static int alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl, int intr_idx, int cong) { int rc, i, cntxt_id; size_t len; struct fw_iq_cmd c; struct port_info *pi = vi->pi; struct adapter *sc = iq->adapter; struct sge_params *sp = &sc->params.sge; __be32 v = 0; len = iq->qsize * IQ_ESIZE; rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba, (void **)&iq->desc); if (rc != 0) return (rc); bzero(&c, sizeof(c)); c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | V_FW_IQ_CMD_VFN(0)); c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | FW_LEN16(c)); /* Special handling for firmware event queue */ if (iq == &sc->sge.fwq) v |= F_FW_IQ_CMD_IQASYNCH; if (iq->flags & IQ_INTR) { KASSERT(intr_idx < sc->intr_count, ("%s: invalid direct intr_idx %d", __func__, intr_idx)); } else v |= F_FW_IQ_CMD_IQANDST; v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx); c.type_to_iqandstindex = htobe32(v | V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | V_FW_IQ_CMD_VIID(vi->viid) | V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT)); c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) | F_FW_IQ_CMD_IQGTSMODE | V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) | V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4)); c.iqsize = htobe16(iq->qsize); c.iqaddr = htobe64(iq->ba); if (cong >= 0) c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN); if (fl) { mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF); len = fl->qsize * EQ_ESIZE; rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map, &fl->ba, (void **)&fl->desc); if (rc) return (rc); /* Allocate space for one software descriptor per buffer. */ rc = alloc_fl_sdesc(fl); if (rc != 0) { device_printf(sc->dev, "failed to setup fl software descriptors: %d\n", rc); return (rc); } if (fl->flags & FL_BUF_PACKING) { fl->lowat = roundup2(sp->fl_starve_threshold2, 8); fl->buf_boundary = sp->pack_boundary; } else { fl->lowat = roundup2(sp->fl_starve_threshold, 8); fl->buf_boundary = 16; } if (fl_pad && fl->buf_boundary < sp->pad_boundary) fl->buf_boundary = sp->pad_boundary; c.iqns_to_fl0congen |= htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) | F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) | (fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN : 0)); if (cong >= 0) { c.iqns_to_fl0congen |= htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) | F_FW_IQ_CMD_FL0CONGCIF | F_FW_IQ_CMD_FL0CONGEN); } c.fl0dcaen_to_fl0cidxfthresh = htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ? X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B) | V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ? 
X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B)); c.fl0size = htobe16(fl->qsize); c.fl0addr = htobe64(fl->ba); } rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); if (rc != 0) { device_printf(sc->dev, "failed to create ingress queue: %d\n", rc); return (rc); } iq->cidx = 0; iq->gen = F_RSPD_GEN; iq->intr_next = iq->intr_params; iq->cntxt_id = be16toh(c.iqid); iq->abs_id = be16toh(c.physiqid); iq->flags |= IQ_ALLOCATED; cntxt_id = iq->cntxt_id - sc->sge.iq_start; if (cntxt_id >= sc->sge.niq) { panic ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__, cntxt_id, sc->sge.niq - 1); } sc->sge.iqmap[cntxt_id] = iq; if (fl) { u_int qid; iq->flags |= IQ_HAS_FL; fl->cntxt_id = be16toh(c.fl0id); fl->pidx = fl->cidx = 0; cntxt_id = fl->cntxt_id - sc->sge.eq_start; if (cntxt_id >= sc->sge.neq) { panic("%s: fl->cntxt_id (%d) more than the max (%d)", __func__, cntxt_id, sc->sge.neq - 1); } sc->sge.eqmap[cntxt_id] = (void *)fl; qid = fl->cntxt_id; if (isset(&sc->doorbells, DOORBELL_UDB)) { uint32_t s_qpp = sc->params.sge.eq_s_qpp; uint32_t mask = (1 << s_qpp) - 1; volatile uint8_t *udb; udb = sc->udbs_base + UDBS_DB_OFFSET; udb += (qid >> s_qpp) << PAGE_SHIFT; qid &= mask; if (qid < PAGE_SIZE / UDBS_SEG_SIZE) { udb += qid << UDBS_SEG_SHIFT; qid = 0; } fl->udb = (volatile void *)udb; } fl->dbval = V_QID(qid) | sc->chip_params->sge_fl_db; FL_LOCK(fl); /* Enough to make sure the SGE doesn't think it's starved */ refill_fl(sc, fl, fl->lowat); FL_UNLOCK(fl); } if (chip_id(sc) >= CHELSIO_T5 && !(sc->flags & IS_VF) && cong >= 0) { uint32_t param, val; param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | V_FW_PARAMS_PARAM_YZ(iq->cntxt_id); if (cong == 0) val = 1 << 19; else { val = 2 << 19; for (i = 0; i < 4; i++) { if (cong & (1 << i)) val |= 1 << (i << 2); } } rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); if (rc != 0) { /* report error but carry on */ device_printf(sc->dev, "failed to set congestion manager context for " "ingress queue %d: %d\n", iq->cntxt_id, rc); } } /* Enable IQ interrupts */ atomic_store_rel_int(&iq->state, IQS_IDLE); t4_write_reg(sc, sc->sge_gts_reg, V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id)); return (0); } static int free_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl) { int rc; struct adapter *sc = iq->adapter; device_t dev; if (sc == NULL) return (0); /* nothing to do */ dev = vi ? vi->dev : sc->dev; if (iq->flags & IQ_ALLOCATED) { rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id, fl ? 
fl->cntxt_id : 0xffff, 0xffff); if (rc != 0) { device_printf(dev, "failed to free queue %p: %d\n", iq, rc); return (rc); } iq->flags &= ~IQ_ALLOCATED; } free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc); bzero(iq, sizeof(*iq)); if (fl) { free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba, fl->desc); if (fl->sdesc) free_fl_sdesc(sc, fl); if (mtx_initialized(&fl->fl_lock)) mtx_destroy(&fl->fl_lock); bzero(fl, sizeof(*fl)); } return (0); } static void add_fl_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx, struct sysctl_oid *oid, struct sge_fl *fl) { struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL, "freelist"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &fl->ba, "bus address of descriptor ring"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, fl->sidx * EQ_ESIZE + sc->params.sge.spg_len, "desc ring size in bytes"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", CTLTYPE_INT | CTLFLAG_RD, &fl->cntxt_id, 0, sysctl_uint16, "I", "SGE context id of the freelist"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "padding", CTLFLAG_RD, NULL, fl_pad ? 1 : 0, "padding enabled"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "packing", CTLFLAG_RD, NULL, fl->flags & FL_BUF_PACKING ? 1 : 0, "packing enabled"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx, 0, "consumer index"); if (fl->flags & FL_BUF_PACKING) { SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_offset", CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset"); } SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx, 0, "producer index"); SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_allocated", CTLFLAG_RD, &fl->mbuf_allocated, "# of mbuf allocated"); SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_inlined", CTLFLAG_RD, &fl->mbuf_inlined, "# of mbuf inlined in clusters"); SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_allocated", CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated"); SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_recycled", CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled"); SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_fast_recycled", CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)"); } static int alloc_fwq(struct adapter *sc) { int rc, intr_idx; struct sge_iq *fwq = &sc->sge.fwq; struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev); struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE); fwq->flags |= IQ_INTR; /* always */ if (sc->flags & IS_VF) intr_idx = 0; else { intr_idx = sc->intr_count > 1 ? 
1 : 0; fwq->set_tcb_rpl = t4_filter_rpl; fwq->l2t_write_rpl = do_l2t_write_rpl; } rc = alloc_iq_fl(&sc->port[0]->vi[0], fwq, NULL, intr_idx, -1); if (rc != 0) { device_printf(sc->dev, "failed to create firmware event queue: %d\n", rc); return (rc); } oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "fwq", CTLFLAG_RD, NULL, "firmware event queue"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UAUTO(&sc->ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &fwq->ba, "bus address of descriptor ring"); SYSCTL_ADD_INT(&sc->ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, fwq->qsize * IQ_ESIZE, "descriptor ring size in bytes"); SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "abs_id", CTLTYPE_INT | CTLFLAG_RD, &fwq->abs_id, 0, sysctl_uint16, "I", "absolute id of the queue"); SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cntxt_id", CTLTYPE_INT | CTLFLAG_RD, &fwq->cntxt_id, 0, sysctl_uint16, "I", "SGE context id of the queue"); SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cidx", CTLTYPE_INT | CTLFLAG_RD, &fwq->cidx, 0, sysctl_uint16, "I", "consumer index"); return (0); } static int free_fwq(struct adapter *sc) { return free_iq_fl(NULL, &sc->sge.fwq, NULL); } static int alloc_mgmtq(struct adapter *sc) { int rc; struct sge_wrq *mgmtq = &sc->sge.mgmtq; char name[16]; struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev); struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "mgmtq", CTLFLAG_RD, NULL, "management queue"); snprintf(name, sizeof(name), "%s mgmtq", device_get_nameunit(sc->dev)); init_eq(sc, &mgmtq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[0]->tx_chan, sc->sge.fwq.cntxt_id, name); rc = alloc_wrq(sc, NULL, mgmtq, oid); if (rc != 0) { device_printf(sc->dev, "failed to create management queue: %d\n", rc); return (rc); } return (0); } static int free_mgmtq(struct adapter *sc) { return free_wrq(sc, &sc->sge.mgmtq); } int tnl_cong(struct port_info *pi, int drop) { if (drop == -1) return (-1); else if (drop == 1) return (0); else return (pi->rx_chan_map); } static int alloc_rxq(struct vi_info *vi, struct sge_rxq *rxq, int intr_idx, int idx, struct sysctl_oid *oid) { int rc; struct adapter *sc = vi->pi->adapter; struct sysctl_oid_list *children; char name[16]; rc = alloc_iq_fl(vi, &rxq->iq, &rxq->fl, intr_idx, tnl_cong(vi->pi, cong_drop)); if (rc != 0) return (rc); if (idx == 0) sc->sge.iq_base = rxq->iq.abs_id - rxq->iq.cntxt_id; else KASSERT(rxq->iq.cntxt_id + sc->sge.iq_base == rxq->iq.abs_id, ("iq_base mismatch")); KASSERT(sc->sge.iq_base == 0 || sc->flags & IS_VF, ("PF with non-zero iq_base")); /* * The freelist is just barely above the starvation threshold right now, * fill it up a bit more. 
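 * (A note on the units, inferred from the fl->pidx / 8 arithmetic in
 * refill_fl(): refill_fl() counts individual buffer pointers, and the chip's
 * producer index moves in units of 8 pointers per hardware descriptor, so
 * the refill of 128 below advances the hardware pidx by 16.)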
*/ FL_LOCK(&rxq->fl); refill_fl(sc, &rxq->fl, 128); FL_UNLOCK(&rxq->fl); #if defined(INET) || defined(INET6) rc = tcp_lro_init_args(&rxq->lro, vi->ifp, lro_entries, lro_mbufs); if (rc != 0) return (rc); MPASS(rxq->lro.ifp == vi->ifp); /* also indicates LRO init'ed */ if (vi->ifp->if_capenable & IFCAP_LRO) rxq->iq.flags |= IQ_LRO_ENABLED; #endif rxq->ifp = vi->ifp; children = SYSCTL_CHILDREN(oid); snprintf(name, sizeof(name), "%d", idx); oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD, NULL, "rx queue"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UAUTO(&vi->ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &rxq->iq.ba, "bus address of descriptor ring"); SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, rxq->iq.qsize * IQ_ESIZE, "descriptor ring size in bytes"); SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "abs_id", CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.abs_id, 0, sysctl_uint16, "I", "absolute id of the queue"); SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cntxt_id, 0, sysctl_uint16, "I", "SGE context id of the queue"); SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx", CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cidx, 0, sysctl_uint16, "I", "consumer index"); #if defined(INET) || defined(INET6) SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD, &rxq->lro.lro_queued, 0, NULL); SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD, &rxq->lro.lro_flushed, 0, NULL); #endif SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD, &rxq->rxcsum, "# of times hardware assisted with checksum"); SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_extraction", CTLFLAG_RD, &rxq->vlan_extraction, "# of times hardware extracted 802.1Q tag"); add_fl_sysctls(sc, &vi->ctx, oid, &rxq->fl); return (rc); } static int free_rxq(struct vi_info *vi, struct sge_rxq *rxq) { int rc; #if defined(INET) || defined(INET6) if (rxq->lro.ifp) { tcp_lro_free(&rxq->lro); rxq->lro.ifp = NULL; } #endif rc = free_iq_fl(vi, &rxq->iq, &rxq->fl); if (rc == 0) bzero(rxq, sizeof(*rxq)); return (rc); } #ifdef TCP_OFFLOAD static int alloc_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq, int intr_idx, int idx, struct sysctl_oid *oid) { struct port_info *pi = vi->pi; int rc; struct sysctl_oid_list *children; char name[16]; rc = alloc_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx, pi->rx_chan_map); if (rc != 0) return (rc); children = SYSCTL_CHILDREN(oid); snprintf(name, sizeof(name), "%d", idx); oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD, NULL, "rx queue"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UAUTO(&vi->ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &ofld_rxq->iq.ba, "bus address of descriptor ring"); SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, ofld_rxq->iq.qsize * IQ_ESIZE, "descriptor ring size in bytes"); SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "abs_id", CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.abs_id, 0, sysctl_uint16, "I", "absolute id of the queue"); SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cntxt_id, 0, sysctl_uint16, "I", "SGE context id of the queue"); SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx", CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cidx, 0, sysctl_uint16, "I", "consumer index"); add_fl_sysctls(pi->adapter, &vi->ctx, oid, &ofld_rxq->fl); return (rc); } static int free_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq) { int rc; rc = free_iq_fl(vi, 
&ofld_rxq->iq, &ofld_rxq->fl); if (rc == 0) bzero(ofld_rxq, sizeof(*ofld_rxq)); return (rc); } #endif #ifdef DEV_NETMAP static int alloc_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int intr_idx, int idx, struct sysctl_oid *oid) { int rc; struct sysctl_oid_list *children; struct sysctl_ctx_list *ctx; char name[16]; size_t len; struct adapter *sc = vi->pi->adapter; struct netmap_adapter *na = NA(vi->ifp); MPASS(na != NULL); len = vi->qsize_rxq * IQ_ESIZE; rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map, &nm_rxq->iq_ba, (void **)&nm_rxq->iq_desc); if (rc != 0) return (rc); len = na->num_rx_desc * EQ_ESIZE + sc->params.sge.spg_len; rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map, &nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc); if (rc != 0) return (rc); nm_rxq->vi = vi; nm_rxq->nid = idx; nm_rxq->iq_cidx = 0; nm_rxq->iq_sidx = vi->qsize_rxq - sc->params.sge.spg_len / IQ_ESIZE; nm_rxq->iq_gen = F_RSPD_GEN; nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0; nm_rxq->fl_sidx = na->num_rx_desc; nm_rxq->intr_idx = intr_idx; ctx = &vi->ctx; children = SYSCTL_CHILDREN(oid); snprintf(name, sizeof(name), "%d", idx); oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name, CTLFLAG_RD, NULL, "rx queue"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id", CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_abs_id, 0, sysctl_uint16, "I", "absolute id of the queue"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cntxt_id, 0, sysctl_uint16, "I", "SGE context id of the queue"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cidx, 0, sysctl_uint16, "I", "consumer index"); children = SYSCTL_CHILDREN(oid); oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL, "freelist"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->fl_cntxt_id, 0, sysctl_uint16, "I", "SGE context id of the freelist"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &nm_rxq->fl_cidx, 0, "consumer index"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &nm_rxq->fl_pidx, 0, "producer index"); return (rc); } static int free_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq) { struct adapter *sc = vi->pi->adapter; free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map, nm_rxq->iq_ba, nm_rxq->iq_desc); free_ring(sc, nm_rxq->fl_desc_tag, nm_rxq->fl_desc_map, nm_rxq->fl_ba, nm_rxq->fl_desc); return (0); } static int alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx, struct sysctl_oid *oid) { int rc; size_t len; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct netmap_adapter *na = NA(vi->ifp); char name[16]; struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len; rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map, &nm_txq->ba, (void **)&nm_txq->desc); if (rc) return (rc); nm_txq->pidx = nm_txq->cidx = 0; nm_txq->sidx = na->num_tx_desc; nm_txq->nid = idx; nm_txq->iqidx = iqidx; nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(G_FW_VIID_PFN(vi->viid)) | V_TXPKT_VF(G_FW_VIID_VIN(vi->viid)) | V_TXPKT_VF_VLD(G_FW_VIID_VIVLD(vi->viid))); snprintf(name, sizeof(name), "%d", idx); oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD, NULL, "netmap tx queue"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UINT(&vi->ctx, children, 
OID_AUTO, "cntxt_id", CTLFLAG_RD, &nm_txq->cntxt_id, 0, "SGE context id of the queue"); SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx", CTLTYPE_INT | CTLFLAG_RD, &nm_txq->cidx, 0, sysctl_uint16, "I", "consumer index"); SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx", CTLTYPE_INT | CTLFLAG_RD, &nm_txq->pidx, 0, sysctl_uint16, "I", "producer index"); return (rc); } static int free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq) { struct adapter *sc = vi->pi->adapter; free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba, nm_txq->desc); return (0); } #endif static int ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq) { int rc, cntxt_id; struct fw_eq_ctrl_cmd c; int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; bzero(&c, sizeof(c)); c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) | V_FW_EQ_CTRL_CMD_VFN(0)); c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC | F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); c.physeqid_pkd = htobe32(0); c.fetchszm_to_iqid = htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid)); c.dcaen_to_eqsize = htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) | V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) | V_FW_EQ_CTRL_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) | V_FW_EQ_CTRL_CMD_EQSIZE(qsize)); c.eqaddr = htobe64(eq->ba); rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); if (rc != 0) { device_printf(sc->dev, "failed to create control queue %d: %d\n", eq->tx_chan, rc); return (rc); } eq->flags |= EQ_ALLOCATED; eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid)); cntxt_id = eq->cntxt_id - sc->sge.eq_start; if (cntxt_id >= sc->sge.neq) panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, cntxt_id, sc->sge.neq - 1); sc->sge.eqmap[cntxt_id] = eq; return (rc); } static int eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) { int rc, cntxt_id; struct fw_eq_eth_cmd c; int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; bzero(&c, sizeof(c)); c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | V_FW_EQ_ETH_CMD_VFN(0)); c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC | F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE | F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid)); c.fetchszm_to_iqid = htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(eq->iqid)); c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) | V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | V_FW_EQ_ETH_CMD_EQSIZE(qsize)); c.eqaddr = htobe64(eq->ba); rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); if (rc != 0) { device_printf(vi->dev, "failed to create Ethernet egress queue: %d\n", rc); return (rc); } eq->flags |= EQ_ALLOCATED; eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); eq->abs_id = G_FW_EQ_ETH_CMD_PHYSEQID(be32toh(c.physeqid_pkd)); cntxt_id = eq->cntxt_id - sc->sge.eq_start; if (cntxt_id >= sc->sge.neq) panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, cntxt_id, sc->sge.neq - 1); sc->sge.eqmap[cntxt_id] = eq; return (rc); } #ifdef TCP_OFFLOAD static int ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq 
*eq) { int rc, cntxt_id; struct fw_eq_ofld_cmd c; int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; bzero(&c, sizeof(c)); c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) | V_FW_EQ_OFLD_CMD_VFN(0)); c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC | F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); c.fetchszm_to_iqid = htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid)); c.dcaen_to_eqsize = htobe32(V_FW_EQ_OFLD_CMD_FBMIN(X_FETCHBURSTMIN_64B) | V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) | V_FW_EQ_OFLD_CMD_EQSIZE(qsize)); c.eqaddr = htobe64(eq->ba); rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); if (rc != 0) { device_printf(vi->dev, "failed to create egress queue for TCP offload: %d\n", rc); return (rc); } eq->flags |= EQ_ALLOCATED; eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd)); cntxt_id = eq->cntxt_id - sc->sge.eq_start; if (cntxt_id >= sc->sge.neq) panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, cntxt_id, sc->sge.neq - 1); sc->sge.eqmap[cntxt_id] = eq; return (rc); } #endif static int alloc_eq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) { int rc, qsize; size_t len; mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF); qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; len = qsize * EQ_ESIZE; rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map, &eq->ba, (void **)&eq->desc); if (rc) return (rc); eq->pidx = eq->cidx = 0; eq->equeqidx = eq->dbidx = 0; eq->doorbells = sc->doorbells; switch (eq->flags & EQ_TYPEMASK) { case EQ_CTRL: rc = ctrl_eq_alloc(sc, eq); break; case EQ_ETH: rc = eth_eq_alloc(sc, vi, eq); break; #ifdef TCP_OFFLOAD case EQ_OFLD: rc = ofld_eq_alloc(sc, vi, eq); break; #endif default: panic("%s: invalid eq type %d.", __func__, eq->flags & EQ_TYPEMASK); } if (rc != 0) { device_printf(sc->dev, "failed to allocate egress queue(%d): %d\n", eq->flags & EQ_TYPEMASK, rc); } if (isset(&eq->doorbells, DOORBELL_UDB) || isset(&eq->doorbells, DOORBELL_UDBWC) || isset(&eq->doorbells, DOORBELL_WCWR)) { uint32_t s_qpp = sc->params.sge.eq_s_qpp; uint32_t mask = (1 << s_qpp) - 1; volatile uint8_t *udb; udb = sc->udbs_base + UDBS_DB_OFFSET; udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT; /* pg offset */ eq->udb_qid = eq->cntxt_id & mask; /* id in page */ if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE) clrbit(&eq->doorbells, DOORBELL_WCWR); else { udb += eq->udb_qid << UDBS_SEG_SHIFT; /* seg offset */ eq->udb_qid = 0; } eq->udb = (volatile void *)udb; } return (rc); } static int free_eq(struct adapter *sc, struct sge_eq *eq) { int rc; if (eq->flags & EQ_ALLOCATED) { switch (eq->flags & EQ_TYPEMASK) { case EQ_CTRL: rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id); break; case EQ_ETH: rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id); break; #ifdef TCP_OFFLOAD case EQ_OFLD: rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id); break; #endif default: panic("%s: invalid eq type %d.", __func__, eq->flags & EQ_TYPEMASK); } if (rc != 0) { device_printf(sc->dev, "failed to free egress queue (%d): %d\n", eq->flags & EQ_TYPEMASK, rc); return (rc); } eq->flags &= ~EQ_ALLOCATED; } free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc); if (mtx_initialized(&eq->eq_lock)) mtx_destroy(&eq->eq_lock); bzero(eq, sizeof(*eq)); return (0); } static int alloc_wrq(struct adapter *sc, struct vi_info *vi, struct sge_wrq *wrq, struct sysctl_oid *oid) { int 
rc; struct sysctl_ctx_list *ctx = vi ? &vi->ctx : &sc->ctx; struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); rc = alloc_eq(sc, vi, &wrq->eq); if (rc) return (rc); wrq->adapter = sc; TASK_INIT(&wrq->wrq_tx_task, 0, wrq_tx_drain, wrq); TAILQ_INIT(&wrq->incomplete_wrs); STAILQ_INIT(&wrq->wr_list); wrq->nwr_pending = 0; wrq->ndesc_needed = 0; SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &wrq->eq.ba, "bus address of descriptor ring"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, wrq->eq.sidx * EQ_ESIZE + sc->params.sge.spg_len, "desc ring size in bytes"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, &wrq->eq.cntxt_id, 0, "SGE context id of the queue"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.cidx, 0, sysctl_uint16, "I", "consumer index"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pidx", CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.pidx, 0, sysctl_uint16, "I", "producer index"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL, wrq->eq.sidx, "status page index"); SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_direct", CTLFLAG_RD, &wrq->tx_wrs_direct, "# of work requests (direct)"); SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_copied", CTLFLAG_RD, &wrq->tx_wrs_copied, "# of work requests (copied)"); SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_sspace", CTLFLAG_RD, &wrq->tx_wrs_ss, "# of work requests (copied from scratch space)"); return (rc); } static int free_wrq(struct adapter *sc, struct sge_wrq *wrq) { int rc; rc = free_eq(sc, &wrq->eq); if (rc) return (rc); bzero(wrq, sizeof(*wrq)); return (0); } static int alloc_txq(struct vi_info *vi, struct sge_txq *txq, int idx, struct sysctl_oid *oid) { int rc; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct sge_eq *eq = &txq->eq; char name[16]; struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); rc = mp_ring_alloc(&txq->r, eq->sidx, txq, eth_tx, can_resume_eth_tx, M_CXGBE, M_WAITOK); if (rc != 0) { device_printf(sc->dev, "failed to allocate mp_ring: %d\n", rc); return (rc); } rc = alloc_eq(sc, vi, eq); if (rc != 0) { mp_ring_free(txq->r); txq->r = NULL; return (rc); } /* Can't fail after this point. 
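 * (Everything that follows either cannot fail or allocates with M_WAITOK --
 * sglist_alloc() and the sdesc malloc() below -- so no unwind path is needed
 * for the mp_ring and eq that were just set up.)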
*/ if (idx == 0) sc->sge.eq_base = eq->abs_id - eq->cntxt_id; else KASSERT(eq->cntxt_id + sc->sge.eq_base == eq->abs_id, ("eq_base mismatch")); KASSERT(sc->sge.eq_base == 0 || sc->flags & IS_VF, ("PF with non-zero eq_base")); TASK_INIT(&txq->tx_reclaim_task, 0, tx_reclaim, eq); txq->ifp = vi->ifp; txq->gl = sglist_alloc(TX_SGL_SEGS, M_WAITOK); if (sc->flags & IS_VF) txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | V_TXPKT_INTF(pi->tx_chan)); else txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(G_FW_VIID_PFN(vi->viid)) | V_TXPKT_VF(G_FW_VIID_VIN(vi->viid)) | V_TXPKT_VF_VLD(G_FW_VIID_VIVLD(vi->viid))); txq->tc_idx = -1; txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE, M_ZERO | M_WAITOK); snprintf(name, sizeof(name), "%d", idx); oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD, NULL, "tx queue"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UAUTO(&vi->ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &eq->ba, "bus address of descriptor ring"); SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, eq->sidx * EQ_ESIZE + sc->params.sge.spg_len, "desc ring size in bytes"); SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD, &eq->abs_id, 0, "absolute id of the queue"); SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, &eq->cntxt_id, 0, "SGE context id of the queue"); SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx", CTLTYPE_INT | CTLFLAG_RD, &eq->cidx, 0, sysctl_uint16, "I", "consumer index"); SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx", CTLTYPE_INT | CTLFLAG_RD, &eq->pidx, 0, sysctl_uint16, "I", "producer index"); SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL, eq->sidx, "status page index"); SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "tc", CTLTYPE_INT | CTLFLAG_RW, vi, idx, sysctl_tc, "I", "traffic class (-1 means none)"); SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD, &txq->txcsum, "# of times hardware assisted with checksum"); SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_insertion", CTLFLAG_RD, &txq->vlan_insertion, "# of times hardware inserted 802.1Q tag"); SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD, &txq->tso_wrs, "# of TSO work requests"); SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD, &txq->imm_wrs, "# of work requests with immediate data"); SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD, &txq->sgl_wrs, "# of work requests with direct SGL"); SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD, &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)"); SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_wrs", CTLFLAG_RD, &txq->txpkts0_wrs, "# of txpkts (type 0) work requests"); SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_wrs", CTLFLAG_RD, &txq->txpkts1_wrs, "# of txpkts (type 1) work requests"); SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_pkts", CTLFLAG_RD, &txq->txpkts0_pkts, "# of frames tx'd using type0 txpkts work requests"); SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_pkts", CTLFLAG_RD, &txq->txpkts1_pkts, "# of frames tx'd using type1 txpkts work requests"); SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_enqueues", CTLFLAG_RD, &txq->r->enqueues, "# of enqueues to the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_drops", CTLFLAG_RD, &txq->r->drops, "# of drops in the mp_ring for this queue"); 
SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_starts", CTLFLAG_RD, &txq->r->starts, "# of normal consumer starts in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_stalls", CTLFLAG_RD, &txq->r->stalls, "# of consumer stalls in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_restarts", CTLFLAG_RD, &txq->r->restarts, "# of consumer restarts in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_abdications", CTLFLAG_RD, &txq->r->abdications, "# of consumer abdications in the mp_ring for this queue"); return (0); } static int free_txq(struct vi_info *vi, struct sge_txq *txq) { int rc; struct adapter *sc = vi->pi->adapter; struct sge_eq *eq = &txq->eq; rc = free_eq(sc, eq); if (rc) return (rc); sglist_free(txq->gl); free(txq->sdesc, M_CXGBE); mp_ring_free(txq->r); bzero(txq, sizeof(*txq)); return (0); } static void oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *ba = arg; KASSERT(nseg == 1, ("%s meant for single segment mappings only.", __func__)); *ba = error ? 0 : segs->ds_addr; } static inline void ring_fl_db(struct adapter *sc, struct sge_fl *fl) { uint32_t n, v; n = IDXDIFF(fl->pidx / 8, fl->dbidx, fl->sidx); MPASS(n > 0); wmb(); v = fl->dbval | V_PIDX(n); if (fl->udb) *fl->udb = htole32(v); else t4_write_reg(sc, sc->sge_kdoorbell_reg, v); IDXINCR(fl->dbidx, n, fl->sidx); } /* * Fills up the freelist by allocating up to 'n' buffers. Buffers that are * recycled do not count towards this allocation budget. * * Returns non-zero to indicate that this freelist should be added to the list * of starving freelists. */ static int refill_fl(struct adapter *sc, struct sge_fl *fl, int n) { __be64 *d; struct fl_sdesc *sd; uintptr_t pa; caddr_t cl; struct cluster_layout *cll; struct sw_zone_info *swz; struct cluster_metadata *clm; uint16_t max_pidx; uint16_t hw_cidx = fl->hw_cidx; /* stable snapshot */ FL_LOCK_ASSERT_OWNED(fl); /* * We always stop at the beginning of the hardware descriptor that's just * before the one with the hw cidx. This is to avoid hw pidx = hw cidx, * which would mean an empty freelist to the chip. */ max_pidx = __predict_false(hw_cidx == 0) ? fl->sidx - 1 : hw_cidx - 1; if (fl->pidx == max_pidx * 8) return (0); d = &fl->desc[fl->pidx]; sd = &fl->sdesc[fl->pidx]; cll = &fl->cll_def; /* default layout */ swz = &sc->sge.sw_zone_info[cll->zidx]; while (n > 0) { if (sd->cl != NULL) { if (sd->nmbuf == 0) { /* * Fast recycle without involving any atomics on * the cluster's metadata (if the cluster has * metadata). This happens when all frames * received in the cluster were small enough to * fit within a single mbuf each. */ fl->cl_fast_recycled++; #ifdef INVARIANTS clm = cl_metadata(sc, fl, &sd->cll, sd->cl); if (clm != NULL) MPASS(clm->refcount == 1); #endif goto recycled_fast; } /* * Cluster is guaranteed to have metadata. Clusters * without metadata always take the fast recycle path * when they're recycled. 
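 * (For context, how the slow recycle works: refill_fl() sets clm->refcount
 * to 1 on behalf of the freelist itself, and extra references are taken
 * elsewhere in the rx path when cluster-backed mbufs are handed up the
 * stack.  The atomic_fetchadd_int() below returning 1 means the freelist
 * held the last reference, so the cluster can be reused in place.)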
*/ clm = cl_metadata(sc, fl, &sd->cll, sd->cl); MPASS(clm != NULL); if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { fl->cl_recycled++; counter_u64_add(extfree_rels, 1); goto recycled; } sd->cl = NULL; /* gave up my reference */ } MPASS(sd->cl == NULL); alloc: cl = uma_zalloc(swz->zone, M_NOWAIT); if (__predict_false(cl == NULL)) { if (cll == &fl->cll_alt || fl->cll_alt.zidx == -1 || fl->cll_def.zidx == fl->cll_alt.zidx) break; /* fall back to the safe zone */ cll = &fl->cll_alt; swz = &sc->sge.sw_zone_info[cll->zidx]; goto alloc; } fl->cl_allocated++; n--; pa = pmap_kextract((vm_offset_t)cl); pa += cll->region1; sd->cl = cl; sd->cll = *cll; *d = htobe64(pa | cll->hwidx); clm = cl_metadata(sc, fl, cll, cl); if (clm != NULL) { recycled: #ifdef INVARIANTS clm->sd = sd; #endif clm->refcount = 1; } sd->nmbuf = 0; recycled_fast: d++; sd++; if (__predict_false(++fl->pidx % 8 == 0)) { uint16_t pidx = fl->pidx / 8; if (__predict_false(pidx == fl->sidx)) { fl->pidx = 0; pidx = 0; sd = fl->sdesc; d = fl->desc; } if (pidx == max_pidx) break; if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4) ring_fl_db(sc, fl); } } if (fl->pidx / 8 != fl->dbidx) ring_fl_db(sc, fl); return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING)); } /* * Attempt to refill all starving freelists. */ static void refill_sfl(void *arg) { struct adapter *sc = arg; struct sge_fl *fl, *fl_temp; mtx_assert(&sc->sfl_lock, MA_OWNED); TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) { FL_LOCK(fl); refill_fl(sc, fl, 64); if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) { TAILQ_REMOVE(&sc->sfl, fl, link); fl->flags &= ~FL_STARVING; } FL_UNLOCK(fl); } if (!TAILQ_EMPTY(&sc->sfl)) callout_schedule(&sc->sfl_callout, hz / 5); } static int alloc_fl_sdesc(struct sge_fl *fl) { fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc), M_CXGBE, M_ZERO | M_WAITOK); return (0); } static void free_fl_sdesc(struct adapter *sc, struct sge_fl *fl) { struct fl_sdesc *sd; struct cluster_metadata *clm; struct cluster_layout *cll; int i; sd = fl->sdesc; for (i = 0; i < fl->sidx * 8; i++, sd++) { if (sd->cl == NULL) continue; cll = &sd->cll; clm = cl_metadata(sc, fl, cll, sd->cl); if (sd->nmbuf == 0) uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl); else if (clm && atomic_fetchadd_int(&clm->refcount, -1) == 1) { uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl); counter_u64_add(extfree_rels, 1); } sd->cl = NULL; } free(fl->sdesc, M_CXGBE); fl->sdesc = NULL; } static inline void get_pkt_gl(struct mbuf *m, struct sglist *gl) { int rc; M_ASSERTPKTHDR(m); sglist_reset(gl); rc = sglist_append_mbuf(gl, m); if (__predict_false(rc != 0)) { panic("%s: mbuf %p (%d segs) was vetted earlier but now fails " "with %d.", __func__, m, mbuf_nsegs(m), rc); } KASSERT(gl->sg_nseg == mbuf_nsegs(m), ("%s: nsegs changed for mbuf %p from %d to %d", __func__, m, mbuf_nsegs(m), gl->sg_nseg)); KASSERT(gl->sg_nseg > 0 && gl->sg_nseg <= (needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS), ("%s: %d segments, should have been 1 <= nsegs <= %d", __func__, gl->sg_nseg, needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS)); } /* * len16 for a txpkt WR with a GL. Includes the firmware work request header. */ static inline u_int txpkt_len16(u_int nsegs, u_int tso) { u_int n; MPASS(nsegs > 0); nsegs--; /* first segment is part of ulptx_sgl */ n = sizeof(struct fw_eth_tx_pkt_wr) + sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); if (tso) n += sizeof(struct cpl_tx_pkt_lso_core); return (howmany(n, 16)); } /* * len16 for a txpkt_vm WR with a GL. 
Includes the firmware work * request header. */ static inline u_int txpkt_vm_len16(u_int nsegs, u_int tso) { u_int n; MPASS(nsegs > 0); nsegs--; /* first segment is part of ulptx_sgl */ n = sizeof(struct fw_eth_tx_pkt_vm_wr) + sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); if (tso) n += sizeof(struct cpl_tx_pkt_lso_core); return (howmany(n, 16)); } /* * len16 for a txpkts type 0 WR with a GL. Does not include the firmware work * request header. */ static inline u_int txpkts0_len16(u_int nsegs) { u_int n; MPASS(nsegs > 0); nsegs--; /* first segment is part of ulptx_sgl */ n = sizeof(struct ulp_txpkt) + sizeof(struct ulptx_idata) + sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); return (howmany(n, 16)); } /* * len16 for a txpkts type 1 WR with a GL. Does not include the firmware work * request header. */ static inline u_int txpkts1_len16(void) { u_int n; n = sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl); return (howmany(n, 16)); } static inline u_int imm_payload(u_int ndesc) { u_int n; n = ndesc * EQ_ESIZE - sizeof(struct fw_eth_tx_pkt_wr) - sizeof(struct cpl_tx_pkt_core); return (n); } /* * Write a VM txpkt WR for this packet to the hardware descriptors, update the * software descriptor, and advance the pidx. It is guaranteed that enough * descriptors are available. * * The return value is the # of hardware descriptors used. */ static u_int write_txpkt_vm_wr(struct adapter *sc, struct sge_txq *txq, struct fw_eth_tx_pkt_vm_wr *wr, struct mbuf *m0, u_int available) { struct sge_eq *eq = &txq->eq; struct tx_sdesc *txsd; struct cpl_tx_pkt_core *cpl; uint32_t ctrl; /* used in many unrelated places */ uint64_t ctrl1; int csum_type, len16, ndesc, pktlen, nsegs; caddr_t dst; TXQ_LOCK_ASSERT_OWNED(txq); M_ASSERTPKTHDR(m0); MPASS(available > 0 && available < eq->sidx); len16 = mbuf_len16(m0); nsegs = mbuf_nsegs(m0); pktlen = m0->m_pkthdr.len; ctrl = sizeof(struct cpl_tx_pkt_core); if (needs_tso(m0)) ctrl += sizeof(struct cpl_tx_pkt_lso_core); ndesc = howmany(len16, EQ_ESIZE / 16); MPASS(ndesc <= available); /* Firmware work request header */ MPASS(wr == (void *)&eq->desc[eq->pidx]); wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); ctrl = V_FW_WR_LEN16(len16); wr->equiq_to_len16 = htobe32(ctrl); wr->r3[0] = 0; wr->r3[1] = 0; /* * Copy over ethmacdst, ethmacsrc, ethtype, and vlantci. * vlantci is ignored unless the ethtype is 0x8100, so it's * simpler to always copy it rather than making it * conditional. Also, it seems that we do not have to set * vlantci or fake the ethtype when doing VLAN tag insertion. 
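 * (For illustration: sizeof(struct ether_header) + 2 is 16 bytes -- 6 each
 * for the destination and source MACs, 2 for the ethtype, and 2 for the
 * vlantci that follows when the ethtype is 0x8100.)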
*/ m_copydata(m0, 0, sizeof(struct ether_header) + 2, wr->ethmacdst); csum_type = -1; if (needs_tso(m0)) { struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && m0->m_pkthdr.l4hlen > 0, ("%s: mbuf %p needs TSO but missing header lengths", __func__, m0)); ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE | V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) | V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); if (m0->m_pkthdr.l2hlen == sizeof(struct ether_vlan_header)) ctrl |= V_LSO_ETHHDR_LEN(1); if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) ctrl |= F_LSO_IPV6; lso->lso_ctrl = htobe32(ctrl); lso->ipid_ofst = htobe16(0); lso->mss = htobe16(m0->m_pkthdr.tso_segsz); lso->seqno_offset = htobe32(0); lso->len = htobe32(pktlen); if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) csum_type = TX_CSUM_TCPIP6; else csum_type = TX_CSUM_TCPIP; cpl = (void *)(lso + 1); txq->tso_wrs++; } else { if (m0->m_pkthdr.csum_flags & CSUM_IP_TCP) csum_type = TX_CSUM_TCPIP; else if (m0->m_pkthdr.csum_flags & CSUM_IP_UDP) csum_type = TX_CSUM_UDPIP; else if (m0->m_pkthdr.csum_flags & CSUM_IP6_TCP) csum_type = TX_CSUM_TCPIP6; else if (m0->m_pkthdr.csum_flags & CSUM_IP6_UDP) csum_type = TX_CSUM_UDPIP6; #if defined(INET) else if (m0->m_pkthdr.csum_flags & CSUM_IP) { /* * XXX: The firmware appears to stomp on the * fragment/flags field of the IP header when * using TX_CSUM_IP. Fall back to doing * software checksums. */ u_short *sump; struct mbuf *m; int offset; m = m0; offset = 0; sump = m_advance(&m, &offset, m0->m_pkthdr.l2hlen + offsetof(struct ip, ip_sum)); *sump = in_cksum_skip(m0, m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen, m0->m_pkthdr.l2hlen); m0->m_pkthdr.csum_flags &= ~CSUM_IP; } #endif cpl = (void *)(wr + 1); } /* Checksum offload */ ctrl1 = 0; if (needs_l3_csum(m0) == 0) ctrl1 |= F_TXPKT_IPCSUM_DIS; if (csum_type >= 0) { KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0, ("%s: mbuf %p needs checksum offload but missing header lengths", __func__, m0)); if (chip_id(sc) <= CHELSIO_T5) { ctrl1 |= V_TXPKT_ETHHDR_LEN(m0->m_pkthdr.l2hlen - ETHER_HDR_LEN); } else { ctrl1 |= V_T6_TXPKT_ETHHDR_LEN(m0->m_pkthdr.l2hlen - ETHER_HDR_LEN); } ctrl1 |= V_TXPKT_IPHDR_LEN(m0->m_pkthdr.l3hlen); ctrl1 |= V_TXPKT_CSUM_TYPE(csum_type); } else ctrl1 |= F_TXPKT_L4CSUM_DIS; if (m0->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) txq->txcsum++; /* some hardware assistance provided */ /* VLAN tag insertion */ if (needs_vlan_insertion(m0)) { ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); txq->vlan_insertion++; } /* CPL header */ cpl->ctrl0 = txq->cpl_ctrl0; cpl->pack = 0; cpl->len = htobe16(pktlen); cpl->ctrl1 = htobe64(ctrl1); /* SGL */ dst = (void *)(cpl + 1); /* * A packet using TSO will use up an entire descriptor for the * firmware work request header, LSO CPL, and TX_PKT_XT CPL. * If this descriptor is the last descriptor in the ring, wrap * around to the front of the ring explicitly for the start of * the sgl. */ if (dst == (void *)&eq->desc[eq->sidx]) { dst = (void *)&eq->desc[0]; write_gl_to_txd(txq, m0, &dst, 0); } else write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); txq->sgl_wrs++; txq->txpkt_wrs++; txsd = &txq->sdesc[eq->pidx]; txsd->m = m0; txsd->desc_used = ndesc; return (ndesc); } /* * Write a txpkt WR for this packet to the hardware descriptors, update the * software descriptor, and advance the pidx. It is guaranteed that enough * descriptors are available. 
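 * A rough worked example of the descriptor math (assuming the usual 64-byte
 * hardware descriptor, i.e. EQ_ESIZE / 16 == 4 len16 units per descriptor):
 * a WR with len16 = 6 occupies howmany(6, 4) = 2 descriptors.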
* * The return value is the # of hardware descriptors used. */ static u_int write_txpkt_wr(struct sge_txq *txq, struct fw_eth_tx_pkt_wr *wr, struct mbuf *m0, u_int available) { struct sge_eq *eq = &txq->eq; struct tx_sdesc *txsd; struct cpl_tx_pkt_core *cpl; uint32_t ctrl; /* used in many unrelated places */ uint64_t ctrl1; int len16, ndesc, pktlen, nsegs; caddr_t dst; TXQ_LOCK_ASSERT_OWNED(txq); M_ASSERTPKTHDR(m0); MPASS(available > 0 && available < eq->sidx); len16 = mbuf_len16(m0); nsegs = mbuf_nsegs(m0); pktlen = m0->m_pkthdr.len; ctrl = sizeof(struct cpl_tx_pkt_core); if (needs_tso(m0)) ctrl += sizeof(struct cpl_tx_pkt_lso_core); else if (pktlen <= imm_payload(2) && available >= 2) { /* Immediate data. Recalculate len16 and set nsegs to 0. */ ctrl += pktlen; len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + sizeof(struct cpl_tx_pkt_core) + pktlen, 16); nsegs = 0; } ndesc = howmany(len16, EQ_ESIZE / 16); MPASS(ndesc <= available); /* Firmware work request header */ MPASS(wr == (void *)&eq->desc[eq->pidx]); wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); ctrl = V_FW_WR_LEN16(len16); wr->equiq_to_len16 = htobe32(ctrl); wr->r3 = 0; if (needs_tso(m0)) { struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && m0->m_pkthdr.l4hlen > 0, ("%s: mbuf %p needs TSO but missing header lengths", __func__, m0)); ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE | V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) | V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); if (m0->m_pkthdr.l2hlen == sizeof(struct ether_vlan_header)) ctrl |= V_LSO_ETHHDR_LEN(1); if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) ctrl |= F_LSO_IPV6; lso->lso_ctrl = htobe32(ctrl); lso->ipid_ofst = htobe16(0); lso->mss = htobe16(m0->m_pkthdr.tso_segsz); lso->seqno_offset = htobe32(0); lso->len = htobe32(pktlen); cpl = (void *)(lso + 1); txq->tso_wrs++; } else cpl = (void *)(wr + 1); /* Checksum offload */ ctrl1 = 0; if (needs_l3_csum(m0) == 0) ctrl1 |= F_TXPKT_IPCSUM_DIS; if (needs_l4_csum(m0) == 0) ctrl1 |= F_TXPKT_L4CSUM_DIS; if (m0->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) txq->txcsum++; /* some hardware assistance provided */ /* VLAN tag insertion */ if (needs_vlan_insertion(m0)) { ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); txq->vlan_insertion++; } /* CPL header */ cpl->ctrl0 = txq->cpl_ctrl0; cpl->pack = 0; cpl->len = htobe16(pktlen); cpl->ctrl1 = htobe64(ctrl1); /* SGL */ dst = (void *)(cpl + 1); if (nsegs > 0) { write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); txq->sgl_wrs++; } else { struct mbuf *m; for (m = m0; m != NULL; m = m->m_next) { copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); #ifdef INVARIANTS pktlen -= m->m_len; #endif } #ifdef INVARIANTS KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen)); #endif txq->imm_wrs++; } txq->txpkt_wrs++; txsd = &txq->sdesc[eq->pidx]; txsd->m = m0; txsd->desc_used = ndesc; return (ndesc); } static int try_txpkts(struct mbuf *m, struct mbuf *n, struct txpkts *txp, u_int available) { u_int needed, nsegs1, nsegs2, l1, l2; if (cannot_use_txpkts(m) || cannot_use_txpkts(n)) return (1); nsegs1 = mbuf_nsegs(m); nsegs2 = mbuf_nsegs(n); if (nsegs1 + nsegs2 == 2) { txp->wr_type = 1; l1 = l2 = txpkts1_len16(); } else { txp->wr_type = 0; l1 = txpkts0_len16(nsegs1); l2 = txpkts0_len16(nsegs2); } txp->len16 = howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) + l1 + l2; needed = 
howmany(txp->len16, EQ_ESIZE / 16); if (needed > SGE_MAX_WR_NDESC || needed > available) return (1); txp->plen = m->m_pkthdr.len + n->m_pkthdr.len; if (txp->plen > 65535) return (1); txp->npkt = 2; set_mbuf_len16(m, l1); set_mbuf_len16(n, l2); return (0); } static int add_to_txpkts(struct mbuf *m, struct txpkts *txp, u_int available) { u_int plen, len16, needed, nsegs; MPASS(txp->wr_type == 0 || txp->wr_type == 1); nsegs = mbuf_nsegs(m); if (needs_tso(m) || (txp->wr_type == 1 && nsegs != 1)) return (1); plen = txp->plen + m->m_pkthdr.len; if (plen > 65535) return (1); if (txp->wr_type == 0) len16 = txpkts0_len16(nsegs); else len16 = txpkts1_len16(); needed = howmany(txp->len16 + len16, EQ_ESIZE / 16); if (needed > SGE_MAX_WR_NDESC || needed > available) return (1); txp->npkt++; txp->plen = plen; txp->len16 += len16; set_mbuf_len16(m, len16); return (0); } /* * Write a txpkts WR for the packets in txp to the hardware descriptors, update * the software descriptor, and advance the pidx. It is guaranteed that enough * descriptors are available. * * The return value is the # of hardware descriptors used. */ static u_int write_txpkts_wr(struct sge_txq *txq, struct fw_eth_tx_pkts_wr *wr, struct mbuf *m0, const struct txpkts *txp, u_int available) { struct sge_eq *eq = &txq->eq; struct tx_sdesc *txsd; struct cpl_tx_pkt_core *cpl; uint32_t ctrl; uint64_t ctrl1; int ndesc, checkwrap; struct mbuf *m; void *flitp; TXQ_LOCK_ASSERT_OWNED(txq); MPASS(txp->npkt > 0); MPASS(txp->plen < 65536); MPASS(m0 != NULL); MPASS(m0->m_nextpkt != NULL); MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16)); MPASS(available > 0 && available < eq->sidx); ndesc = howmany(txp->len16, EQ_ESIZE / 16); MPASS(ndesc <= available); MPASS(wr == (void *)&eq->desc[eq->pidx]); wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); ctrl = V_FW_WR_LEN16(txp->len16); wr->equiq_to_len16 = htobe32(ctrl); wr->plen = htobe16(txp->plen); wr->npkt = txp->npkt; wr->r3 = 0; wr->type = txp->wr_type; flitp = wr + 1; /* * At this point we are 16B into a hardware descriptor. If checkwrap is * set then we know the WR is going to wrap around somewhere. We'll * check for that at appropriate points. 
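 * For example (illustrative numbers): with sidx = 512, pidx = 510 and
 * ndesc = 4, we have sidx - ndesc = 508 < 510, so this WR occupies
 * descriptors 510, 511, 0 and 1 and the wrap checks below must fire.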
 */
	checkwrap = eq->sidx - ndesc < eq->pidx;
	for (m = m0; m != NULL; m = m->m_nextpkt) {
		if (txp->wr_type == 0) {
			struct ulp_txpkt *ulpmc;
			struct ulptx_idata *ulpsc;

			/* ULP master command */
			ulpmc = flitp;
			ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
			    V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(eq->iqid));
			ulpmc->len = htobe32(mbuf_len16(m));

			/* ULP subcommand */
			ulpsc = (void *)(ulpmc + 1);
			ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
			    F_ULP_TX_SC_MORE);
			ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core));

			cpl = (void *)(ulpsc + 1);
			if (checkwrap &&
			    (uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx])
				cpl = (void *)&eq->desc[0];
-			txq->txpkts0_pkts += txp->npkt;
-			txq->txpkts0_wrs++;
		} else {
			cpl = flitp;
-			txq->txpkts1_pkts += txp->npkt;
-			txq->txpkts1_wrs++;
		}

		/* Checksum offload */
		ctrl1 = 0;
		if (needs_l3_csum(m) == 0)
			ctrl1 |= F_TXPKT_IPCSUM_DIS;
		if (needs_l4_csum(m) == 0)
			ctrl1 |= F_TXPKT_L4CSUM_DIS;
		if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP |
		    CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
			txq->txcsum++;	/* some hardware assistance provided */

		/* VLAN tag insertion */
		if (needs_vlan_insertion(m)) {
			ctrl1 |= F_TXPKT_VLAN_VLD |
			    V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
			txq->vlan_insertion++;
		}

		/* CPL header */
		cpl->ctrl0 = txq->cpl_ctrl0;
		cpl->pack = 0;
		cpl->len = htobe16(m->m_pkthdr.len);
		cpl->ctrl1 = htobe64(ctrl1);

		flitp = cpl + 1;
		if (checkwrap &&
		    (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx])
			flitp = (void *)&eq->desc[0];

		write_gl_to_txd(txq, m, (caddr_t *)(&flitp), checkwrap);
+	}
+
+	if (txp->wr_type == 0) {
+		txq->txpkts0_pkts += txp->npkt;
+		txq->txpkts0_wrs++;
+	} else {
+		txq->txpkts1_pkts += txp->npkt;
+		txq->txpkts1_wrs++;
	}

	txsd = &txq->sdesc[eq->pidx];
	txsd->m = m0;
	txsd->desc_used = ndesc;

	return (ndesc);
}

/*
 * If the SGL ends on an address that is not 16 byte aligned, this function will
 * add a 0 filled flit at the end.
 */
static void
write_gl_to_txd(struct sge_txq *txq, struct mbuf *m, caddr_t *to, int checkwrap)
{
	struct sge_eq *eq = &txq->eq;
	struct sglist *gl = txq->gl;
	struct sglist_seg *seg;
	__be64 *flitp, *wrap;
	struct ulptx_sgl *usgl;
	int i, nflits, nsegs;

	KASSERT(((uintptr_t)(*to) & 0xf) == 0,
	    ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to));
	MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
	MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);

	get_pkt_gl(m, gl);
	nsegs = gl->sg_nseg;
	MPASS(nsegs > 0);

	nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2;
	flitp = (__be64 *)(*to);
	wrap = (__be64 *)(&eq->desc[eq->sidx]);
	seg = &gl->sg_segs[0];
	usgl = (void *)flitp;

	/*
	 * We start at a 16 byte boundary somewhere inside the tx descriptor
	 * ring, so we're at least 16 bytes away from the status page.  There is
	 * no chance of a wrap around in the middle of usgl (which is 16 bytes).
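	 * (A worked example of the nflits computation above: the first two
	 * flits hold cmd_nsge, len0 and addr0; each additional pair of
	 * segments needs 3 more flits -- two 4-byte lengths plus two 8-byte
	 * addresses.  For nsegs = 3 that is (3 * 2) / 2 + 0 + 2 = 5 flits,
	 * i.e. 40 bytes.)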
*/ usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | V_ULPTX_NSGE(nsegs)); usgl->len0 = htobe32(seg->ss_len); usgl->addr0 = htobe64(seg->ss_paddr); seg++; if (checkwrap == 0 || (uintptr_t)(flitp + nflits) <= (uintptr_t)wrap) { /* Won't wrap around at all */ for (i = 0; i < nsegs - 1; i++, seg++) { usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len); usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr); } if (i & 1) usgl->sge[i / 2].len[1] = htobe32(0); flitp += nflits; } else { /* Will wrap somewhere in the rest of the SGL */ /* 2 flits already written, write the rest flit by flit */ flitp = (void *)(usgl + 1); for (i = 0; i < nflits - 2; i++) { if (flitp == wrap) flitp = (void *)eq->desc; *flitp++ = get_flit(seg, nsegs - 1, i); } } if (nflits & 1) { MPASS(((uintptr_t)flitp) & 0xf); *flitp++ = 0; } MPASS((((uintptr_t)flitp) & 0xf) == 0); if (__predict_false(flitp == wrap)) *to = (void *)eq->desc; else *to = (void *)flitp; } static inline void copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len) { MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); if (__predict_true((uintptr_t)(*to) + len <= (uintptr_t)&eq->desc[eq->sidx])) { bcopy(from, *to, len); (*to) += len; } else { int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to); bcopy(from, *to, portion); from += portion; portion = len - portion; /* remaining */ bcopy(from, (void *)eq->desc, portion); (*to) = (caddr_t)eq->desc + portion; } } static inline void ring_eq_db(struct adapter *sc, struct sge_eq *eq, u_int n) { u_int db; MPASS(n > 0); db = eq->doorbells; if (n > 1) clrbit(&db, DOORBELL_WCWR); wmb(); switch (ffs(db) - 1) { case DOORBELL_UDB: *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); break; case DOORBELL_WCWR: { volatile uint64_t *dst, *src; int i; /* * Queues whose 128B doorbell segment fits in the page do not * use relative qid (udb_qid is always 0). Only queues with * doorbell segments can do WCWR. */ KASSERT(eq->udb_qid == 0 && n == 1, ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p", __func__, eq->doorbells, n, eq->dbidx, eq)); dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET - UDBS_DB_OFFSET); i = eq->dbidx; src = (void *)&eq->desc[i]; while (src != (void *)&eq->desc[i + 1]) *dst++ = *src++; wmb(); break; } case DOORBELL_UDBWC: *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); wmb(); break; case DOORBELL_KDB: t4_write_reg(sc, sc->sge_kdoorbell_reg, V_QID(eq->cntxt_id) | V_PIDX(n)); break; } IDXINCR(eq->dbidx, n, eq->sidx); } static inline u_int reclaimable_tx_desc(struct sge_eq *eq) { uint16_t hw_cidx; hw_cidx = read_hw_cidx(eq); return (IDXDIFF(hw_cidx, eq->cidx, eq->sidx)); } static inline u_int total_available_tx_desc(struct sge_eq *eq) { uint16_t hw_cidx, pidx; hw_cidx = read_hw_cidx(eq); pidx = eq->pidx; if (pidx == hw_cidx) return (eq->sidx - 1); else return (IDXDIFF(hw_cidx, pidx, eq->sidx) - 1); } static inline uint16_t read_hw_cidx(struct sge_eq *eq) { struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; uint16_t cidx = spg->cidx; /* stable snapshot */ return (be16toh(cidx)); } /* * Reclaim 'n' descriptors approximately. 
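 * "Approximately" because reclamation proceeds a whole WR at a time: with
 * n = 32 and a WR that used 4 descriptors when 30 have been reclaimed, the
 * loop below stops at 34, not 32.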
*/ static u_int reclaim_tx_descs(struct sge_txq *txq, u_int n) { struct tx_sdesc *txsd; struct sge_eq *eq = &txq->eq; u_int can_reclaim, reclaimed; TXQ_LOCK_ASSERT_OWNED(txq); MPASS(n > 0); reclaimed = 0; can_reclaim = reclaimable_tx_desc(eq); while (can_reclaim && reclaimed < n) { int ndesc; struct mbuf *m, *nextpkt; txsd = &txq->sdesc[eq->cidx]; ndesc = txsd->desc_used; /* Firmware doesn't return "partial" credits. */ KASSERT(can_reclaim >= ndesc, ("%s: unexpected number of credits: %d, %d", __func__, can_reclaim, ndesc)); for (m = txsd->m; m != NULL; m = nextpkt) { nextpkt = m->m_nextpkt; m->m_nextpkt = NULL; m_freem(m); } reclaimed += ndesc; can_reclaim -= ndesc; IDXINCR(eq->cidx, ndesc, eq->sidx); } return (reclaimed); } static void tx_reclaim(void *arg, int n) { struct sge_txq *txq = arg; struct sge_eq *eq = &txq->eq; do { if (TXQ_TRYLOCK(txq) == 0) break; n = reclaim_tx_descs(txq, 32); if (eq->cidx == eq->pidx) eq->equeqidx = eq->pidx; TXQ_UNLOCK(txq); } while (n > 0); } static __be64 get_flit(struct sglist_seg *segs, int nsegs, int idx) { int i = (idx / 3) * 2; switch (idx % 3) { case 0: { __be64 rc; rc = htobe32(segs[i].ss_len); if (i + 1 < nsegs) rc |= (uint64_t)htobe32(segs[i + 1].ss_len) << 32; return (rc); } case 1: return (htobe64(segs[i].ss_paddr)); case 2: return (htobe64(segs[i + 1].ss_paddr)); } return (0); } static void find_best_refill_source(struct adapter *sc, struct sge_fl *fl, int maxp) { int8_t zidx, hwidx, idx; uint16_t region1, region3; int spare, spare_needed, n; struct sw_zone_info *swz; struct hw_buf_info *hwb, *hwb_list = &sc->sge.hw_buf_info[0]; /* * Buffer Packing: Look for PAGE_SIZE or larger zone which has a bufsize * large enough for the max payload and cluster metadata. Otherwise * settle for the largest bufsize that leaves enough room in the cluster * for metadata. * * Without buffer packing: Look for the smallest zone which has a * bufsize large enough for the max payload. Settle for the largest * bufsize available if there's nothing big enough for max payload. */ spare_needed = fl->flags & FL_BUF_PACKING ? CL_METADATA_SIZE : 0; swz = &sc->sge.sw_zone_info[0]; hwidx = -1; for (zidx = 0; zidx < SW_ZONE_SIZES; zidx++, swz++) { if (swz->size > largest_rx_cluster) { if (__predict_true(hwidx != -1)) break; /* * This is a misconfiguration. largest_rx_cluster is * preventing us from finding a refill source. See * dev.t5nex..buffer_sizes to figure out why. */ device_printf(sc->dev, "largest_rx_cluster=%u leaves no" " refill source for fl %p (dma %u). Ignored.\n", largest_rx_cluster, fl, maxp); } for (idx = swz->head_hwidx; idx != -1; idx = hwb->next) { hwb = &hwb_list[idx]; spare = swz->size - hwb->size; if (spare < spare_needed) continue; hwidx = idx; /* best option so far */ if (hwb->size >= maxp) { if ((fl->flags & FL_BUF_PACKING) == 0) goto done; /* stop looking (not packing) */ if (swz->size >= safest_rx_cluster) goto done; /* stop looking (packing) */ } break; /* keep looking, next zone */ } } done: /* A usable hwidx has been located. */ MPASS(hwidx != -1); hwb = &hwb_list[hwidx]; zidx = hwb->zidx; swz = &sc->sge.sw_zone_info[zidx]; region1 = 0; region3 = swz->size - hwb->size; /* * Stay within this zone and see if there is a better match when mbuf * inlining is allowed. Remember that the hwidx's are sorted in * decreasing order of size (so in increasing order of spare area). 
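 * (A hedged illustration with made-up sizes: a 4096-byte cluster whose
 * hardware payload region is 2048 bytes leaves 2048 spare; after reserving
 * CL_METADATA_SIZE, roughly (2048 - CL_METADATA_SIZE) / MSIZE mbufs fit,
 * giving region1 = n * MSIZE and leaving the rest in region3.)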
*/ for (idx = hwidx; idx != -1; idx = hwb->next) { hwb = &hwb_list[idx]; spare = swz->size - hwb->size; if (allow_mbufs_in_cluster == 0 || hwb->size < maxp) break; /* * Do not inline mbufs if doing so would violate the pad/pack * boundary alignment requirement. */ if (fl_pad && (MSIZE % sc->params.sge.pad_boundary) != 0) continue; if (fl->flags & FL_BUF_PACKING && (MSIZE % sc->params.sge.pack_boundary) != 0) continue; if (spare < CL_METADATA_SIZE + MSIZE) continue; n = (spare - CL_METADATA_SIZE) / MSIZE; if (n > howmany(hwb->size, maxp)) break; hwidx = idx; if (fl->flags & FL_BUF_PACKING) { region1 = n * MSIZE; region3 = spare - region1; } else { region1 = MSIZE; region3 = spare - region1; break; } } KASSERT(zidx >= 0 && zidx < SW_ZONE_SIZES, ("%s: bad zone %d for fl %p, maxp %d", __func__, zidx, fl, maxp)); KASSERT(hwidx >= 0 && hwidx <= SGE_FLBUF_SIZES, ("%s: bad hwidx %d for fl %p, maxp %d", __func__, hwidx, fl, maxp)); KASSERT(region1 + sc->sge.hw_buf_info[hwidx].size + region3 == sc->sge.sw_zone_info[zidx].size, ("%s: bad buffer layout for fl %p, maxp %d. " "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp, sc->sge.sw_zone_info[zidx].size, region1, sc->sge.hw_buf_info[hwidx].size, region3)); if (fl->flags & FL_BUF_PACKING || region1 > 0) { KASSERT(region3 >= CL_METADATA_SIZE, ("%s: no room for metadata. fl %p, maxp %d; " "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp, sc->sge.sw_zone_info[zidx].size, region1, sc->sge.hw_buf_info[hwidx].size, region3)); KASSERT(region1 % MSIZE == 0, ("%s: bad mbuf region for fl %p, maxp %d. " "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp, sc->sge.sw_zone_info[zidx].size, region1, sc->sge.hw_buf_info[hwidx].size, region3)); } fl->cll_def.zidx = zidx; fl->cll_def.hwidx = hwidx; fl->cll_def.region1 = region1; fl->cll_def.region3 = region3; } static void find_safe_refill_source(struct adapter *sc, struct sge_fl *fl) { struct sge *s = &sc->sge; struct hw_buf_info *hwb; struct sw_zone_info *swz; int spare; int8_t hwidx; if (fl->flags & FL_BUF_PACKING) hwidx = s->safe_hwidx2; /* with room for metadata */ else if (allow_mbufs_in_cluster && s->safe_hwidx2 != -1) { hwidx = s->safe_hwidx2; hwb = &s->hw_buf_info[hwidx]; swz = &s->sw_zone_info[hwb->zidx]; spare = swz->size - hwb->size; /* no good if there isn't room for an mbuf as well */ if (spare < CL_METADATA_SIZE + MSIZE) hwidx = s->safe_hwidx1; } else hwidx = s->safe_hwidx1; if (hwidx == -1) { /* No fallback source */ fl->cll_alt.hwidx = -1; fl->cll_alt.zidx = -1; return; } hwb = &s->hw_buf_info[hwidx]; swz = &s->sw_zone_info[hwb->zidx]; spare = swz->size - hwb->size; fl->cll_alt.hwidx = hwidx; fl->cll_alt.zidx = hwb->zidx; if (allow_mbufs_in_cluster && (fl_pad == 0 || (MSIZE % sc->params.sge.pad_boundary) == 0)) fl->cll_alt.region1 = ((spare - CL_METADATA_SIZE) / MSIZE) * MSIZE; else fl->cll_alt.region1 = 0; fl->cll_alt.region3 = spare - fl->cll_alt.region1; } static void add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl) { mtx_lock(&sc->sfl_lock); FL_LOCK(fl); if ((fl->flags & FL_DOOMED) == 0) { fl->flags |= FL_STARVING; TAILQ_INSERT_TAIL(&sc->sfl, fl, link); callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc); } FL_UNLOCK(fl); mtx_unlock(&sc->sfl_lock); } static void handle_wrq_egr_update(struct adapter *sc, struct sge_eq *eq) { struct sge_wrq *wrq = (void *)eq; atomic_readandclear_int(&eq->equiq); taskqueue_enqueue(sc->tq[eq->tx_chan], &wrq->wrq_tx_task); } static void handle_eth_egr_update(struct adapter *sc, struct sge_eq *eq) { struct sge_txq *txq = (void *)eq; MPASS((eq->flags 
& EQ_TYPEMASK) == EQ_ETH); atomic_readandclear_int(&eq->equiq); mp_ring_check_drainage(txq->r, 0); taskqueue_enqueue(sc->tq[eq->tx_chan], &txq->tx_reclaim_task); } static int handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1); unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid)); struct adapter *sc = iq->adapter; struct sge *s = &sc->sge; struct sge_eq *eq; static void (*h[])(struct adapter *, struct sge_eq *) = {NULL, &handle_wrq_egr_update, &handle_eth_egr_update, &handle_wrq_egr_update}; KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, rss->opcode)); eq = s->eqmap[qid - s->eq_start - s->eq_base]; (*h[eq->flags & EQ_TYPEMASK])(sc, eq); return (0); } /* handle_fw_msg works for both fw4_msg and fw6_msg because this is valid */ CTASSERT(offsetof(struct cpl_fw4_msg, data) == \ offsetof(struct cpl_fw6_msg, data)); static int handle_fw_msg(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct adapter *sc = iq->adapter; const struct cpl_fw6_msg *cpl = (const void *)(rss + 1); KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, rss->opcode)); if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) { const struct rss_header *rss2; rss2 = (const struct rss_header *)&cpl->data[0]; return (t4_cpl_handler[rss2->opcode](iq, rss2, m)); } return (t4_fw_msg_handler[cpl->type](sc, &cpl->data[0])); } /** * t4_handle_wrerr_rpl - process a FW work request error message * @adap: the adapter * @rpl: start of the FW message */ static int t4_handle_wrerr_rpl(struct adapter *adap, const __be64 *rpl) { u8 opcode = *(const u8 *)rpl; const struct fw_error_cmd *e = (const void *)rpl; unsigned int i; if (opcode != FW_ERROR_CMD) { log(LOG_ERR, "%s: Received WRERR_RPL message with opcode %#x\n", device_get_nameunit(adap->dev), opcode); return (EINVAL); } log(LOG_ERR, "%s: FW_ERROR (%s) ", device_get_nameunit(adap->dev), G_FW_ERROR_CMD_FATAL(be32toh(e->op_to_type)) ? "fatal" : "non-fatal"); switch (G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))) { case FW_ERROR_TYPE_EXCEPTION: log(LOG_ERR, "exception info:\n"); for (i = 0; i < nitems(e->u.exception.info); i++) log(LOG_ERR, "%s%08x", i == 0 ? "\t" : " ", be32toh(e->u.exception.info[i])); log(LOG_ERR, "\n"); break; case FW_ERROR_TYPE_HWMODULE: log(LOG_ERR, "HW module regaddr %08x regval %08x\n", be32toh(e->u.hwmodule.regaddr), be32toh(e->u.hwmodule.regval)); break; case FW_ERROR_TYPE_WR: log(LOG_ERR, "WR cidx %d PF %d VF %d eqid %d hdr:\n", be16toh(e->u.wr.cidx), G_FW_ERROR_CMD_PFN(be16toh(e->u.wr.pfn_vfn)), G_FW_ERROR_CMD_VFN(be16toh(e->u.wr.pfn_vfn)), be32toh(e->u.wr.eqid)); for (i = 0; i < nitems(e->u.wr.wrhdr); i++) log(LOG_ERR, "%s%02x", i == 0 ? "\t" : " ", e->u.wr.wrhdr[i]); log(LOG_ERR, "\n"); break; case FW_ERROR_TYPE_ACL: log(LOG_ERR, "ACL cidx %d PF %d VF %d eqid %d %s", be16toh(e->u.acl.cidx), G_FW_ERROR_CMD_PFN(be16toh(e->u.acl.pfn_vfn)), G_FW_ERROR_CMD_VFN(be16toh(e->u.acl.pfn_vfn)), be32toh(e->u.acl.eqid), G_FW_ERROR_CMD_MV(be16toh(e->u.acl.mv_pkd)) ? 
"vlanid" : "MAC"); for (i = 0; i < nitems(e->u.acl.val); i++) log(LOG_ERR, " %02x", e->u.acl.val[i]); log(LOG_ERR, "\n"); break; default: log(LOG_ERR, "type %#x\n", G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))); return (EINVAL); } return (0); } static int sysctl_uint16(SYSCTL_HANDLER_ARGS) { uint16_t *id = arg1; int i = *id; return sysctl_handle_int(oidp, &i, 0, req); } static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS) { struct sge *s = arg1; struct hw_buf_info *hwb = &s->hw_buf_info[0]; struct sw_zone_info *swz = &s->sw_zone_info[0]; int i, rc; struct sbuf sb; char c; sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) { if (hwb->zidx >= 0 && swz[hwb->zidx].size <= largest_rx_cluster) c = '*'; else c = '\0'; sbuf_printf(&sb, "%u%c ", hwb->size, c); } sbuf_trim(&sb); sbuf_finish(&sb); rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); sbuf_delete(&sb); return (rc); } static int sysctl_tc(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct port_info *pi; struct adapter *sc; struct sge_txq *txq; struct tx_cl_rl_params *tc; int qidx = arg2, rc, tc_idx; uint32_t fw_queue, fw_class; MPASS(qidx >= 0 && qidx < vi->ntxq); pi = vi->pi; sc = pi->adapter; txq = &sc->sge.txq[vi->first_txq + qidx]; tc_idx = txq->tc_idx; rc = sysctl_handle_int(oidp, &tc_idx, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (sc->flags & IS_VF) return (EPERM); /* Note that -1 is legitimate input (it means unbind). */ if (tc_idx < -1 || tc_idx >= sc->chip_params->nsched_cls) return (EINVAL); mtx_lock(&sc->tc_lock); if (tc_idx == txq->tc_idx) { rc = 0; /* No change, nothing to do. */ goto done; } fw_queue = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH) | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id); if (tc_idx == -1) fw_class = 0xffffffff; /* Unbind. */ else { /* * Bind to a different class. */ tc = &pi->sched_params->cl_rl[tc_idx]; if (tc->flags & TX_CLRL_ERROR) { /* Previous attempt to set the cl-rl params failed. */ rc = EIO; goto done; } else { /* * Ok to proceed. Place a reference on the new class * while still holding on to the reference on the * previous class, if any. */ fw_class = tc_idx; tc->refcount++; } } mtx_unlock(&sc->tc_lock); rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4stc"); if (rc) return (rc); rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue, &fw_class); end_synchronized_op(sc, 0); mtx_lock(&sc->tc_lock); if (rc == 0) { if (txq->tc_idx != -1) { tc = &pi->sched_params->cl_rl[txq->tc_idx]; MPASS(tc->refcount > 0); tc->refcount--; } txq->tc_idx = tc_idx; } else if (tc_idx != -1) { tc = &pi->sched_params->cl_rl[tc_idx]; MPASS(tc->refcount > 0); tc->refcount--; } done: mtx_unlock(&sc->tc_lock); return (rc); } Index: projects/clang500-import/sys/dev/etherswitch/e6000sw/e6000sw.c =================================================================== --- projects/clang500-import/sys/dev/etherswitch/e6000sw/e6000sw.c (revision 319548) +++ projects/clang500-import/sys/dev/etherswitch/e6000sw/e6000sw.c (revision 319549) @@ -1,1249 +1,1203 @@ /*- * Copyright (c) 2015 Semihalf * Copyright (c) 2015 Stormshield * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); -#include #include -#include -#include +#include +#include #include #include -#include #include -#include -#include -#include -#include -#include +#include +#include #include #include #include -#include -#include - -#include -#include -#include - #include -#include #include #include -#include #include #include -#include #include "e6000swreg.h" #include "etherswitch_if.h" #include "miibus_if.h" #include "mdio_if.h" MALLOC_DECLARE(M_E6000SW); MALLOC_DEFINE(M_E6000SW, "e6000sw", "e6000sw switch"); -#define E6000SW_LOCK(_sc) \ - sx_xlock(&(_sc)->sx) -#define E6000SW_UNLOCK(_sc) \ - sx_unlock(&(_sc)->sx) -#define E6000SW_LOCK_ASSERT(_sc, _what) \ - sx_assert(&(_sc)->sx, (_what)) -#define E6000SW_TRYLOCK(_sc) \ - sx_tryxlock(&(_sc)->sx) +#define E6000SW_LOCK(_sc) sx_xlock(&(_sc)->sx) +#define E6000SW_UNLOCK(_sc) sx_unlock(&(_sc)->sx) +#define E6000SW_LOCK_ASSERT(_sc, _what) sx_assert(&(_sc)->sx, (_what)) +#define E6000SW_TRYLOCK(_sc) sx_tryxlock(&(_sc)->sx) typedef struct e6000sw_softc { device_t dev; phandle_t node; struct sx sx; struct ifnet *ifp[E6000SW_MAX_PORTS]; char *ifname[E6000SW_MAX_PORTS]; device_t miibus[E6000SW_MAX_PORTS]; struct mii_data *mii[E6000SW_MAX_PORTS]; struct proc *kproc; uint32_t cpuports_mask; uint32_t fixed_mask; int sw_addr; int num_ports; boolean_t multi_chip; int vid[E6000SW_NUM_VGROUPS]; int members[E6000SW_NUM_VGROUPS]; int vgroup[E6000SW_MAX_PORTS]; } e6000sw_softc_t; static etherswitch_info_t etherswitch_info = { .es_nports = 0, .es_nvlangroups = E6000SW_NUM_VGROUPS, .es_name = "Marvell 6000 series switch" }; -static void e6000sw_identify(driver_t *driver, device_t parent); -static int e6000sw_probe(device_t dev); -static int e6000sw_attach(device_t dev); -static int e6000sw_detach(device_t dev); -static int e6000sw_readphy(device_t dev, int phy, int reg); -static int e6000sw_writephy(device_t dev, int phy, int reg, int data); -static etherswitch_info_t* e6000sw_getinfo(device_t dev); -static void e6000sw_lock(device_t dev); -static void e6000sw_unlock(device_t dev); -static int e6000sw_getport(device_t dev, etherswitch_port_t *p); -static int e6000sw_setport(device_t dev, etherswitch_port_t *p); -static int e6000sw_readreg_wrapper(device_t dev, int addr_reg); -static int e6000sw_writereg_wrapper(device_t dev, int addr_reg, int val); -static int e6000sw_readphy_wrapper(device_t dev, int phy, int reg); -static int e6000sw_writephy_wrapper(device_t dev, int phy, int reg, int data); -static int e6000sw_getvgroup_wrapper(device_t dev, etherswitch_vlangroup_t *vg); -static int 
e6000sw_setvgroup_wrapper(device_t dev, etherswitch_vlangroup_t *vg); -static int e6000sw_setvgroup(device_t dev, etherswitch_vlangroup_t *vg); -static int e6000sw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg); -static void e6000sw_setup(device_t dev, e6000sw_softc_t *sc); -static void e6000sw_port_vlan_conf(e6000sw_softc_t *sc); -static void e6000sw_tick(void *arg); -static void e6000sw_set_atustat(device_t dev, e6000sw_softc_t *sc, int bin, - int flag); -static int e6000sw_atu_flush(device_t dev, e6000sw_softc_t *sc, int flag); -static __inline void e6000sw_writereg(e6000sw_softc_t *sc, int addr, int reg, - int val); -static __inline uint32_t e6000sw_readreg(e6000sw_softc_t *sc, int addr, - int reg); -static int e6000sw_ifmedia_upd(struct ifnet *ifp); -static void e6000sw_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); -static int e6000sw_atu_mac_table(device_t dev, e6000sw_softc_t *sc, struct - atu_opt *atu, int flag); -static int e6000sw_get_pvid(e6000sw_softc_t *sc, int port, int *pvid); -static int e6000sw_set_pvid(e6000sw_softc_t *sc, int port, int pvid); -static __inline int e6000sw_is_cpuport(e6000sw_softc_t *sc, int port); -static __inline int e6000sw_is_fixedport(e6000sw_softc_t *sc, int port); -static __inline int e6000sw_is_phyport(e6000sw_softc_t *sc, int port); -static __inline struct mii_data *e6000sw_miiforphy(e6000sw_softc_t *sc, - unsigned int phy); +static void e6000sw_identify(driver_t *, device_t); +static int e6000sw_probe(device_t); +static int e6000sw_attach(device_t); +static int e6000sw_detach(device_t); +static int e6000sw_readphy(device_t, int, int); +static int e6000sw_writephy(device_t, int, int, int); +static etherswitch_info_t* e6000sw_getinfo(device_t); +static void e6000sw_lock(device_t); +static void e6000sw_unlock(device_t); +static int e6000sw_getport(device_t, etherswitch_port_t *); +static int e6000sw_setport(device_t, etherswitch_port_t *); +static int e6000sw_readreg_wrapper(device_t, int); +static int e6000sw_writereg_wrapper(device_t, int, int); +static int e6000sw_readphy_wrapper(device_t, int, int); +static int e6000sw_writephy_wrapper(device_t, int, int, int); +static int e6000sw_getvgroup_wrapper(device_t, etherswitch_vlangroup_t *); +static int e6000sw_setvgroup_wrapper(device_t, etherswitch_vlangroup_t *); +static int e6000sw_setvgroup(device_t, etherswitch_vlangroup_t *); +static int e6000sw_getvgroup(device_t, etherswitch_vlangroup_t *); +static void e6000sw_setup(device_t, e6000sw_softc_t *); +static void e6000sw_port_vlan_conf(e6000sw_softc_t *); +static void e6000sw_tick(void *); +static void e6000sw_set_atustat(device_t, e6000sw_softc_t *, int, int); +static int e6000sw_atu_flush(device_t, e6000sw_softc_t *, int); +static __inline void e6000sw_writereg(e6000sw_softc_t *, int, int, int); +static __inline uint32_t e6000sw_readreg(e6000sw_softc_t *, int, int); +static int e6000sw_ifmedia_upd(struct ifnet *); +static void e6000sw_ifmedia_sts(struct ifnet *, struct ifmediareq *); +static int e6000sw_atu_mac_table(device_t, e6000sw_softc_t *, struct atu_opt *, + int); +static int e6000sw_get_pvid(e6000sw_softc_t *, int, int *); +static int e6000sw_set_pvid(e6000sw_softc_t *, int, int); +static __inline int e6000sw_is_cpuport(e6000sw_softc_t *, int); +static __inline int e6000sw_is_fixedport(e6000sw_softc_t *, int); +static __inline int e6000sw_is_phyport(e6000sw_softc_t *, int); +static __inline struct mii_data *e6000sw_miiforphy(e6000sw_softc_t *, + unsigned int); static device_method_t e6000sw_methods[] = { /* device 
interface */ DEVMETHOD(device_identify, e6000sw_identify), DEVMETHOD(device_probe, e6000sw_probe), DEVMETHOD(device_attach, e6000sw_attach), DEVMETHOD(device_detach, e6000sw_detach), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), /* mii interface */ DEVMETHOD(miibus_readreg, e6000sw_readphy), DEVMETHOD(miibus_writereg, e6000sw_writephy), /* etherswitch interface */ DEVMETHOD(etherswitch_getinfo, e6000sw_getinfo), DEVMETHOD(etherswitch_lock, e6000sw_lock), DEVMETHOD(etherswitch_unlock, e6000sw_unlock), DEVMETHOD(etherswitch_getport, e6000sw_getport), DEVMETHOD(etherswitch_setport, e6000sw_setport), DEVMETHOD(etherswitch_readreg, e6000sw_readreg_wrapper), DEVMETHOD(etherswitch_writereg, e6000sw_writereg_wrapper), DEVMETHOD(etherswitch_readphyreg, e6000sw_readphy_wrapper), DEVMETHOD(etherswitch_writephyreg, e6000sw_writephy_wrapper), DEVMETHOD(etherswitch_setvgroup, e6000sw_setvgroup_wrapper), DEVMETHOD(etherswitch_getvgroup, e6000sw_getvgroup_wrapper), DEVMETHOD_END }; static devclass_t e6000sw_devclass; DEFINE_CLASS_0(e6000sw, e6000sw_driver, e6000sw_methods, sizeof(e6000sw_softc_t)); DRIVER_MODULE(e6000sw, mdio, e6000sw_driver, e6000sw_devclass, 0, 0); DRIVER_MODULE(etherswitch, e6000sw, etherswitch_driver, etherswitch_devclass, 0, 0); DRIVER_MODULE(miibus, e6000sw, miibus_driver, miibus_devclass, 0, 0); MODULE_DEPEND(e6000sw, mdio, 1, 1, 1); -#define SMI_CMD 0 -#define SMI_CMD_BUSY (1<<15) -#define SMI_CMD_OP_READ ((2<<10)|SMI_CMD_BUSY|(1<<12)) -#define SMI_CMD_OP_WRITE ((1<<10)|SMI_CMD_BUSY|(1<<12)) -#define SMI_DATA 1 +#define SMI_CMD 0 +#define SMI_CMD_BUSY (1 << 15) +#define SMI_CMD_OP_READ ((2 << 10) | SMI_CMD_BUSY | (1 << 12)) +#define SMI_CMD_OP_WRITE ((1 << 10) | SMI_CMD_BUSY | (1 << 12)) +#define SMI_DATA 1 -#define MDIO_READ(dev, addr, reg) MDIO_READREG(device_get_parent(dev), (addr), (reg)) -#define MDIO_WRITE(dev, addr, reg, val) MDIO_WRITEREG(device_get_parent(dev), (addr), (reg), (val)) +#define MDIO_READ(dev, addr, reg) \ + MDIO_READREG(device_get_parent(dev), (addr), (reg)) +#define MDIO_WRITE(dev, addr, reg, val) \ + MDIO_WRITEREG(device_get_parent(dev), (addr), (reg), (val)) + static void e6000sw_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, "e6000sw", -1) == NULL) BUS_ADD_CHILD(parent, 0, "e6000sw", -1); } static int e6000sw_probe(device_t dev) { e6000sw_softc_t *sc; const char *description; - unsigned int id; phandle_t dsa_node, switch_node; + uint32_t id; dsa_node = fdt_find_compatible(OF_finddevice("/"), "marvell,dsa", 0); switch_node = OF_child(dsa_node); if (switch_node == 0) return (ENXIO); sc = device_get_softc(dev); bzero(sc, sizeof(e6000sw_softc_t)); sc->dev = dev; sc->node = switch_node; if (OF_getencprop(sc->node, "reg", &sc->sw_addr, sizeof(sc->sw_addr)) < 0) return (ENXIO); if (sc->sw_addr != 0 && (sc->sw_addr % 2) == 0) sc->multi_chip = true; /* Lock is necessary due to assertions. 
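 * e6000sw_readreg() itself does E6000SW_LOCK_ASSERT(sc, SA_XLOCKED), so the
 * lock has to be taken around the SWITCH_ID read below even though nothing
 * can race with probe yet.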
*/ sx_init(&sc->sx, "e6000sw"); E6000SW_LOCK(sc); - id = e6000sw_readreg(sc, REG_PORT(0), SWITCH_ID); + E6000SW_UNLOCK(sc); switch (id & 0xfff0) { case 0x3520: description = "Marvell 88E6352"; break; case 0x1720: description = "Marvell 88E6172"; break; case 0x1760: description = "Marvell 88E6176"; break; default: - E6000SW_UNLOCK(sc); sx_destroy(&sc->sx); device_printf(dev, "Unrecognized device, id 0x%x.\n", id); return (ENXIO); } device_set_desc(dev, description); - E6000SW_UNLOCK(sc); - return (BUS_PROBE_DEFAULT); } static int e6000sw_parse_child_fdt(device_t dev, phandle_t child, uint32_t *fixed_mask, uint32_t *cpu_mask, int *pport, int *pvlangroup) { char portlabel[100]; uint32_t port, vlangroup; boolean_t fixed_link; if (fixed_mask == NULL || cpu_mask == NULL || pport == NULL) return (ENXIO); OF_getprop(child, "label", (void *)portlabel, 100); OF_getencprop(child, "reg", (void *)&port, sizeof(port)); if (OF_getencprop(child, "vlangroup", (void *)&vlangroup, sizeof(vlangroup)) > 0) { if (vlangroup >= E6000SW_NUM_VGROUPS) return (ENXIO); *pvlangroup = vlangroup; } else { *pvlangroup = -1; } if (port >= E6000SW_MAX_PORTS) return (ENXIO); *pport = port; if (strncmp(portlabel, "cpu", 3) == 0) { device_printf(dev, "CPU port at %d\n", port); *cpu_mask |= (1 << port); return (0); } fixed_link = OF_child(child); if (fixed_link) { *fixed_mask |= (1 << port); device_printf(dev, "fixed port at %d\n", port); } else { device_printf(dev, "PHY at %d\n", port); } return (0); } static int e6000sw_init_interface(e6000sw_softc_t *sc, int port) { char name[IFNAMSIZ]; snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->dev)); sc->ifp[port] = if_alloc(IFT_ETHER); if (sc->ifp[port] == NULL) return (ENOMEM); sc->ifp[port]->if_softc = sc; sc->ifp[port]->if_flags |= IFF_UP | IFF_BROADCAST | IFF_DRV_RUNNING | IFF_SIMPLEX; sc->ifname[port] = malloc(strlen(name) + 1, M_E6000SW, M_NOWAIT); if (sc->ifname[port] == NULL) { if_free(sc->ifp[port]); return (ENOMEM); } memcpy(sc->ifname[port], name, strlen(name) + 1); if_initname(sc->ifp[port], sc->ifname[port], port); return (0); } static int e6000sw_attach_miibus(e6000sw_softc_t *sc, int port) { int err; err = mii_attach(sc->dev, &sc->miibus[port], sc->ifp[port], e6000sw_ifmedia_upd, e6000sw_ifmedia_sts, BMSR_DEFCAPMASK, port, MII_OFFSET_ANY, 0); if (err != 0) return (err); sc->mii[port] = device_get_softc(sc->miibus[port]); return (0); } static int e6000sw_attach(device_t dev) { e6000sw_softc_t *sc; phandle_t child; int err, port, vlangroup; int member_ports[E6000SW_NUM_VGROUPS]; etherswitch_vlangroup_t vg; err = 0; sc = device_get_softc(dev); if (sc->multi_chip) device_printf(dev, "multi-chip addressing mode\n"); else device_printf(dev, "single-chip addressing mode\n"); E6000SW_LOCK(sc); e6000sw_setup(dev, sc); bzero(member_ports, sizeof(member_ports)); for (child = OF_child(sc->node); child != 0; child = OF_peer(child)) { err = e6000sw_parse_child_fdt(dev, child, &sc->fixed_mask, &sc->cpuports_mask, &port, &vlangroup); if (err != 0) { device_printf(sc->dev, "failed to parse DTS\n"); goto out_fail; } if (vlangroup != -1) member_ports[vlangroup] |= (1 << port); sc->num_ports++; err = e6000sw_init_interface(sc, port); if (err != 0) { device_printf(sc->dev, "failed to init interface\n"); goto out_fail; } /* Don't attach miibus at CPU/fixed ports */ if (!e6000sw_is_phyport(sc, port)) continue; err = e6000sw_attach_miibus(sc, port); if (err != 0) { device_printf(sc->dev, "failed to attach miibus\n"); goto out_fail; } } etherswitch_info.es_nports = sc->num_ports; 
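/*
 * Illustration of the masks built above, for a hypothetical device tree
 * (not part of this change): if ports 0-3 declare vlangroup 0 and the CPU
 * port 5 does as well, member_ports[0] becomes 0x2f, and the vlangroup
 * loop below pushes that mask into each member port's PORT_VLAN_MAP via
 * e6000sw_setvgroup().
 */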
for (port = 0; port < sc->num_ports; port++) sc->vgroup[port] = E6000SW_PORT_NO_VGROUP; /* Set VLAN configuration */ e6000sw_port_vlan_conf(sc); /* Set vlangroups */ for (vlangroup = 0; vlangroup < E6000SW_NUM_VGROUPS; vlangroup++) if (member_ports[vlangroup] != 0) { vg.es_vlangroup = vg.es_vid = vlangroup; vg.es_member_ports = vg.es_untagged_ports = member_ports[vlangroup]; e6000sw_setvgroup(dev, &vg); } E6000SW_UNLOCK(sc); bus_generic_probe(dev); bus_generic_attach(dev); kproc_create(e6000sw_tick, sc, &sc->kproc, 0, 0, "e6000sw tick kproc"); return (0); out_fail: E6000SW_UNLOCK(sc); e6000sw_detach(dev); return (err); } static __inline int e6000sw_poll_done(e6000sw_softc_t *sc) { int i; - for (i = 0; i < 16; i++) { + for (i = 0; i < E6000SW_SMI_TIMEOUT; i++) { - if (!(e6000sw_readreg(sc, REG_GLOBAL2, PHY_CMD) & + if (!(e6000sw_readreg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG) & (1 << PHY_CMD_SMI_BUSY))) return (0); pause("e6000sw PHY poll", hz/1000); } return (ETIMEDOUT); } /* * PHY registers are paged. Put page index in reg 22 (accessible from every * page), then access specific register. */ static int e6000sw_readphy(device_t dev, int phy, int reg) { e6000sw_softc_t *sc; uint32_t val; int err; sc = device_get_softc(dev); - val = 0; - if (!e6000sw_is_phyport(sc, phy) || reg >= E6000SW_NUM_PHY_REGS) { device_printf(dev, "Wrong register address.\n"); return (EINVAL); } E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); err = e6000sw_poll_done(sc); if (err != 0) { device_printf(dev, "Timeout while waiting for switch\n"); return (err); } - val |= 1 << PHY_CMD_SMI_BUSY; + val = 1 << PHY_CMD_SMI_BUSY; val |= PHY_CMD_MODE_MDIO << PHY_CMD_MODE; val |= PHY_CMD_OPCODE_READ << PHY_CMD_OPCODE; val |= (reg << PHY_CMD_REG_ADDR) & PHY_CMD_REG_ADDR_MASK; val |= (phy << PHY_CMD_DEV_ADDR) & PHY_CMD_DEV_ADDR_MASK; e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG, val); err = e6000sw_poll_done(sc); if (err != 0) { device_printf(dev, "Timeout while waiting for switch\n"); return (err); } - val = e6000sw_readreg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG) - & PHY_DATA_MASK; + val = e6000sw_readreg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG); - return (val); + return (val & PHY_DATA_MASK); } static int e6000sw_writephy(device_t dev, int phy, int reg, int data) { e6000sw_softc_t *sc; uint32_t val; int err; sc = device_get_softc(dev); - val = 0; - if (!e6000sw_is_phyport(sc, phy) || reg >= E6000SW_NUM_PHY_REGS) { device_printf(dev, "Wrong register address.\n"); return (EINVAL); } E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); err = e6000sw_poll_done(sc); if (err != 0) { device_printf(dev, "Timeout while waiting for switch\n"); return (err); } + val = 1 << PHY_CMD_SMI_BUSY; val |= PHY_CMD_MODE_MDIO << PHY_CMD_MODE; - val |= 1 << PHY_CMD_SMI_BUSY; val |= PHY_CMD_OPCODE_WRITE << PHY_CMD_OPCODE; val |= (reg << PHY_CMD_REG_ADDR) & PHY_CMD_REG_ADDR_MASK; val |= (phy << PHY_CMD_DEV_ADDR) & PHY_CMD_DEV_ADDR_MASK; e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG, - data & PHY_DATA_MASK); + data & PHY_DATA_MASK); e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG, val); err = e6000sw_poll_done(sc); - if (err != 0) { + if (err != 0) device_printf(dev, "Timeout while waiting for switch\n"); - return (err); - } - return (0); + return (err); } static int e6000sw_detach(device_t dev) { int phy; e6000sw_softc_t *sc; sc = device_get_softc(dev); bus_generic_detach(dev); sx_destroy(&sc->sx); for (phy = 0; phy < sc->num_ports; phy++) { if (sc->miibus[phy] != NULL) device_delete_child(dev, sc->miibus[phy]); if (sc->ifp[phy] != NULL) if_free(sc->ifp[phy]); if (sc->ifname[phy] != 
NULL) free(sc->ifname[phy], M_E6000SW); } return (0); } static etherswitch_info_t* e6000sw_getinfo(device_t dev) { return (&etherswitch_info); } static void e6000sw_lock(device_t dev) { struct e6000sw_softc *sc; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED); E6000SW_LOCK(sc); } static void e6000sw_unlock(device_t dev) { struct e6000sw_softc *sc; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); E6000SW_UNLOCK(sc); } static int e6000sw_getport(device_t dev, etherswitch_port_t *p) { struct mii_data *mii; int err; struct ifmediareq *ifmr; - err = 0; e6000sw_softc_t *sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED); - E6000SW_LOCK(sc); + if (p->es_port >= sc->num_ports || p->es_port < 0) + return (EINVAL); - if (p->es_port >= sc->num_ports || - p->es_port < 0) { - err = EINVAL; - goto out; - } - + err = 0; + E6000SW_LOCK(sc); e6000sw_get_pvid(sc, p->es_port, &p->es_pvid); if (e6000sw_is_cpuport(sc, p->es_port)) { p->es_flags |= ETHERSWITCH_PORT_CPU; ifmr = &p->es_ifmr; ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; ifmr->ifm_count = 0; ifmr->ifm_current = ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX; ifmr->ifm_mask = 0; } else if (e6000sw_is_fixedport(sc, p->es_port)) { ifmr = &p->es_ifmr; ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; ifmr->ifm_count = 0; ifmr->ifm_current = ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX; ifmr->ifm_mask = 0; } else { mii = e6000sw_miiforphy(sc, p->es_port); err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media, SIOCGIFMEDIA); } - -out: E6000SW_UNLOCK(sc); + return (err); } static int e6000sw_setport(device_t dev, etherswitch_port_t *p) { e6000sw_softc_t *sc; int err; struct mii_data *mii; - err = 0; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED); - E6000SW_LOCK(sc); + if (p->es_port >= sc->num_ports || p->es_port < 0) + return (EINVAL); - if (p->es_port >= sc->num_ports || - p->es_port < 0) { - err = EINVAL; - goto out; - } - + err = 0; + E6000SW_LOCK(sc); if (p->es_pvid != 0) e6000sw_set_pvid(sc, p->es_port, p->es_pvid); if (!e6000sw_is_cpuport(sc, p->es_port)) { mii = e6000sw_miiforphy(sc, p->es_port); err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media, SIOCSIFMEDIA); } - -out: E6000SW_UNLOCK(sc); + return (err); } /* * Registers in this switch are divided into sections, specified in * documentation. So as to access any of them, section index and reg index * are necessary. etherswitchcfg uses only one variable, so indexes were * compressed into addr_reg: 32 * section_index + reg_index. */ static int e6000sw_readreg_wrapper(device_t dev, int addr_reg) { if ((addr_reg > (REG_GLOBAL2 * 32 + REG_NUM_MAX)) || (addr_reg < (REG_PORT(0) * 32))) { device_printf(dev, "Wrong register address.\n"); return (EINVAL); } return (e6000sw_readreg(device_get_softc(dev), addr_reg / 32, addr_reg % 32)); } static int e6000sw_writereg_wrapper(device_t dev, int addr_reg, int val) { if ((addr_reg > (REG_GLOBAL2 * 32 + REG_NUM_MAX)) || (addr_reg < (REG_PORT(0) * 32))) { device_printf(dev, "Wrong register address.\n"); return (EINVAL); } e6000sw_writereg(device_get_softc(dev), addr_reg / 32, addr_reg % 32, val); return (0); } /* * These wrappers are necessary because PHY accesses from etherswitchcfg * need to be synchronized with locks, while miibus PHY accesses do not.
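 * e6000sw_readphy() itself asserts SA_XLOCKED, so the miibus callers are
 * expected to enter with the lock already held, e.g. via e6000sw_getport()
 * and ifmedia_ioctl() above.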
*/ static int e6000sw_readphy_wrapper(device_t dev, int phy, int reg) { e6000sw_softc_t *sc; int ret; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED); E6000SW_LOCK(sc); ret = e6000sw_readphy(dev, phy, reg); E6000SW_UNLOCK(sc); return (ret); } static int e6000sw_writephy_wrapper(device_t dev, int phy, int reg, int data) { e6000sw_softc_t *sc; int ret; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED); E6000SW_LOCK(sc); ret = e6000sw_writephy(dev, phy, reg, data); E6000SW_UNLOCK(sc); return (ret); } /* * setvgroup/getvgroup calls from etherswitchcfg need to be locked, * while internal calls do not. */ static int e6000sw_setvgroup_wrapper(device_t dev, etherswitch_vlangroup_t *vg) { e6000sw_softc_t *sc; int ret; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED); E6000SW_LOCK(sc); ret = e6000sw_setvgroup(dev, vg); E6000SW_UNLOCK(sc); return (ret); } static int e6000sw_getvgroup_wrapper(device_t dev, etherswitch_vlangroup_t *vg) { e6000sw_softc_t *sc; int ret; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED); E6000SW_LOCK(sc); ret = e6000sw_getvgroup(dev, vg); E6000SW_UNLOCK(sc); return (ret); } static __inline void e6000sw_flush_port(e6000sw_softc_t *sc, int port) { uint32_t reg; - reg = e6000sw_readreg(sc, REG_PORT(port), - PORT_VLAN_MAP); + reg = e6000sw_readreg(sc, REG_PORT(port), PORT_VLAN_MAP); reg &= ~PORT_VLAN_MAP_TABLE_MASK; reg &= ~PORT_VLAN_MAP_FID_MASK; - e6000sw_writereg(sc, REG_PORT(port), - PORT_VLAN_MAP, reg); + e6000sw_writereg(sc, REG_PORT(port), PORT_VLAN_MAP, reg); if (sc->vgroup[port] != E6000SW_PORT_NO_VGROUP) { /* * If port belonged somewhere, owner-group * should have its entry removed. */ sc->members[sc->vgroup[port]] &= ~(1 << port); sc->vgroup[port] = E6000SW_PORT_NO_VGROUP; } } static __inline void e6000sw_port_assign_vgroup(e6000sw_softc_t *sc, int port, int fid, int vgroup, int members) { uint32_t reg; - reg = e6000sw_readreg(sc, REG_PORT(port), - PORT_VLAN_MAP); + reg = e6000sw_readreg(sc, REG_PORT(port), PORT_VLAN_MAP); reg &= ~PORT_VLAN_MAP_TABLE_MASK; reg &= ~PORT_VLAN_MAP_FID_MASK; reg |= members & ~(1 << port); reg |= (fid << PORT_VLAN_MAP_FID) & PORT_VLAN_MAP_FID_MASK; - e6000sw_writereg(sc, REG_PORT(port), PORT_VLAN_MAP, - reg); + e6000sw_writereg(sc, REG_PORT(port), PORT_VLAN_MAP, reg); sc->vgroup[port] = vgroup; } static int e6000sw_setvgroup(device_t dev, etherswitch_vlangroup_t *vg) { e6000sw_softc_t *sc; int port, fid; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); if (vg->es_vlangroup >= E6000SW_NUM_VGROUPS) return (EINVAL); if (vg->es_member_ports != vg->es_untagged_ports) { device_printf(dev, "Tagged ports not supported.\n"); return (EINVAL); } vg->es_untagged_ports &= PORT_VLAN_MAP_TABLE_MASK; fid = vg->es_vlangroup + 1; for (port = 0; port < sc->num_ports; port++) { if ((sc->members[vg->es_vlangroup] & (1 << port)) || (vg->es_untagged_ports & (1 << port))) e6000sw_flush_port(sc, port); if (vg->es_untagged_ports & (1 << port)) e6000sw_port_assign_vgroup(sc, port, fid, vg->es_vlangroup, vg->es_untagged_ports); } sc->vid[vg->es_vlangroup] = vg->es_vid; sc->members[vg->es_vlangroup] = vg->es_untagged_ports; return (0); } static int e6000sw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg) { e6000sw_softc_t *sc; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); if (vg->es_vlangroup >= E6000SW_NUM_VGROUPS) return (EINVAL); vg->es_untagged_ports = vg->es_member_ports = sc->members[vg->es_vlangroup]; vg->es_vid = ETHERSWITCH_VID_VALID; return (0); } static
__inline struct mii_data* e6000sw_miiforphy(e6000sw_softc_t *sc, unsigned int phy) { if (!e6000sw_is_phyport(sc, phy)) return (NULL); return (device_get_softc(sc->miibus[phy])); } static int e6000sw_ifmedia_upd(struct ifnet *ifp) { e6000sw_softc_t *sc; struct mii_data *mii; sc = ifp->if_softc; mii = e6000sw_miiforphy(sc, ifp->if_dunit); if (mii == NULL) return (ENXIO); mii_mediachg(mii); return (0); } static void e6000sw_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { e6000sw_softc_t *sc; struct mii_data *mii; sc = ifp->if_softc; mii = e6000sw_miiforphy(sc, ifp->if_dunit); if (mii == NULL) return; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } - static int e6000sw_smi_waitready(e6000sw_softc_t *sc, int phy) { int i; for (i = 0; i < E6000SW_SMI_TIMEOUT; i++) { - if ((MDIO_READ(sc->dev, phy, SMI_CMD) - & SMI_CMD_BUSY) == 0) - return 0; + if ((MDIO_READ(sc->dev, phy, SMI_CMD) & SMI_CMD_BUSY) == 0) + return (0); + DELAY(1); } - return 1; + return (1); } static __inline uint32_t e6000sw_readreg(e6000sw_softc_t *sc, int addr, int reg) { E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); if (!sc->multi_chip) return (MDIO_READ(sc->dev, addr, reg) & 0xffff); if (e6000sw_smi_waitready(sc, sc->sw_addr)) { printf("e6000sw: readreg timeout\n"); return (0xffff); } - MDIO_WRITE(sc->dev, sc->sw_addr, SMI_CMD, SMI_CMD_OP_READ | - (addr << 5) | reg); + MDIO_WRITE(sc->dev, sc->sw_addr, SMI_CMD, + SMI_CMD_OP_READ | (addr << 5) | reg); if (e6000sw_smi_waitready(sc, sc->sw_addr)) { printf("e6000sw: readreg timeout\n"); return (0xffff); } return (MDIO_READ(sc->dev, sc->sw_addr, SMI_DATA) & 0xffff); } static __inline void e6000sw_writereg(e6000sw_softc_t *sc, int addr, int reg, int val) { E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); if (!sc->multi_chip) { MDIO_WRITE(sc->dev, addr, reg, val); return; } if (e6000sw_smi_waitready(sc, sc->sw_addr)) { printf("e6000sw: readreg timeout\n"); return; } MDIO_WRITE(sc->dev, sc->sw_addr, SMI_DATA, val); - MDIO_WRITE(sc->dev, sc->sw_addr, SMI_CMD, SMI_CMD_OP_WRITE | - (addr << 5) | reg); + MDIO_WRITE(sc->dev, sc->sw_addr, SMI_CMD, + SMI_CMD_OP_WRITE | (addr << 5) | reg); if (e6000sw_smi_waitready(sc, sc->sw_addr)) { printf("e6000sw: readreg timeout\n"); return; } - - return; } static __inline int e6000sw_is_cpuport(e6000sw_softc_t *sc, int port) { return (sc->cpuports_mask & (1 << port)); } static __inline int e6000sw_is_fixedport(e6000sw_softc_t *sc, int port) { return (sc->fixed_mask & (1 << port)); } static __inline int e6000sw_is_phyport(e6000sw_softc_t *sc, int port) { uint32_t phy_mask; phy_mask = ~(sc->fixed_mask | sc->cpuports_mask); return (phy_mask & (1 << port)); } static __inline int e6000sw_set_pvid(e6000sw_softc_t *sc, int port, int pvid) { e6000sw_writereg(sc, REG_PORT(port), PORT_VID, pvid & PORT_VID_DEF_VID_MASK); return (0); } static __inline int e6000sw_get_pvid(e6000sw_softc_t *sc, int port, int *pvid) { if (pvid == NULL) return (ENXIO); *pvid = e6000sw_readreg(sc, REG_PORT(port), PORT_VID) & PORT_VID_DEF_VID_MASK; return (0); } /* * Convert port status to ifmedia. 
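 * Field layout, per the PORT_STATUS definitions in e6000swreg.h: link is
 * bit 11 (PORT_STATUS_LINK_MASK), duplex is bit 10 and the speed field
 * occupies bits 9:8, so a status word with bit 11 clear yields IFM_NONE
 * regardless of the other fields.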
*/ static void e6000sw_update_ifmedia(uint16_t portstatus, u_int *media_status, u_int *media_active) { *media_active = IFM_ETHER; *media_status = IFM_AVALID; if ((portstatus & PORT_STATUS_LINK_MASK) != 0) *media_status |= IFM_ACTIVE; else { *media_active |= IFM_NONE; return; } switch (portstatus & PORT_STATUS_SPEED_MASK) { case PORT_STATUS_SPEED_10: *media_active |= IFM_10_T; break; case PORT_STATUS_SPEED_100: *media_active |= IFM_100_TX; break; case PORT_STATUS_SPEED_1000: *media_active |= IFM_1000_T; break; } if ((portstatus & PORT_STATUS_DUPLEX_MASK) == 0) *media_active |= IFM_FDX; else *media_active |= IFM_HDX; } static void e6000sw_tick (void *arg) { e6000sw_softc_t *sc; struct mii_softc *miisc; uint16_t portstatus; int port; sc = arg; E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED); for (;;) { E6000SW_LOCK(sc); for (port = 0; port < sc->num_ports; port++) { /* Tick only on PHY ports */ if (!e6000sw_is_phyport(sc, port)) continue; portstatus = e6000sw_readreg(sc, REG_PORT(port), PORT_STATUS); e6000sw_update_ifmedia(portstatus, &sc->mii[port]->mii_media_status, &sc->mii[port]->mii_media_active); LIST_FOREACH(miisc, &sc->mii[port]->mii_phys, mii_list) { if (IFM_INST(sc->mii[port]->mii_media.ifm_cur->ifm_media) != miisc->mii_inst) continue; mii_phy_update(miisc, MII_POLLSTAT); } } E6000SW_UNLOCK(sc); pause("e6000sw tick", 1000); } } static void e6000sw_setup(device_t dev, e6000sw_softc_t *sc) { uint16_t atu_ctrl, atu_age; /* Set aging time */ e6000sw_writereg(sc, REG_GLOBAL, ATU_CONTROL, (E6000SW_DEFAULT_AGETIME << ATU_CONTROL_AGETIME) | (1 << ATU_CONTROL_LEARN2ALL)); /* Send all with specific mac address to cpu port */ e6000sw_writereg(sc, REG_GLOBAL2, MGMT_EN_2x, MGMT_EN_ALL); e6000sw_writereg(sc, REG_GLOBAL2, MGMT_EN_0x, MGMT_EN_ALL); /* Disable Remote Management */ e6000sw_writereg(sc, REG_GLOBAL, SWITCH_GLOBAL_CONTROL2, 0); /* Disable loopback filter and flow control messages */ e6000sw_writereg(sc, REG_GLOBAL2, SWITCH_MGMT, SWITCH_MGMT_PRI_MASK | (1 << SWITCH_MGMT_RSVD2CPU) | SWITCH_MGMT_FC_PRI_MASK | (1 << SWITCH_MGMT_FORCEFLOW)); e6000sw_atu_flush(dev, sc, NO_OPERATION); e6000sw_atu_mac_table(dev, sc, NULL, NO_OPERATION); e6000sw_set_atustat(dev, sc, 0, COUNT_ALL); /* Set ATU AgeTime to 15 seconds */ atu_age = 1; atu_ctrl = e6000sw_readreg(sc, REG_GLOBAL, ATU_CONTROL); /* Set new AgeTime field */ atu_ctrl &= ~ATU_CONTROL_AGETIME_MASK; e6000sw_writereg(sc, REG_GLOBAL, ATU_CONTROL, atu_ctrl | (atu_age << ATU_CONTROL_AGETIME)); } static void e6000sw_port_vlan_conf(e6000sw_softc_t *sc) { int port, ret; device_t dev; dev = sc->dev; /* Disable all ports */ for (port = 0; port < sc->num_ports; port++) { ret = e6000sw_readreg(sc, REG_PORT(port), PORT_CONTROL); e6000sw_writereg(sc, REG_PORT(port), PORT_CONTROL, (ret & ~PORT_CONTROL_ENABLE)); } /* Set port priority */ for (port = 0; port < sc->num_ports; port++) { ret = e6000sw_readreg(sc, REG_PORT(port), PORT_VID); ret &= ~PORT_VID_PRIORITY_MASK; e6000sw_writereg(sc, REG_PORT(port), PORT_VID, ret); } /* Set VID map */ for (port = 0; port < sc->num_ports; port++) { ret = e6000sw_readreg(sc, REG_PORT(port), PORT_VID); ret &= ~PORT_VID_DEF_VID_MASK; ret |= (port + 1); e6000sw_writereg(sc, REG_PORT(port), PORT_VID, ret); } /* Enable all ports */ for (port = 0; port < sc->num_ports; port++) { ret = e6000sw_readreg(sc, REG_PORT(port), PORT_CONTROL); e6000sw_writereg(sc, REG_PORT(port), PORT_CONTROL, (ret | PORT_CONTROL_ENABLE)); } } static void e6000sw_set_atustat(device_t dev, e6000sw_softc_t *sc, int bin, int flag) { uint16_t ret; ret = 
e6000sw_readreg(sc, REG_GLOBAL2, ATU_STATS); e6000sw_writereg(sc, REG_GLOBAL2, ATU_STATS, (bin << ATU_STATS_BIN ) | (flag << ATU_STATS_FLAG)); } static int e6000sw_atu_mac_table(device_t dev, e6000sw_softc_t *sc, struct atu_opt *atu, int flag) { uint16_t ret_opt; uint16_t ret_data; int retries; if (flag == NO_OPERATION) return (0); else if ((flag & (LOAD_FROM_FIB | PURGE_FROM_FIB | GET_NEXT_IN_FIB | GET_VIOLATION_DATA | CLEAR_VIOLATION_DATA)) == 0) { device_printf(dev, "Wrong Opcode for ATU operation\n"); return (EINVAL); } ret_opt = e6000sw_readreg(sc, REG_GLOBAL, ATU_OPERATION); if (ret_opt & ATU_UNIT_BUSY) { device_printf(dev, "ATU unit is busy, cannot access" "register\n"); return (EBUSY); } else { if(flag & LOAD_FROM_FIB) { ret_data = e6000sw_readreg(sc, REG_GLOBAL, ATU_DATA); e6000sw_writereg(sc, REG_GLOBAL2, ATU_DATA, (ret_data & ~ENTRY_STATE)); } e6000sw_writereg(sc, REG_GLOBAL, ATU_MAC_ADDR01, atu->mac_01); e6000sw_writereg(sc, REG_GLOBAL, ATU_MAC_ADDR23, atu->mac_23); e6000sw_writereg(sc, REG_GLOBAL, ATU_MAC_ADDR45, atu->mac_45); e6000sw_writereg(sc, REG_GLOBAL, ATU_FID, atu->fid); e6000sw_writereg(sc, REG_GLOBAL, ATU_OPERATION, (ret_opt | ATU_UNIT_BUSY | flag)); retries = E6000SW_RETRIES; while (--retries & (e6000sw_readreg(sc, REG_GLOBAL, ATU_OPERATION) & ATU_UNIT_BUSY)) DELAY(1); if (retries == 0) device_printf(dev, "Timeout while flushing\n"); else if (flag & GET_NEXT_IN_FIB) { atu->mac_01 = e6000sw_readreg(sc, REG_GLOBAL, ATU_MAC_ADDR01); atu->mac_23 = e6000sw_readreg(sc, REG_GLOBAL, ATU_MAC_ADDR23); atu->mac_45 = e6000sw_readreg(sc, REG_GLOBAL, ATU_MAC_ADDR45); } } return (0); } static int e6000sw_atu_flush(device_t dev, e6000sw_softc_t *sc, int flag) { uint16_t ret; int retries; if (flag == NO_OPERATION) return (0); ret = e6000sw_readreg(sc, REG_GLOBAL, ATU_OPERATION); if (ret & ATU_UNIT_BUSY) { device_printf(dev, "Atu unit is busy, cannot flush\n"); return (EBUSY); } else { e6000sw_writereg(sc, REG_GLOBAL, ATU_OPERATION, (ret | ATU_UNIT_BUSY | flag)); retries = E6000SW_RETRIES; while (--retries & (e6000sw_readreg(sc, REG_GLOBAL, ATU_OPERATION) & ATU_UNIT_BUSY)) DELAY(1); if (retries == 0) device_printf(dev, "Timeout while flushing\n"); } return (0); } Index: projects/clang500-import/sys/dev/etherswitch/e6000sw/e6000swreg.h =================================================================== --- projects/clang500-import/sys/dev/etherswitch/e6000sw/e6000swreg.h (revision 319548) +++ projects/clang500-import/sys/dev/etherswitch/e6000sw/e6000swreg.h (revision 319549) @@ -1,193 +1,191 @@ /*- * Copyright (c) 2015 Semihalf * Copyright (c) 2015 Stormshield * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #ifndef _E6000SWREG_H_ #define _E6000SWREG_H_ struct atu_opt { uint16_t mac_01; uint16_t mac_23; uint16_t mac_45; uint16_t fid; }; /* * Definitions for the Marvell 88E6000 series Ethernet Switch. */ /* * Switch Registers */ #define REG_GLOBAL 0x1b #define REG_GLOBAL2 0x1c #define REG_PORT(p) (0x10 + (p)) #define REG_NUM_MAX 31 /* * Per-Port Switch Registers */ #define PORT_STATUS 0x0 #define PORT_STATUS_SPEED_MASK 0x300 #define PORT_STATUS_SPEED_10 0 #define PORT_STATUS_SPEED_100 1 #define PORT_STATUS_SPEED_1000 2 #define PORT_STATUS_DUPLEX_MASK (1 << 10) #define PORT_STATUS_LINK_MASK (1 << 11) #define PORT_STATUS_PHY_DETECT_MASK (1 << 12) #define PSC_CONTROL 0x1 #define SWITCH_ID 0x3 #define PORT_CONTROL 0x4 #define PORT_CONTROL_1 0x5 #define PORT_VLAN_MAP 0x6 #define PORT_VID 0x7 #define PORT_ASSOCIATION_VECTOR 0xb #define PORT_ATU_CTRL 0xc #define RX_COUNTER 0x12 #define TX_COUNTER 0x13 #define PORT_VID_DEF_VID 0 #define PORT_VID_DEF_VID_MASK 0xfff #define PORT_VID_PRIORITY_MASK 0xc00 #define PORT_CONTROL_ENABLE 0x3 /* PORT_VLAN fields */ #define PORT_VLAN_MAP_TABLE_MASK 0x7f #define PORT_VLAN_MAP_FID 12 #define PORT_VLAN_MAP_FID_MASK 0xf000 /* * Switch Global Register 1 accessed via REG_GLOBAL_ADDR */ #define SWITCH_GLOBAL_STATUS 0 #define SWITCH_GLOBAL_CONTROL 4 #define SWITCH_GLOBAL_CONTROL2 28 #define MONITOR_CONTROL 26 /* ATU operation */ #define ATU_FID 1 #define ATU_CONTROL 10 #define ATU_OPERATION 11 #define ATU_DATA 12 #define ATU_MAC_ADDR01 13 #define ATU_MAC_ADDR23 14 #define ATU_MAC_ADDR45 15 #define ATU_UNIT_BUSY (1 << 15) #define ENTRY_STATE 0xf /* ATU_CONTROL fields */ #define ATU_CONTROL_AGETIME 4 #define ATU_CONTROL_AGETIME_MASK 0xff0 #define ATU_CONTROL_LEARN2ALL 3 /* ATU opcode */ #define NO_OPERATION (0 << 0) #define FLUSH_ALL (1 << 0) #define FLUSH_NON_STATIC (1 << 1) #define LOAD_FROM_FIB (3 << 0) #define PURGE_FROM_FIB (3 << 0) #define GET_NEXT_IN_FIB (1 << 2) #define FLUSH_ALL_IN_FIB (5 << 0) #define FLUSH_NON_STATIC_IN_FIB (3 << 1) #define GET_VIOLATION_DATA (7 << 0) #define CLEAR_VIOLATION_DATA (7 << 0) /* ATU Stats */ #define COUNT_ALL (0 << 0) /* * Switch Global Register 2 accessed via REG_GLOBAL2_ADDR */ #define MGMT_EN_2x 2 #define MGMT_EN_0x 3 #define SWITCH_MGMT 5 #define ATU_STATS 14 #define MGMT_EN_ALL 0xffff /* SWITCH_MGMT fields */ #define SWITCH_MGMT_PRI 0 #define SWITCH_MGMT_PRI_MASK 7 #define SWITCH_MGMT_RSVD2CPU 3 #define SWITCH_MGMT_FC_PRI 4 #define SWITCH_MGMT_FC_PRI_MASK (7 << 4) #define SWITCH_MGMT_FORCEFLOW 7 /* ATU_STATS fields */ #define ATU_STATS_BIN 14 #define ATU_STATS_FLAG 12 /* * PHY registers accessed via 'Switch Global Registers' (REG_GLOBAL2). 
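 * A command word for SMI_PHY_CMD_REG is composed from the fields defined
 * below; e.g. an MDIO read of register 2 on PHY address 1 is
 * (1 << PHY_CMD_SMI_BUSY) | (PHY_CMD_MODE_MDIO << PHY_CMD_MODE) |
 * (PHY_CMD_OPCODE_READ << PHY_CMD_OPCODE) | (1 << PHY_CMD_DEV_ADDR) | 2,
 * i.e. 0x9822, with the result read back from SMI_PHY_DATA_REG.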
*/ #define SMI_PHY_CMD_REG 0x18 #define SMI_PHY_DATA_REG 0x19 -#define PHY_CMD 0x18 -#define PHY_DATA 0x19 #define PHY_DATA_MASK 0xffff #define PHY_CMD_SMI_BUSY 15 #define PHY_CMD_MODE 12 #define PHY_CMD_MODE_MDIO 1 #define PHY_CMD_MODE_XMDIO 0 #define PHY_CMD_OPCODE 10 #define PHY_CMD_OPCODE_WRITE 1 #define PHY_CMD_OPCODE_READ 2 #define PHY_CMD_DEV_ADDR 5 #define PHY_CMD_DEV_ADDR_MASK 0x3e0 #define PHY_CMD_REG_ADDR 0 #define PHY_CMD_REG_ADDR_MASK 0x1f #define PHY_PAGE_REG 22 /* * Scratch and Misc register accessed via * 'Switch Global Registers' (REG_GLOBAL2) */ #define SCR_AND_MISC_REG 0x1a #define SCR_AND_MISC_PTR_CFG 0x7000 #define SCR_AND_MISC_DATA_CFG_MASK 0xf0 #define E6000SW_NUM_PHY_REGS 29 #define E6000SW_NUM_VGROUPS 8 #define E6000SW_MAX_PORTS 10 #define E6000SW_PORT_NO_VGROUP -1 #define E6000SW_DEFAULT_AGETIME 20 #define E6000SW_RETRIES 100 #define E6000SW_SMI_TIMEOUT 16 #endif /* _E6000SWREG_H_ */ Index: projects/clang500-import/sys/dev/uart/uart_bus_fdt.c =================================================================== --- projects/clang500-import/sys/dev/uart/uart_bus_fdt.c (revision 319548) +++ projects/clang500-import/sys/dev/uart/uart_bus_fdt.c (revision 319549) @@ -1,274 +1,283 @@ /*- * Copyright (c) 2009-2010 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include static int uart_fdt_probe(device_t); static device_method_t uart_fdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, uart_fdt_probe), DEVMETHOD(device_attach, uart_bus_attach), DEVMETHOD(device_detach, uart_bus_detach), { 0, 0 } }; static driver_t uart_fdt_driver = { uart_driver_name, uart_fdt_methods, sizeof(struct uart_softc), }; int uart_fdt_get_clock(phandle_t node, pcell_t *cell) { /* clock-frequency is a FreeBSD-only extension. 
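 * A node might carry it as, hypothetically:
 *	serial@e0000000 { clock-frequency = <50000000>; };
 * If neither property is present the clock is reported as 0.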
*/ if ((OF_getencprop(node, "clock-frequency", cell, sizeof(*cell))) <= 0) { /* Try to retrieve parent 'bus-frequency' */ /* XXX this should go to simple-bus fixup or so */ if ((OF_getencprop(OF_parent(node), "bus-frequency", cell, sizeof(*cell))) <= 0) *cell = 0; } return (0); } int uart_fdt_get_shift(phandle_t node, pcell_t *cell) { if ((OF_getencprop(node, "reg-shift", cell, sizeof(*cell))) <= 0) return (-1); return (0); } int uart_fdt_get_io_width(phandle_t node, pcell_t *cell) { if ((OF_getencprop(node, "reg-io-width", cell, sizeof(*cell))) <= 0) return (-1); return (0); } static uintptr_t uart_fdt_find_device(device_t dev) { struct ofw_compat_data **cd; const struct ofw_compat_data *ocd; SET_FOREACH(cd, uart_fdt_class_and_device_set) { ocd = ofw_bus_search_compatible(dev, *cd); if (ocd->ocd_data != 0) return (ocd->ocd_data); } return (0); } static int phandle_chosen_propdev(phandle_t chosen, const char *name, phandle_t *node) { char buf[64]; + char *sep; if (OF_getprop(chosen, name, buf, sizeof(buf)) <= 0) return (ENXIO); + /* + * stdout-path may have a ':' to separate the device from the + * connection settings. Split the string so we just pass the former + * to OF_finddevice. + */ + sep = strchr(buf, ':'); + if (sep != NULL) + *sep = '\0'; if ((*node = OF_finddevice(buf)) == -1) return (ENXIO); return (0); } static const struct ofw_compat_data * uart_fdt_find_compatible(phandle_t node, const struct ofw_compat_data *cd) { const struct ofw_compat_data *ocd; for (ocd = cd; ocd->ocd_str != NULL; ocd++) { if (ofw_bus_node_is_compatible(node, ocd->ocd_str)) return (ocd); } return (NULL); } static uintptr_t uart_fdt_find_by_node(phandle_t node, int class_list) { struct ofw_compat_data **cd; const struct ofw_compat_data *ocd; if (class_list) { SET_FOREACH(cd, uart_fdt_class_set) { ocd = uart_fdt_find_compatible(node, *cd); if ((ocd != NULL) && (ocd->ocd_data != 0)) return (ocd->ocd_data); } } else { SET_FOREACH(cd, uart_fdt_class_and_device_set) { ocd = uart_fdt_find_compatible(node, *cd); if ((ocd != NULL) && (ocd->ocd_data != 0)) return (ocd->ocd_data); } } return (0); } int uart_cpu_fdt_probe(struct uart_class **classp, bus_space_tag_t *bst, bus_space_handle_t *bsh, int *baud, u_int *rclk, u_int *shiftp, u_int *iowidthp) { const char *propnames[] = {"stdout-path", "linux,stdout-path", "stdout", "stdin-path", "stdin", NULL}; const char **name; struct uart_class *class; phandle_t node, chosen; pcell_t br, clk, shift, iowidth; char *cp; int err; /* Has the user forced a specific device node? */ cp = kern_getenv("hw.fdt.console"); if (cp == NULL) { /* * Retrieve /chosen/std{in,out}. */ node = -1; if ((chosen = OF_finddevice("/chosen")) != -1) { for (name = propnames; *name != NULL; name++) { if (phandle_chosen_propdev(chosen, *name, &node) == 0) break; } } if (chosen == -1 || *name == NULL) node = OF_finddevice("serial0"); /* Last ditch */ } else { node = OF_finddevice(cp); } if (node == -1) return (ENXIO); /* * Check old style of UART definition first. Unfortunately, the common * FDT processing is not possible if we have clock, power domains and * pinmux stuff. */ class = (struct uart_class *)uart_fdt_find_by_node(node, 0); if (class != NULL) { if ((err = uart_fdt_get_clock(node, &clk)) != 0) return (err); } else { /* Check class only linker set */ class = (struct uart_class *)uart_fdt_find_by_node(node, 1); if (class == NULL) return (ENXIO); clk = 0; } /* * Retrieve serial attributes. 
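 * Missing reg-shift and reg-io-width properties fall back to the class
 * defaults via uart_getregshift()/uart_getregiowidth(), and a missing
 * current-speed leaves br at 0.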
*/ if (uart_fdt_get_shift(node, &shift) != 0) shift = uart_getregshift(class); if (uart_fdt_get_io_width(node, &iowidth) != 0) iowidth = uart_getregiowidth(class); if (OF_getencprop(node, "current-speed", &br, sizeof(br)) <= 0) br = 0; err = OF_decode_addr(node, 0, bst, bsh, NULL); if (err != 0) return (err); *classp = class; *baud = br; *rclk = clk; *shiftp = shift; *iowidthp = iowidth; return (0); } static int uart_fdt_probe(device_t dev) { struct uart_softc *sc; phandle_t node; pcell_t clock, shift, iowidth; int err; sc = device_get_softc(dev); if (!ofw_bus_status_okay(dev)) return (ENXIO); sc->sc_class = (struct uart_class *)uart_fdt_find_device(dev); if (sc->sc_class == NULL) return (ENXIO); node = ofw_bus_get_node(dev); if ((err = uart_fdt_get_clock(node, &clock)) != 0) return (err); if (uart_fdt_get_shift(node, &shift) != 0) shift = uart_getregshift(sc->sc_class); if (uart_fdt_get_io_width(node, &iowidth) != 0) iowidth = uart_getregiowidth(sc->sc_class); return (uart_bus_probe(dev, (int)shift, (int)iowidth, (int)clock, 0, 0)); } DRIVER_MODULE(uart, simplebus, uart_fdt_driver, uart_devclass, 0, 0); DRIVER_MODULE(uart, ofwbus, uart_fdt_driver, uart_devclass, 0, 0); Index: projects/clang500-import/sys/dev/xen/netfront/netfront.c =================================================================== --- projects/clang500-import/sys/dev/xen/netfront/netfront.c (revision 319548) +++ projects/clang500-import/sys/dev/xen/netfront/netfront.c (revision 319549) @@ -1,2336 +1,2339 @@ /*- * Copyright (c) 2004-2006 Kip Macy * Copyright (c) 2015 Wei Liu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_if.h" /* Features supported by all backends. 
TSO and LRO can be negotiated */ #define XN_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE) #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE) #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1) /* * Should the driver do LRO on the RX end * this can be toggled on the fly, but the * interface must be reset (down/up) for it * to take effect. */ static int xn_enable_lro = 1; TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro); /* * Number of pairs of queues. */ static unsigned long xn_num_queues = 4; TUNABLE_ULONG("hw.xn.num_queues", &xn_num_queues); /** * \brief The maximum allowed data fragments in a single transmit * request. * * This limit is imposed by the backend driver. We assume here that * we are dealing with a Linux driver domain and have set our limit * to mirror the Linux MAX_SKB_FRAGS constant. */ #define MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2) #define RX_COPY_THRESHOLD 256 #define net_ratelimit() 0 struct netfront_rxq; struct netfront_txq; struct netfront_info; struct netfront_rx_info; static void xn_txeof(struct netfront_txq *); static void xn_rxeof(struct netfront_rxq *); static void xn_alloc_rx_buffers(struct netfront_rxq *); static void xn_alloc_rx_buffers_callout(void *arg); static void xn_release_rx_bufs(struct netfront_rxq *); static void xn_release_tx_bufs(struct netfront_txq *); static void xn_rxq_intr(struct netfront_rxq *); static void xn_txq_intr(struct netfront_txq *); static void xn_intr(void *); static inline int xn_count_frags(struct mbuf *m); static int xn_assemble_tx_request(struct netfront_txq *, struct mbuf *); static int xn_ioctl(struct ifnet *, u_long, caddr_t); static void xn_ifinit_locked(struct netfront_info *); static void xn_ifinit(void *); static void xn_stop(struct netfront_info *); static void xn_query_features(struct netfront_info *np); static int xn_configure_features(struct netfront_info *np); static void netif_free(struct netfront_info *info); static int netfront_detach(device_t dev); static int xn_txq_mq_start_locked(struct netfront_txq *, struct mbuf *); static int xn_txq_mq_start(struct ifnet *, struct mbuf *); static int talk_to_backend(device_t dev, struct netfront_info *info); static int create_netdev(device_t dev); static void netif_disconnect_backend(struct netfront_info *info); static int setup_device(device_t dev, struct netfront_info *info, unsigned long); static int xn_ifmedia_upd(struct ifnet *ifp); static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); static int xn_connect(struct netfront_info *); static void xn_kick_rings(struct netfront_info *); static int xn_get_responses(struct netfront_rxq *, struct netfront_rx_info *, RING_IDX, RING_IDX *, struct mbuf **); #define virt_to_mfn(x) (vtophys(x) >> PAGE_SHIFT) #define INVALID_P2M_ENTRY (~0UL) #define XN_QUEUE_NAME_LEN 8 /* xn{t,r}x_%u, allow for two digits */ struct netfront_rxq { struct netfront_info *info; u_int id; char name[XN_QUEUE_NAME_LEN]; struct mtx lock; int ring_ref; netif_rx_front_ring_t ring; xen_intr_handle_t xen_intr_handle; grant_ref_t gref_head; grant_ref_t grant_ref[NET_RX_RING_SIZE + 1]; struct mbuf *mbufs[NET_RX_RING_SIZE + 1]; struct lro_ctrl lro; struct callout rx_refill; }; struct netfront_txq { struct netfront_info *info; u_int id; char name[XN_QUEUE_NAME_LEN]; struct mtx lock; int ring_ref; netif_tx_front_ring_t ring; xen_intr_handle_t xen_intr_handle; grant_ref_t gref_head; grant_ref_t grant_ref[NET_TX_RING_SIZE + 1]; struct mbuf *mbufs[NET_TX_RING_SIZE + 1]; 
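	/*
	 * mbufs[] maps ring request ids to the in-flight mbufs; entry 0
	 * doubles as the head of the free-id list (see
	 * add_id_to_freelist() below), hence the "+ 1" sizing.
	 */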
int mbufs_cnt; struct buf_ring *br; struct taskqueue *tq; struct task defrtask; bool full; }; struct netfront_info { struct ifnet *xn_ifp; struct mtx sc_lock; u_int num_queues; struct netfront_rxq *rxq; struct netfront_txq *txq; u_int carrier; u_int maxfrags; device_t xbdev; uint8_t mac[ETHER_ADDR_LEN]; int xn_if_flags; struct ifmedia sc_media; bool xn_reset; }; struct netfront_rx_info { struct netif_rx_response rx; struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; #define XN_RX_LOCK(_q) mtx_lock(&(_q)->lock) #define XN_RX_UNLOCK(_q) mtx_unlock(&(_q)->lock) #define XN_TX_LOCK(_q) mtx_lock(&(_q)->lock) #define XN_TX_TRYLOCK(_q) mtx_trylock(&(_q)->lock) #define XN_TX_UNLOCK(_q) mtx_unlock(&(_q)->lock) #define XN_LOCK(_sc) mtx_lock(&(_sc)->sc_lock); #define XN_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_lock); #define XN_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_lock, MA_OWNED); #define XN_RX_LOCK_ASSERT(_q) mtx_assert(&(_q)->lock, MA_OWNED); #define XN_TX_LOCK_ASSERT(_q) mtx_assert(&(_q)->lock, MA_OWNED); #define netfront_carrier_on(netif) ((netif)->carrier = 1) #define netfront_carrier_off(netif) ((netif)->carrier = 0) #define netfront_carrier_ok(netif) ((netif)->carrier) /* Access macros for acquiring/freeing slots in the mbufs[] free lists. */ static inline void add_id_to_freelist(struct mbuf **list, uintptr_t id) { KASSERT(id != 0, ("%s: the head item (0) must always be free.", __func__)); list[id] = list[0]; list[0] = (struct mbuf *)id; } static inline unsigned short get_id_from_freelist(struct mbuf **list) { uintptr_t id; id = (uintptr_t)list[0]; KASSERT(id != 0, ("%s: the head item (0) must always remain free.", __func__)); list[0] = list[id]; return (id); } static inline int xn_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static inline struct mbuf * xn_get_rx_mbuf(struct netfront_rxq *rxq, RING_IDX ri) { int i; struct mbuf *m; i = xn_rxidx(ri); m = rxq->mbufs[i]; rxq->mbufs[i] = NULL; return (m); } static inline grant_ref_t xn_get_rx_ref(struct netfront_rxq *rxq, RING_IDX ri) { int i = xn_rxidx(ri); grant_ref_t ref = rxq->grant_ref[i]; KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n")); rxq->grant_ref[i] = GRANT_REF_INVALID; return (ref); } #define IPRINTK(fmt, args...) \ printf("[XEN] " fmt, ##args) #ifdef INVARIANTS #define WPRINTK(fmt, args...) \ printf("[XEN] " fmt, ##args) #else #define WPRINTK(fmt, args...) #endif #ifdef DEBUG #define DPRINTK(fmt, args...) \ printf("[XEN] %s: " fmt, __func__, ##args) #else #define DPRINTK(fmt, args...) #endif /** * Read the 'mac' node at the given device's node in the store, and parse that * as colon-separated octets, placing the result in the given mac array. mac must be * a preallocated array of length ETHER_ADDR_LEN. * Return 0 on success, or errno on error. */ static int xen_net_read_mac(device_t dev, uint8_t mac[]) { int error, i; char *s, *e, *macstr; const char *path; path = xenbus_get_node(dev); error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr); if (error == ENOENT) { /* * Deal with missing mac XenStore nodes on devices with * HVM emulation (the 'ioemu' configuration attribute) * enabled. * * The HVM emulator may execute in a stub device model * domain which lacks the permission, only given to Dom0, * to update the guest's XenStore tree. For this reason, * the HVM emulator doesn't even attempt to write the * front-side mac node, even when operating in Dom0. * However, there should always be a mac listed in the * backend tree.
Fall back to this version if our query * of the front-side XenStore location doesn't find * anything. */ path = xenbus_get_otherend_path(dev); error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr); } if (error != 0) { xenbus_dev_fatal(dev, error, "parsing %s/mac", path); return (error); } s = macstr; for (i = 0; i < ETHER_ADDR_LEN; i++) { mac[i] = strtoul(s, &e, 16); if (s == e || (e[0] != ':' && e[0] != 0)) { free(macstr, M_XENBUS); return (ENOENT); } s = &e[1]; } free(macstr, M_XENBUS); return (0); } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Connected state. */ static int netfront_probe(device_t dev) { if (xen_hvm_domain() && xen_disable_pv_nics != 0) return (ENXIO); if (!strcmp(xenbus_get_type(dev), "vif")) { device_set_desc(dev, "Virtual Network Interface"); return (0); } return (ENXIO); } static int netfront_attach(device_t dev) { int err; err = create_netdev(dev); if (err != 0) { xenbus_dev_fatal(dev, err, "creating netdev"); return (err); } SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "enable_lro", CTLFLAG_RW, &xn_enable_lro, 0, "Large Receive Offload"); SYSCTL_ADD_ULONG(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "num_queues", CTLFLAG_RD, &xn_num_queues, "Number of pairs of queues"); return (0); } static int netfront_suspend(device_t dev) { struct netfront_info *np = device_get_softc(dev); u_int i; for (i = 0; i < np->num_queues; i++) { XN_RX_LOCK(&np->rxq[i]); XN_TX_LOCK(&np->txq[i]); } netfront_carrier_off(np); for (i = 0; i < np->num_queues; i++) { XN_RX_UNLOCK(&np->rxq[i]); XN_TX_UNLOCK(&np->txq[i]); } return (0); } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int netfront_resume(device_t dev) { struct netfront_info *info = device_get_softc(dev); u_int i; if (xen_suspend_cancelled) { for (i = 0; i < info->num_queues; i++) { XN_RX_LOCK(&info->rxq[i]); XN_TX_LOCK(&info->txq[i]); } netfront_carrier_on(info); for (i = 0; i < info->num_queues; i++) { XN_RX_UNLOCK(&info->rxq[i]); XN_TX_UNLOCK(&info->txq[i]); } return (0); } netif_disconnect_backend(info); return (0); } static int write_queue_xenstore_keys(device_t dev, struct netfront_rxq *rxq, struct netfront_txq *txq, struct xs_transaction *xst, bool hierarchy) { int err; const char *message; const char *node = xenbus_get_node(dev); char *path; size_t path_size; KASSERT(rxq->id == txq->id, ("Mismatch between RX and TX queue ids")); /* Split event channel support is not yet there.
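* A single event channel therefore serves both the RX and the TX ring of a queue pair, which the assertion below relies on.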
*/ KASSERT(rxq->xen_intr_handle == txq->xen_intr_handle, ("Split event channels are not supported")); if (hierarchy) { path_size = strlen(node) + 10; path = malloc(path_size, M_DEVBUF, M_WAITOK|M_ZERO); snprintf(path, path_size, "%s/queue-%u", node, rxq->id); } else { path_size = strlen(node) + 1; path = malloc(path_size, M_DEVBUF, M_WAITOK|M_ZERO); snprintf(path, path_size, "%s", node); } err = xs_printf(*xst, path, "tx-ring-ref","%u", txq->ring_ref); if (err != 0) { message = "writing tx ring-ref"; goto error; } err = xs_printf(*xst, path, "rx-ring-ref","%u", rxq->ring_ref); if (err != 0) { message = "writing rx ring-ref"; goto error; } err = xs_printf(*xst, path, "event-channel", "%u", xen_intr_port(rxq->xen_intr_handle)); if (err != 0) { message = "writing event-channel"; goto error; } free(path, M_DEVBUF); return (0); error: free(path, M_DEVBUF); xenbus_dev_fatal(dev, err, "%s", message); return (err); } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(device_t dev, struct netfront_info *info) { const char *message; struct xs_transaction xst; const char *node = xenbus_get_node(dev); int err; unsigned long num_queues, max_queues = 0; unsigned int i; err = xen_net_read_mac(dev, info->mac); if (err != 0) { xenbus_dev_fatal(dev, err, "parsing %s/mac", node); goto out; } err = xs_scanf(XST_NIL, xenbus_get_otherend_path(info->xbdev), "multi-queue-max-queues", NULL, "%lu", &max_queues); if (err != 0) max_queues = 1; num_queues = xn_num_queues; if (num_queues > max_queues) num_queues = max_queues; err = setup_device(dev, info, num_queues); if (err != 0) goto out; again: err = xs_transaction_start(&xst); if (err != 0) { xenbus_dev_fatal(dev, err, "starting transaction"); goto free; } if (info->num_queues == 1) { err = write_queue_xenstore_keys(dev, &info->rxq[0], &info->txq[0], &xst, false); if (err != 0) goto abort_transaction_no_def_error; } else { err = xs_printf(xst, node, "multi-queue-num-queues", "%u", info->num_queues); if (err != 0) { message = "writing multi-queue-num-queues"; goto abort_transaction; } for (i = 0; i < info->num_queues; i++) { err = write_queue_xenstore_keys(dev, &info->rxq[i], &info->txq[i], &xst, true); if (err != 0) goto abort_transaction_no_def_error; } } err = xs_printf(xst, node, "request-rx-copy", "%u", 1); if (err != 0) { message = "writing request-rx-copy"; goto abort_transaction; } err = xs_printf(xst, node, "feature-rx-notify", "%d", 1); if (err != 0) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xs_printf(xst, node, "feature-sg", "%d", 1); if (err != 0) { message = "writing feature-sg"; goto abort_transaction; } if ((info->xn_ifp->if_capenable & IFCAP_LRO) != 0) { err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1); if (err != 0) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } } if ((info->xn_ifp->if_capenable & IFCAP_RXCSUM) == 0) { err = xs_printf(xst, node, "feature-no-csum-offload", "%d", 1); if (err != 0) { message = "writing feature-no-csum-offload"; goto abort_transaction; } } err = xs_transaction_end(xst, 0); if (err != 0) { if (err == EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto free; } return 0; abort_transaction: xenbus_dev_fatal(dev, err, "%s", message); abort_transaction_no_def_error: xs_transaction_end(xst, 1); free: netif_free(info); out: return (err); } static void xn_rxq_intr(struct netfront_rxq *rxq) { XN_RX_LOCK(rxq); xn_rxeof(rxq); XN_RX_UNLOCK(rxq); } static void xn_txq_start(struct netfront_txq *txq) { struct 
netfront_info *np = txq->info; struct ifnet *ifp = np->xn_ifp; XN_TX_LOCK_ASSERT(txq); if (!drbr_empty(ifp, txq->br)) xn_txq_mq_start_locked(txq, NULL); } static void xn_txq_intr(struct netfront_txq *txq) { XN_TX_LOCK(txq); if (RING_HAS_UNCONSUMED_RESPONSES(&txq->ring)) xn_txeof(txq); xn_txq_start(txq); XN_TX_UNLOCK(txq); } static void xn_txq_tq_deferred(void *xtxq, int pending) { struct netfront_txq *txq = xtxq; XN_TX_LOCK(txq); xn_txq_start(txq); XN_TX_UNLOCK(txq); } static void disconnect_rxq(struct netfront_rxq *rxq) { xn_release_rx_bufs(rxq); gnttab_free_grant_references(rxq->gref_head); gnttab_end_foreign_access(rxq->ring_ref, NULL); /* * No split event channel support at the moment, handle will * be unbound in tx. So no need to call xen_intr_unbind here, * but we do want to reset the handler to 0. */ rxq->xen_intr_handle = 0; } static void destroy_rxq(struct netfront_rxq *rxq) { callout_drain(&rxq->rx_refill); free(rxq->ring.sring, M_DEVBUF); } static void destroy_rxqs(struct netfront_info *np) { int i; for (i = 0; i < np->num_queues; i++) destroy_rxq(&np->rxq[i]); free(np->rxq, M_DEVBUF); np->rxq = NULL; } static int setup_rxqs(device_t dev, struct netfront_info *info, unsigned long num_queues) { int q, i; int error; netif_rx_sring_t *rxs; struct netfront_rxq *rxq; info->rxq = malloc(sizeof(struct netfront_rxq) * num_queues, M_DEVBUF, M_WAITOK|M_ZERO); for (q = 0; q < num_queues; q++) { rxq = &info->rxq[q]; rxq->id = q; rxq->info = info; rxq->ring_ref = GRANT_REF_INVALID; rxq->ring.sring = NULL; snprintf(rxq->name, XN_QUEUE_NAME_LEN, "xnrx_%u", q); mtx_init(&rxq->lock, rxq->name, "netfront receive lock", MTX_DEF); for (i = 0; i <= NET_RX_RING_SIZE; i++) { rxq->mbufs[i] = NULL; rxq->grant_ref[i] = GRANT_REF_INVALID; } /* Start resources allocation */ if (gnttab_alloc_grant_references(NET_RX_RING_SIZE, &rxq->gref_head) != 0) { device_printf(dev, "allocating rx gref"); error = ENOMEM; goto fail; } rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK|M_ZERO); SHARED_RING_INIT(rxs); FRONT_RING_INIT(&rxq->ring, rxs, PAGE_SIZE); error = xenbus_grant_ring(dev, virt_to_mfn(rxs), &rxq->ring_ref); if (error != 0) { device_printf(dev, "granting rx ring page"); goto fail_grant_ring; } callout_init(&rxq->rx_refill, 1); } return (0); fail_grant_ring: gnttab_free_grant_references(rxq->gref_head); free(rxq->ring.sring, M_DEVBUF); fail: for (; q >= 0; q--) { disconnect_rxq(&info->rxq[q]); destroy_rxq(&info->rxq[q]); } free(info->rxq, M_DEVBUF); return (error); } static void disconnect_txq(struct netfront_txq *txq) { xn_release_tx_bufs(txq); gnttab_free_grant_references(txq->gref_head); gnttab_end_foreign_access(txq->ring_ref, NULL); xen_intr_unbind(&txq->xen_intr_handle); } static void destroy_txq(struct netfront_txq *txq) { free(txq->ring.sring, M_DEVBUF); buf_ring_free(txq->br, M_DEVBUF); taskqueue_drain_all(txq->tq); taskqueue_free(txq->tq); } static void destroy_txqs(struct netfront_info *np) { int i; for (i = 0; i < np->num_queues; i++) destroy_txq(&np->txq[i]); free(np->txq, M_DEVBUF); np->txq = NULL; } static int setup_txqs(device_t dev, struct netfront_info *info, unsigned long num_queues) { int q, i; int error; netif_tx_sring_t *txs; struct netfront_txq *txq; info->txq = malloc(sizeof(struct netfront_txq) * num_queues, M_DEVBUF, M_WAITOK|M_ZERO); for (q = 0; q < num_queues; q++) { txq = &info->txq[q]; txq->id = q; txq->info = info; txq->ring_ref = GRANT_REF_INVALID; txq->ring.sring = NULL; snprintf(txq->name, XN_QUEUE_NAME_LEN, "xntx_%u", q); mtx_init(&txq->lock, txq->name, 
"netfront transmit lock", MTX_DEF); for (i = 0; i <= NET_TX_RING_SIZE; i++) { txq->mbufs[i] = (void *) ((u_long) i+1); txq->grant_ref[i] = GRANT_REF_INVALID; } txq->mbufs[NET_TX_RING_SIZE] = (void *)0; /* Start resources allocation. */ if (gnttab_alloc_grant_references(NET_TX_RING_SIZE, &txq->gref_head) != 0) { device_printf(dev, "failed to allocate tx grant refs\n"); error = ENOMEM; goto fail; } txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK|M_ZERO); SHARED_RING_INIT(txs); FRONT_RING_INIT(&txq->ring, txs, PAGE_SIZE); error = xenbus_grant_ring(dev, virt_to_mfn(txs), &txq->ring_ref); if (error != 0) { device_printf(dev, "failed to grant tx ring\n"); goto fail_grant_ring; } txq->br = buf_ring_alloc(NET_TX_RING_SIZE, M_DEVBUF, M_WAITOK, &txq->lock); TASK_INIT(&txq->defrtask, 0, xn_txq_tq_deferred, txq); txq->tq = taskqueue_create(txq->name, M_WAITOK, taskqueue_thread_enqueue, &txq->tq); error = taskqueue_start_threads(&txq->tq, 1, PI_NET, "%s txq %d", device_get_nameunit(dev), txq->id); if (error != 0) { device_printf(dev, "failed to start tx taskq %d\n", txq->id); goto fail_start_thread; } error = xen_intr_alloc_and_bind_local_port(dev, xenbus_get_otherend_id(dev), /* filter */ NULL, xn_intr, &info->txq[q], INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY, &txq->xen_intr_handle); if (error != 0) { device_printf(dev, "xen_intr_alloc_and_bind_local_port failed\n"); goto fail_bind_port; } } return (0); fail_bind_port: taskqueue_drain_all(txq->tq); fail_start_thread: buf_ring_free(txq->br, M_DEVBUF); taskqueue_free(txq->tq); gnttab_end_foreign_access(txq->ring_ref, NULL); fail_grant_ring: gnttab_free_grant_references(txq->gref_head); free(txq->ring.sring, M_DEVBUF); fail: for (; q >= 0; q--) { disconnect_txq(&info->txq[q]); destroy_txq(&info->txq[q]); } free(info->txq, M_DEVBUF); return (error); } static int setup_device(device_t dev, struct netfront_info *info, unsigned long num_queues) { int error; int q; if (info->txq) destroy_txqs(info); if (info->rxq) destroy_rxqs(info); info->num_queues = 0; error = setup_rxqs(dev, info, num_queues); if (error != 0) goto out; error = setup_txqs(dev, info, num_queues); if (error != 0) goto out; info->num_queues = num_queues; /* No split event channel at the moment. */ for (q = 0; q < num_queues; q++) info->rxq[q].xen_intr_handle = info->txq[q].xen_intr_handle; return (0); out: KASSERT(error != 0, ("Error path taken without providing an error code")); return (error); } #ifdef INET /** * If this interface has an ipv4 address, send an arp for it. This * helps to get the network going again after migrating hosts. */ static void netfront_send_fake_arp(device_t dev, struct netfront_info *info) { struct ifnet *ifp; struct ifaddr *ifa; ifp = info->xn_ifp; TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family == AF_INET) { arp_ifinit(ifp, ifa); } } } #endif /** * Callback received when the backend's state changes. */ static void netfront_backend_changed(device_t dev, XenbusState newstate) { struct netfront_info *sc = device_get_softc(dev); DPRINTK("newstate=%d\n", newstate); switch (newstate) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateUnknown: case XenbusStateReconfigured: case XenbusStateReconfiguring: break; case XenbusStateInitWait: if (xenbus_get_state(dev) != XenbusStateInitialising) break; if (xn_connect(sc) != 0) break; /* Switch to connected state before kicking the rings. 
*/ xenbus_set_state(sc->xbdev, XenbusStateConnected); xn_kick_rings(sc); break; case XenbusStateClosing: xenbus_set_state(dev, XenbusStateClosed); break; case XenbusStateClosed: if (sc->xn_reset) { netif_disconnect_backend(sc); xenbus_set_state(dev, XenbusStateInitialising); sc->xn_reset = false; } break; case XenbusStateConnected: #ifdef INET netfront_send_fake_arp(dev, sc); #endif break; } } /** * \brief Verify that there is sufficient space in the Tx ring * buffer for a maximally sized request to be enqueued. * * A transmit request requires a transmit descriptor for each packet * fragment, plus up to 2 entries for "options" (e.g. TSO). */ static inline int xn_tx_slot_available(struct netfront_txq *txq) { return (RING_FREE_REQUESTS(&txq->ring) > (MAX_TX_REQ_FRAGS + 2)); } static void xn_release_tx_bufs(struct netfront_txq *txq) { int i; for (i = 1; i <= NET_TX_RING_SIZE; i++) { struct mbuf *m; m = txq->mbufs[i]; /* * We assume that no kernel addresses are * less than NET_TX_RING_SIZE. Any entry * in the table that is below this number * must be an index from free-list tracking. */ if (((uintptr_t)m) <= NET_TX_RING_SIZE) continue; gnttab_end_foreign_access_ref(txq->grant_ref[i]); gnttab_release_grant_reference(&txq->gref_head, txq->grant_ref[i]); txq->grant_ref[i] = GRANT_REF_INVALID; add_id_to_freelist(txq->mbufs, i); txq->mbufs_cnt--; if (txq->mbufs_cnt < 0) { panic("%s: tx_chain_cnt must be >= 0", __func__); } m_free(m); } } static struct mbuf * xn_alloc_one_rx_buffer(struct netfront_rxq *rxq) { struct mbuf *m; m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); if (m == NULL) return NULL; m->m_len = m->m_pkthdr.len = MJUMPAGESIZE; return (m); } static void xn_alloc_rx_buffers(struct netfront_rxq *rxq) { RING_IDX req_prod; int notify; XN_RX_LOCK_ASSERT(rxq); if (__predict_false(rxq->info->carrier == 0)) return; for (req_prod = rxq->ring.req_prod_pvt; req_prod - rxq->ring.rsp_cons < NET_RX_RING_SIZE; req_prod++) { struct mbuf *m; unsigned short id; grant_ref_t ref; struct netif_rx_request *req; unsigned long pfn; m = xn_alloc_one_rx_buffer(rxq); if (m == NULL) break; id = xn_rxidx(req_prod); KASSERT(rxq->mbufs[id] == NULL, ("non-NULL xn_rx_chain")); rxq->mbufs[id] = m; ref = gnttab_claim_grant_reference(&rxq->gref_head); KASSERT(ref != GNTTAB_LIST_END, ("reserved grant references exhausted")); rxq->grant_ref[id] = ref; pfn = atop(vtophys(mtod(m, vm_offset_t))); req = RING_GET_REQUEST(&rxq->ring, req_prod); gnttab_grant_foreign_access_ref(ref, xenbus_get_otherend_id(rxq->info->xbdev), pfn, 0); req->id = id; req->gref = ref; } rxq->ring.req_prod_pvt = req_prod; /* Not enough requests? Try again later.
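* The rx_refill callout below re-invokes xn_alloc_rx_buffers() after about hz/10 ticks, on the assumption that the mbuf shortage is transient.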
*/ if (req_prod - rxq->ring.rsp_cons < NET_RX_SLOTS_MIN) { callout_reset_curcpu(&rxq->rx_refill, hz/10, xn_alloc_rx_buffers_callout, rxq); return; } wmb(); /* barrier so backend sees requests */ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rxq->ring, notify); if (notify) xen_intr_signal(rxq->xen_intr_handle); } static void xn_alloc_rx_buffers_callout(void *arg) { struct netfront_rxq *rxq; rxq = (struct netfront_rxq *)arg; XN_RX_LOCK(rxq); xn_alloc_rx_buffers(rxq); XN_RX_UNLOCK(rxq); } static void xn_release_rx_bufs(struct netfront_rxq *rxq) { int i, ref; struct mbuf *m; for (i = 0; i < NET_RX_RING_SIZE; i++) { m = rxq->mbufs[i]; if (m == NULL) continue; ref = rxq->grant_ref[i]; if (ref == GRANT_REF_INVALID) continue; gnttab_end_foreign_access_ref(ref); gnttab_release_grant_reference(&rxq->gref_head, ref); rxq->mbufs[i] = NULL; rxq->grant_ref[i] = GRANT_REF_INVALID; m_freem(m); } } static void xn_rxeof(struct netfront_rxq *rxq) { struct ifnet *ifp; struct netfront_info *np = rxq->info; #if (defined(INET) || defined(INET6)) struct lro_ctrl *lro = &rxq->lro; #endif struct netfront_rx_info rinfo; struct netif_rx_response *rx = &rinfo.rx; struct netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; struct mbuf *m; struct mbufq mbufq_rxq, mbufq_errq; int err, work_to_do; XN_RX_LOCK_ASSERT(rxq); if (!netfront_carrier_ok(np)) return; /* XXX: there should be some sane limit. */ mbufq_init(&mbufq_errq, INT_MAX); mbufq_init(&mbufq_rxq, INT_MAX); ifp = np->xn_ifp; do { rp = rxq->ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ i = rxq->ring.rsp_cons; while ((i != rp)) { memcpy(rx, RING_GET_RESPONSE(&rxq->ring, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); m = NULL; err = xn_get_responses(rxq, &rinfo, rp, &i, &m); if (__predict_false(err)) { if (m) (void )mbufq_enqueue(&mbufq_errq, m); if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); continue; } m->m_pkthdr.rcvif = ifp; if (rx->flags & NETRXF_data_validated) { /* * According to mbuf(9) the correct way to tell * the stack that the checksum of an inbound * packet is correct, without it actually being * present (because the underlying interface * doesn't provide it), is to set the * CSUM_DATA_VALID and CSUM_PSEUDO_HDR flags, * and the csum_data field to 0xffff. */ m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); m->m_pkthdr.csum_data = 0xffff; } if ((rx->flags & NETRXF_extra_info) != 0 && (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type == XEN_NETIF_EXTRA_TYPE_GSO)) { m->m_pkthdr.tso_segsz = extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].u.gso.size; m->m_pkthdr.csum_flags |= CSUM_TSO; } (void )mbufq_enqueue(&mbufq_rxq, m); } rxq->ring.rsp_cons = i; xn_alloc_rx_buffers(rxq); RING_FINAL_CHECK_FOR_RESPONSES(&rxq->ring, work_to_do); } while (work_to_do); mbufq_drain(&mbufq_errq); /* * Process all the mbufs after the remapping is complete. * Break the mbuf chain first though. */ while ((m = mbufq_dequeue(&mbufq_rxq)) != NULL) { if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); #if (defined(INET) || defined(INET6)) /* Use LRO if possible */ if ((ifp->if_capenable & IFCAP_LRO) == 0 || lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) { /* * If LRO fails, pass up to the stack * directly.
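* (tcp_lro_rx() returns non-zero when it cannot aggregate the mbuf.)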
*/ (*ifp->if_input)(ifp, m); } #else (*ifp->if_input)(ifp, m); #endif } #if (defined(INET) || defined(INET6)) /* * Flush any outstanding LRO work */ tcp_lro_flush_all(lro); #endif } static void xn_txeof(struct netfront_txq *txq) { RING_IDX i, prod; unsigned short id; struct ifnet *ifp; netif_tx_response_t *txr; struct mbuf *m; struct netfront_info *np = txq->info; XN_TX_LOCK_ASSERT(txq); if (!netfront_carrier_ok(np)) return; ifp = np->xn_ifp; do { prod = txq->ring.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'prod'. */ for (i = txq->ring.rsp_cons; i != prod; i++) { txr = RING_GET_RESPONSE(&txq->ring, i); if (txr->status == NETIF_RSP_NULL) continue; if (txr->status != NETIF_RSP_OKAY) { printf("%s: WARNING: response is %d!\n", __func__, txr->status); } id = txr->id; m = txq->mbufs[id]; KASSERT(m != NULL, ("mbuf not found in chain")); KASSERT((uintptr_t)m > NET_TX_RING_SIZE, ("mbuf already on the free list, but we're " "trying to free it again!")); M_ASSERTVALID(m); if (__predict_false(gnttab_query_foreign_access( txq->grant_ref[id]) != 0)) { panic("%s: grant id %u still in use by the " "backend", __func__, id); } gnttab_end_foreign_access_ref(txq->grant_ref[id]); gnttab_release_grant_reference( &txq->gref_head, txq->grant_ref[id]); txq->grant_ref[id] = GRANT_REF_INVALID; txq->mbufs[id] = NULL; add_id_to_freelist(txq->mbufs, id); txq->mbufs_cnt--; m_free(m); /* Only mark the txq active if we've freed up at least one slot to try */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } txq->ring.rsp_cons = prod; /* * Set a new event, then check for race with update of * tx_cons. Note that it is essential to schedule a * callback, no matter how few buffers are pending. Even if * there is space in the transmit ring, higher layers may * be blocked because too much data is outstanding: in such * cases notification from Xen is likely to be the only kick * that we'll get.
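* Placing rsp_event halfway between the current response and request producers bounds the wait to roughly half of the outstanding requests.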
*/ txq->ring.sring->rsp_event = prod + ((txq->ring.sring->req_prod - prod) >> 1) + 1; mb(); } while (prod != txq->ring.sring->rsp_prod); if (txq->full && ((txq->ring.sring->req_prod - prod) < NET_TX_RING_SIZE)) { txq->full = false; xn_txq_start(txq); } } static void xn_intr(void *xsc) { struct netfront_txq *txq = xsc; struct netfront_info *np = txq->info; struct netfront_rxq *rxq = &np->rxq[txq->id]; /* kick both tx and rx */ xn_rxq_intr(rxq); xn_txq_intr(txq); } static void xn_move_rx_slot(struct netfront_rxq *rxq, struct mbuf *m, grant_ref_t ref) { int new = xn_rxidx(rxq->ring.req_prod_pvt); KASSERT(rxq->mbufs[new] == NULL, ("mbufs != NULL")); rxq->mbufs[new] = m; rxq->grant_ref[new] = ref; RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->id = new; RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->gref = ref; rxq->ring.req_prod_pvt++; } static int xn_get_extras(struct netfront_rxq *rxq, struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons) { struct netif_extra_info *extra; int err = 0; do { struct mbuf *m; grant_ref_t ref; if (__predict_false(*cons + 1 == rp)) { err = EINVAL; break; } extra = (struct netif_extra_info *) RING_GET_RESPONSE(&rxq->ring, ++(*cons)); if (__predict_false(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { err = EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } m = xn_get_rx_mbuf(rxq, *cons); ref = xn_get_rx_ref(rxq, *cons); xn_move_rx_slot(rxq, m, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); return err; } static int xn_get_responses(struct netfront_rxq *rxq, struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons, struct mbuf **list) { struct netif_rx_response *rx = &rinfo->rx; struct netif_extra_info *extras = rinfo->extras; struct mbuf *m, *m0, *m_prev; grant_ref_t ref = xn_get_rx_ref(rxq, *cons); RING_IDX ref_cons = *cons; int frags = 1; int err = 0; u_long ret; m0 = m = m_prev = xn_get_rx_mbuf(rxq, *cons); if (rx->flags & NETRXF_extra_info) { err = xn_get_extras(rxq, extras, rp, cons); } if (m0 != NULL) { m0->m_pkthdr.len = 0; m0->m_next = NULL; } for (;;) { #if 0 DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n", rx->status, rx->offset, frags); #endif if (__predict_false(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { xn_move_rx_slot(rxq, m, ref); if (m0 == m) m0 = NULL; m = NULL; err = EINVAL; goto next_skip_queue; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In the future this should flag the bad * situation to the system controller to reboot the backend. */ if (ref == GRANT_REF_INVALID) { printf("%s: Bad rx response id %d.\n", __func__, rx->id); err = EINVAL; goto next; } ret = gnttab_end_foreign_access_ref(ref); KASSERT(ret, ("Unable to end access to grant references")); gnttab_release_grant_reference(&rxq->gref_head, ref); next: if (m == NULL) break; m->m_len = rx->status; m->m_data += rx->offset; m0->m_pkthdr.len += rx->status; next_skip_queue: if (!(rx->flags & NETRXF_more_data)) break; if (*cons + frags == rp) { if (net_ratelimit()) WPRINTK("Need more frags\n"); err = ENOENT; printf("%s: cons %u frags %u rp %u, not enough frags\n", __func__, *cons, frags, rp); break; } /* * Note that m can be NULL, if rx->status < 0 or if * rx->offset + rx->status > PAGE_SIZE above. */ m_prev = m; rx = RING_GET_RESPONSE(&rxq->ring, *cons + frags); m = xn_get_rx_mbuf(rxq, *cons + frags); /* * m_prev == NULL can happen if rx->status < 0 or if * rx->offset + * rx->status > PAGE_SIZE above.
*/ if (m_prev != NULL) m_prev->m_next = m; /* * m0 can be NULL if rx->status < 0 or if * rx->offset + * rx->status > PAGE_SIZE above. */ if (m0 == NULL) m0 = m; m->m_next = NULL; ref = xn_get_rx_ref(rxq, *cons + frags); ref_cons = *cons + frags; frags++; } *list = m0; *cons += frags; return (err); } /** * \brief Count the number of fragments in an mbuf chain. * * Surprisingly, there isn't an M* macro for this. */ static inline int xn_count_frags(struct mbuf *m) { int nfrags; for (nfrags = 0; m != NULL; m = m->m_next) nfrags++; return (nfrags); } /** * Given an mbuf chain, make sure we have enough room and then push * it onto the transmit ring. */ static int xn_assemble_tx_request(struct netfront_txq *txq, struct mbuf *m_head) { struct mbuf *m; struct netfront_info *np = txq->info; struct ifnet *ifp = np->xn_ifp; u_int nfrags; int otherend_id; /** * Defragment the mbuf if necessary. */ nfrags = xn_count_frags(m_head); /* * Check to see whether this request is longer than netback * can handle, and try to defrag it. */ /** * It is a bit lame, but the netback driver in Linux can't * deal with nfrags > MAX_TX_REQ_FRAGS, which is a quirk of * the Linux network stack. */ if (nfrags > np->maxfrags) { m = m_defrag(m_head, M_NOWAIT); if (!m) { /* * Defrag failed, so free the mbuf and * therefore drop the packet. */ m_freem(m_head); return (EMSGSIZE); } m_head = m; } /* Determine how many fragments now exist */ nfrags = xn_count_frags(m_head); /* * Check to see whether the defragmented packet has too many * segments for the Linux netback driver. */ /** * The FreeBSD TCP stack, with TSO enabled, can produce a chain * of mbufs longer than Linux can handle. Make sure we don't * pass a too-long chain over to the other side by dropping the * packet. It doesn't look like there is currently a way to * tell the TCP stack to generate a shorter chain of packets. */ if (nfrags > MAX_TX_REQ_FRAGS) { #ifdef DEBUG printf("%s: nfrags %d > MAX_TX_REQ_FRAGS %d, netback " "won't be able to handle it, dropping\n", __func__, nfrags, MAX_TX_REQ_FRAGS); #endif m_freem(m_head); return (EMSGSIZE); } /* * This check should be redundant. We've already verified that we * have enough slots in the ring to handle a packet of maximum * size, and that our packet is less than the maximum size. Keep * it in here as an assert for now just to make certain that * chain_cnt is accurate. */ KASSERT((txq->mbufs_cnt + nfrags) <= NET_TX_RING_SIZE, ("%s: chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE " "(%d)!", __func__, (int) txq->mbufs_cnt, (int) nfrags, (int) NET_TX_RING_SIZE)); /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. */ m = m_head; otherend_id = xenbus_get_otherend_id(np->xbdev); for (m = m_head; m; m = m->m_next) { netif_tx_request_t *tx; uintptr_t id; grant_ref_t ref; u_long mfn; /* XXX Wrong type? 
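* (vtophys() returns vm_paddr_t, which can be wider than u_long on some platforms, e.g. i386 with PAE.)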
*/ tx = RING_GET_REQUEST(&txq->ring, txq->ring.req_prod_pvt); id = get_id_from_freelist(txq->mbufs); if (id == 0) panic("%s: was allocated the freelist head!\n", __func__); txq->mbufs_cnt++; if (txq->mbufs_cnt > NET_TX_RING_SIZE) panic("%s: tx_chain_cnt must be <= NET_TX_RING_SIZE\n", __func__); txq->mbufs[id] = m; tx->id = id; ref = gnttab_claim_grant_reference(&txq->gref_head); KASSERT((short)ref >= 0, ("Negative ref")); mfn = virt_to_mfn(mtod(m, vm_offset_t)); gnttab_grant_foreign_access_ref(ref, otherend_id, mfn, GNTMAP_readonly); tx->gref = txq->grant_ref[id] = ref; tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1); tx->flags = 0; if (m == m_head) { /* * The first fragment has the entire packet * size, subsequent fragments have just the * fragment size. The backend works out the * true size of the first fragment by * subtracting the sizes of the other * fragments. */ tx->size = m->m_pkthdr.len; /* * The first fragment contains the checksum flags * and is optionally followed by extra data for * TSO etc. */ /** * CSUM_TSO requires checksum offloading. * Some versions of FreeBSD fail to * set CSUM_TCP in the CSUM_TSO case, * so we have to test for CSUM_TSO * explicitly. */ if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO)) { tx->flags |= (NETTXF_csum_blank | NETTXF_data_validated); } if (m->m_pkthdr.csum_flags & CSUM_TSO) { struct netif_extra_info *gso = (struct netif_extra_info *) RING_GET_REQUEST(&txq->ring, ++txq->ring.req_prod_pvt); tx->flags |= NETTXF_extra_info; gso->u.gso.size = m->m_pkthdr.tso_segsz; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; } } else { tx->size = m->m_len; } if (m->m_next) tx->flags |= NETTXF_more_data; txq->ring.req_prod_pvt++; } BPF_MTAP(ifp, m_head); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if_inc_counter(ifp, IFCOUNTER_OBYTES, m_head->m_pkthdr.len); if (m_head->m_flags & M_MCAST) if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); xn_txeof(txq); return (0); } /* equivalent of network_open() in Linux */ static void xn_ifinit_locked(struct netfront_info *np) { struct ifnet *ifp; int i; struct netfront_rxq *rxq; XN_LOCK_ASSERT(np); ifp = np->xn_ifp; if (ifp->if_drv_flags & IFF_DRV_RUNNING || !netfront_carrier_ok(np)) return; xn_stop(np); for (i = 0; i < np->num_queues; i++) { rxq = &np->rxq[i]; XN_RX_LOCK(rxq); xn_alloc_rx_buffers(rxq); rxq->ring.sring->rsp_event = rxq->ring.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&rxq->ring)) xn_rxeof(rxq); XN_RX_UNLOCK(rxq); } ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if_link_state_change(ifp, LINK_STATE_UP); } static void xn_ifinit(void *xsc) { struct netfront_info *sc = xsc; XN_LOCK(sc); xn_ifinit_locked(sc); XN_UNLOCK(sc); } static int xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct netfront_info *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; device_t dev; #ifdef INET struct ifaddr *ifa = (struct ifaddr *)data; #endif int mask, error = 0, reinit; dev = sc->xbdev; switch(cmd) { case SIOCSIFADDR: #ifdef INET XN_LOCK(sc); if (ifa->ifa_addr->sa_family == AF_INET) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) xn_ifinit_locked(sc); arp_ifinit(ifp, ifa); XN_UNLOCK(sc); } else { XN_UNLOCK(sc); #endif error = ether_ioctl(ifp, cmd, data); #ifdef INET } #endif break; case SIOCSIFMTU: + if (ifp->if_mtu == ifr->ifr_mtu) + break; + ifp->if_mtu = ifr->ifr_mtu; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; xn_ifinit(sc); break; case 
SIOCSIFFLAGS: XN_LOCK(sc); if (ifp->if_flags & IFF_UP) { /* * If only the state of the PROMISC flag changed, * then just use the 'set promisc mode' command * instead of reinitializing the entire NIC. Doing * a full re-init means reloading the firmware and * waiting for it to start up, which may take a * second or two. */ xn_ifinit_locked(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { xn_stop(sc); } } sc->xn_if_flags = ifp->if_flags; XN_UNLOCK(sc); break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ ifp->if_capenable; reinit = 0; if (mask & IFCAP_TXCSUM) { ifp->if_capenable ^= IFCAP_TXCSUM; ifp->if_hwassist ^= XN_CSUM_FEATURES; } if (mask & IFCAP_TSO4) { ifp->if_capenable ^= IFCAP_TSO4; ifp->if_hwassist ^= CSUM_TSO; } if (mask & (IFCAP_RXCSUM | IFCAP_LRO)) { /* These Rx features require us to renegotiate. */ reinit = 1; if (mask & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; if (mask & IFCAP_LRO) ifp->if_capenable ^= IFCAP_LRO; } if (reinit == 0) break; /* * We must reset the interface so the backend picks up the * new features. */ device_printf(sc->xbdev, "performing interface reset due to feature change\n"); XN_LOCK(sc); netfront_carrier_off(sc); sc->xn_reset = true; /* * NB: the pending packet queue is not flushed, since * the interface should still support the old options. */ XN_UNLOCK(sc); /* * Delete the xenstore nodes that export features. * * NB: There's a xenbus state called * "XenbusStateReconfiguring", which is what we should set * here. Sadly none of the backends know how to handle it, * and simply disconnect from the frontend, so we will just * switch back to XenbusStateInitialising in order to force * a reconnection. */ xs_rm(XST_NIL, xenbus_get_node(dev), "feature-gso-tcpv4"); xs_rm(XST_NIL, xenbus_get_node(dev), "feature-no-csum-offload"); xenbus_set_state(dev, XenbusStateClosing); /* * Wait for the frontend to reconnect before returning * from the ioctl. 30s should be more than enough for any * sane backend to reconnect. */ error = tsleep(sc, 0, "xn_rst", 30*hz); break; case SIOCADDMULTI: case SIOCDELMULTI: break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); break; default: error = ether_ioctl(ifp, cmd, data); } return (error); } static void xn_stop(struct netfront_info *sc) { struct ifnet *ifp; XN_LOCK_ASSERT(sc); ifp = sc->xn_ifp; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); if_link_state_change(ifp, LINK_STATE_DOWN); } static void xn_rebuild_rx_bufs(struct netfront_rxq *rxq) { int requeue_idx, i; grant_ref_t ref; netif_rx_request_t *req; for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { struct mbuf *m; u_long pfn; if (rxq->mbufs[i] == NULL) continue; m = rxq->mbufs[requeue_idx] = xn_get_rx_mbuf(rxq, i); ref = rxq->grant_ref[requeue_idx] = xn_get_rx_ref(rxq, i); req = RING_GET_REQUEST(&rxq->ring, requeue_idx); pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT; gnttab_grant_foreign_access_ref(ref, xenbus_get_otherend_id(rxq->info->xbdev), pfn, 0); req->gref = ref; req->id = requeue_idx; requeue_idx++; } rxq->ring.req_prod_pvt = requeue_idx; } /* START of Xenolinux helper functions adapted to FreeBSD */ static int xn_connect(struct netfront_info *np) { int i, error; u_int feature_rx_copy; struct netfront_rxq *rxq; struct netfront_txq *txq; error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), "feature-rx-copy", NULL, "%u", &feature_rx_copy); if (error != 0) feature_rx_copy = 0; /* We only support rx copy. 
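* The legacy page-flipping receive mode is not implemented, so a backend that cannot copy is of no use to us.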
*/ if (!feature_rx_copy) return (EPROTONOSUPPORT); /* Recovery procedure: */ error = talk_to_backend(np->xbdev, np); if (error != 0) return (error); /* Step 1: Reinitialise variables. */ xn_query_features(np); xn_configure_features(np); /* Step 2: Release TX buffer */ for (i = 0; i < np->num_queues; i++) { txq = &np->txq[i]; xn_release_tx_bufs(txq); } /* Step 3: Rebuild the RX buffer freelist and the RX ring itself. */ for (i = 0; i < np->num_queues; i++) { rxq = &np->rxq[i]; xn_rebuild_rx_bufs(rxq); } /* Step 4: All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. */ netfront_carrier_on(np); wakeup(np); return (0); } static void xn_kick_rings(struct netfront_info *np) { struct netfront_rxq *rxq; struct netfront_txq *txq; int i; for (i = 0; i < np->num_queues; i++) { txq = &np->txq[i]; rxq = &np->rxq[i]; xen_intr_signal(txq->xen_intr_handle); XN_TX_LOCK(txq); xn_txeof(txq); XN_TX_UNLOCK(txq); XN_RX_LOCK(rxq); xn_alloc_rx_buffers(rxq); XN_RX_UNLOCK(rxq); } } static void xn_query_features(struct netfront_info *np) { int val; device_printf(np->xbdev, "backend features:"); if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), "feature-sg", NULL, "%d", &val) != 0) val = 0; np->maxfrags = 1; if (val) { np->maxfrags = MAX_TX_REQ_FRAGS; printf(" feature-sg"); } if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), "feature-gso-tcpv4", NULL, "%d", &val) != 0) val = 0; np->xn_ifp->if_capabilities &= ~(IFCAP_TSO4|IFCAP_LRO); if (val) { np->xn_ifp->if_capabilities |= IFCAP_TSO4|IFCAP_LRO; printf(" feature-gso-tcp4"); } /* * HW CSUM offload is assumed to be available unless * feature-no-csum-offload is set in xenstore. */ if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), "feature-no-csum-offload", NULL, "%d", &val) != 0) val = 0; np->xn_ifp->if_capabilities |= IFCAP_HWCSUM; if (val) { np->xn_ifp->if_capabilities &= ~(IFCAP_HWCSUM); printf(" feature-no-csum-offload"); } printf("\n"); } static int xn_configure_features(struct netfront_info *np) { int err, cap_enabled; #if (defined(INET) || defined(INET6)) int i; #endif struct ifnet *ifp; ifp = np->xn_ifp; err = 0; if ((ifp->if_capenable & ifp->if_capabilities) == ifp->if_capenable) { /* Current options are available, no need to do anything. */ return (0); } /* Try to preserve as many options as possible. 
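* The capability set is rebuilt from scratch below: each previously enabled option that the (possibly changed) backend still supports is re-enabled.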
*/ cap_enabled = ifp->if_capenable; ifp->if_capenable = ifp->if_hwassist = 0; #if (defined(INET) || defined(INET6)) if ((cap_enabled & IFCAP_LRO) != 0) for (i = 0; i < np->num_queues; i++) tcp_lro_free(&np->rxq[i].lro); if (xn_enable_lro && (ifp->if_capabilities & cap_enabled & IFCAP_LRO) != 0) { ifp->if_capenable |= IFCAP_LRO; for (i = 0; i < np->num_queues; i++) { err = tcp_lro_init(&np->rxq[i].lro); if (err != 0) { device_printf(np->xbdev, "LRO initialization failed\n"); ifp->if_capenable &= ~IFCAP_LRO; break; } np->rxq[i].lro.ifp = ifp; } } if ((ifp->if_capabilities & cap_enabled & IFCAP_TSO4) != 0) { ifp->if_capenable |= IFCAP_TSO4; ifp->if_hwassist |= CSUM_TSO; } #endif if ((ifp->if_capabilities & cap_enabled & IFCAP_TXCSUM) != 0) { ifp->if_capenable |= IFCAP_TXCSUM; ifp->if_hwassist |= XN_CSUM_FEATURES; } if ((ifp->if_capabilities & cap_enabled & IFCAP_RXCSUM) != 0) ifp->if_capenable |= IFCAP_RXCSUM; return (err); } static int xn_txq_mq_start_locked(struct netfront_txq *txq, struct mbuf *m) { struct netfront_info *np; struct ifnet *ifp; struct buf_ring *br; int error, notify; np = txq->info; br = txq->br; ifp = np->xn_ifp; error = 0; XN_TX_LOCK_ASSERT(txq); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !netfront_carrier_ok(np)) { if (m != NULL) error = drbr_enqueue(ifp, br, m); return (error); } if (m != NULL) { error = drbr_enqueue(ifp, br, m); if (error != 0) return (error); } while ((m = drbr_peek(ifp, br)) != NULL) { if (!xn_tx_slot_available(txq)) { drbr_putback(ifp, br, m); break; } error = xn_assemble_tx_request(txq, m); /* xn_assemble_tx_request always consumes the mbuf*/ if (error != 0) { drbr_advance(ifp, br); break; } RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&txq->ring, notify); if (notify) xen_intr_signal(txq->xen_intr_handle); drbr_advance(ifp, br); } if (RING_FULL(&txq->ring)) txq->full = true; return (0); } static int xn_txq_mq_start(struct ifnet *ifp, struct mbuf *m) { struct netfront_info *np; struct netfront_txq *txq; int i, npairs, error; np = ifp->if_softc; npairs = np->num_queues; if (!netfront_carrier_ok(np)) return (ENOBUFS); KASSERT(npairs != 0, ("called with 0 available queues")); /* check if flowid is set */ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) i = m->m_pkthdr.flowid % npairs; else i = curcpu % npairs; txq = &np->txq[i]; if (XN_TX_TRYLOCK(txq) != 0) { error = xn_txq_mq_start_locked(txq, m); XN_TX_UNLOCK(txq); } else { error = drbr_enqueue(ifp, txq->br, m); taskqueue_enqueue(txq->tq, &txq->defrtask); } return (error); } static void xn_qflush(struct ifnet *ifp) { struct netfront_info *np; struct netfront_txq *txq; struct mbuf *m; int i; np = ifp->if_softc; for (i = 0; i < np->num_queues; i++) { txq = &np->txq[i]; XN_TX_LOCK(txq); while ((m = buf_ring_dequeue_sc(txq->br)) != NULL) m_freem(m); XN_TX_UNLOCK(txq); } if_qflush(ifp); } /** * Create a network device. * @param dev Newbus device representing this virtual NIC. 
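* @return 0 on success, otherwise an errno value.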
*/ int create_netdev(device_t dev) { struct netfront_info *np; int err; struct ifnet *ifp; np = device_get_softc(dev); np->xbdev = dev; mtx_init(&np->sc_lock, "xnsc", "netfront softc lock", MTX_DEF); ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts); ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL); err = xen_net_read_mac(dev, np->mac); if (err != 0) goto error; /* Set up ifnet structure */ ifp = np->xn_ifp = if_alloc(IFT_ETHER); ifp->if_softc = np; if_initname(ifp, "xn", device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = xn_ioctl; ifp->if_transmit = xn_txq_mq_start; ifp->if_qflush = xn_qflush; ifp->if_init = xn_ifinit; ifp->if_hwassist = XN_CSUM_FEATURES; /* Enable all supported features at device creation. */ ifp->if_capenable = ifp->if_capabilities = IFCAP_HWCSUM|IFCAP_TSO4|IFCAP_LRO; ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); ifp->if_hw_tsomaxsegcount = MAX_TX_REQ_FRAGS; ifp->if_hw_tsomaxsegsize = PAGE_SIZE; ether_ifattach(ifp, np->mac); netfront_carrier_off(np); return (0); error: KASSERT(err != 0, ("Error path with no error code specified")); return (err); } static int netfront_detach(device_t dev) { struct netfront_info *info = device_get_softc(dev); DPRINTK("%s\n", xenbus_get_node(dev)); netif_free(info); return 0; } static void netif_free(struct netfront_info *np) { XN_LOCK(np); xn_stop(np); XN_UNLOCK(np); netif_disconnect_backend(np); ether_ifdetach(np->xn_ifp); free(np->rxq, M_DEVBUF); free(np->txq, M_DEVBUF); if_free(np->xn_ifp); np->xn_ifp = NULL; ifmedia_removeall(&np->sc_media); } static void netif_disconnect_backend(struct netfront_info *np) { u_int i; for (i = 0; i < np->num_queues; i++) { XN_RX_LOCK(&np->rxq[i]); XN_TX_LOCK(&np->txq[i]); } netfront_carrier_off(np); for (i = 0; i < np->num_queues; i++) { XN_RX_UNLOCK(&np->rxq[i]); XN_TX_UNLOCK(&np->txq[i]); } for (i = 0; i < np->num_queues; i++) { disconnect_rxq(&np->rxq[i]); disconnect_txq(&np->txq[i]); } } static int xn_ifmedia_upd(struct ifnet *ifp) { return (0); } static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE; ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; } /* ** Driver registration ** */ static device_method_t netfront_methods[] = { /* Device interface */ DEVMETHOD(device_probe, netfront_probe), DEVMETHOD(device_attach, netfront_attach), DEVMETHOD(device_detach, netfront_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, netfront_suspend), DEVMETHOD(device_resume, netfront_resume), /* Xenbus interface */ DEVMETHOD(xenbus_otherend_changed, netfront_backend_changed), DEVMETHOD_END }; static driver_t netfront_driver = { "xn", netfront_methods, sizeof(struct netfront_info), }; devclass_t netfront_devclass; DRIVER_MODULE(xe, xenbusb_front, netfront_driver, netfront_devclass, NULL, NULL); Index: projects/clang500-import/sys/fs/msdosfs/denode.h =================================================================== --- projects/clang500-import/sys/fs/msdosfs/denode.h (revision 319548) +++ projects/clang500-import/sys/fs/msdosfs/denode.h (revision 319549) @@ -1,280 +1,280 @@ /* $FreeBSD$ */ /* $NetBSD: denode.h,v 1.25 1997/11/17 15:36:28 ws Exp $ */ /*- * Copyright (C) 1994, 1995, 1997 Wolfgang Solfrank. * Copyright (C) 1994, 1995, 1997 TooLs GmbH. * All rights reserved. * Original code by Paul Popelka (paulp@uts.amdahl.com) (see below). 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Written by Paul Popelka (paulp@uts.amdahl.com) * * You can do anything you want with this software, just don't say you wrote * it, and don't remove this notice. * * This software is provided "as is". * * The author supplies this software to be publicly redistributed on the * understanding that the author is not responsible for the correct * functioning of this software in any circumstances and is not liable for * any damages caused by this software. * * October 1992 */ /* * This is the pc filesystem specific portion of the vnode structure. * * To describe a file uniquely the de_dirclust, de_diroffset, and * de_StartCluster fields are used. * * de_dirclust contains the cluster number of the directory cluster * containing the entry for a file or directory. * de_diroffset is the index into the cluster for the entry describing * a file or directory. * de_StartCluster is the number of the first cluster of the file or directory. * * Now to describe the quirks of the pc filesystem. * - Clusters 0 and 1 are reserved. * - The first allocatable cluster is 2. * - The root directory is of fixed size and all blocks that make it up * are contiguous. * - Cluster 0 refers to the root directory when it is found in the * startcluster field of a directory entry that points to another directory. * - Cluster 0 implies a 0 length file when found in the start cluster field * of a directory entry that points to a file. * - You can't use the cluster number 0 to derive the address of the root * directory. * - Multiple directory entries can point to a directory. The entry in the * parent directory points to a child directory. Any directories in the * child directory contain a ".." entry that points back to the parent. * The child directory itself contains a "." entry that points to itself. * - The root directory does not contain a "." or ".." entry. * - Directory entries for directories are never changed once they are created * (except when removed). 
The size stays 0, and the last modification time * is never changed. This is because so many directory entries can point to * the physical clusters that make up a directory. It would lead to an * update nightmare. * - The length field in a directory entry pointing to a directory contains 0 * (always). The only way to find the end of a directory is to follow the * cluster chain until the "last cluster" marker is found. * * My extensions to make this house of cards work. These apply only to the in * memory copy of the directory entry. * - A reference count for each denode will be kept since dos doesn't keep such * things. */ /* * Internal pseudo-offset for (nonexistent) directory entry for the root * dir in the root dir */ #define MSDOSFSROOT_OFS 0x1fffffff /* * The FAT cache structure. fc_fsrcn is the filesystem relative cluster * number that corresponds to the file relative cluster number in this * structure (fc_frcn). */ struct fatcache { u_long fc_frcn; /* file relative cluster number */ u_long fc_fsrcn; /* filesystem relative cluster number */ }; /* * The FAT entry cache as it stands helps make extending files a "quick" * operation by avoiding having to scan the FAT to discover the last * cluster of the file. The cache also helps sequential reads by * remembering the last cluster read from the file. This also prevents us * from having to rescan the FAT to find the next cluster to read. This * cache is probably pretty worthless if a file is opened by multiple * processes. */ #define FC_SIZE 3 /* number of entries in the cache */ #define FC_LASTMAP 0 /* entry the last call to pcbmap() resolved * to */ #define FC_LASTFC 1 /* entry for the last cluster in the file */ #define FC_NEXTTOLASTFC 2 /* entry for a close to the last cluster in * the file */ #define FCE_EMPTY 0xffffffff /* doesn't represent an actual cluster # */ /* * Set a slot in the FAT cache. */ #define fc_setcache(dep, slot, frcn, fsrcn) \ (dep)->de_fc[(slot)].fc_frcn = (frcn); \ (dep)->de_fc[(slot)].fc_fsrcn = (fsrcn); /* * This is the in memory variant of a dos directory entry. It is usually * contained within a vnode. */ struct denode { struct vnode *de_vnode; /* addr of vnode we are part of */ u_long de_flag; /* flag bits */ u_long de_dirclust; /* cluster of the directory file containing this entry */ u_long de_diroffset; /* offset of this entry in the directory cluster */ u_long de_fndoffset; /* offset of found dir entry */ int de_fndcnt; /* number of slots before de_fndoffset */ long de_refcnt; /* reference count */ struct msdosfsmount *de_pmp; /* addr of our mount struct */ u_char de_Name[12]; /* name, from DOS directory entry */ u_char de_Attributes; /* attributes, from directory entry */ u_char de_LowerCase; /* NT VFAT lower case flags */ u_char de_CHun; /* Hundredth of second of CTime*/ u_short de_CTime; /* creation time */ u_short de_CDate; /* creation date */ u_short de_ADate; /* access date */ u_short de_MTime; /* modification time */ u_short de_MDate; /* modification date */ u_long de_StartCluster; /* starting cluster of file */ u_long de_FileSize; /* size of file in bytes */ struct fatcache de_fc[FC_SIZE]; /* FAT cache */ u_quad_t de_modrev; /* Revision level for lease. */ uint64_t de_inode; /* Inode number (really byte offset of direntry) */ }; /* * Values for the de_flag field of the denode. 
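* These bits may be or'ed together; the DETIMES() macro below clears DE_UPDATE, DE_CREATE and DE_ACCESS once the corresponding timestamps have been written back.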
*/ #define DE_UPDATE 0x0004 /* Modification time update request */ #define DE_CREATE 0x0008 /* Creation time update */ #define DE_ACCESS 0x0010 /* Access time update */ #define DE_MODIFIED 0x0020 /* Denode has been modified */ #define DE_RENAME 0x0040 /* Denode is in the process of being renamed */ /* * Transfer directory entries between internal and external form. * dep is a struct denode * (internal form), * dp is a struct direntry * (external form). */ #define DE_INTERNALIZE32(dep, dp) \ ((dep)->de_StartCluster |= getushort((dp)->deHighClust) << 16) #define DE_INTERNALIZE(dep, dp) \ - (bcopy((dp)->deName, (dep)->de_Name, 11), \ + (memcpy((dep)->de_Name, (dp)->deName, 11), \ (dep)->de_Attributes = (dp)->deAttributes, \ (dep)->de_LowerCase = (dp)->deLowerCase, \ (dep)->de_CHun = (dp)->deCHundredth, \ (dep)->de_CTime = getushort((dp)->deCTime), \ (dep)->de_CDate = getushort((dp)->deCDate), \ (dep)->de_ADate = getushort((dp)->deADate), \ (dep)->de_MTime = getushort((dp)->deMTime), \ (dep)->de_MDate = getushort((dp)->deMDate), \ (dep)->de_StartCluster = getushort((dp)->deStartCluster), \ (dep)->de_FileSize = getulong((dp)->deFileSize), \ (FAT32((dep)->de_pmp) ? DE_INTERNALIZE32((dep), (dp)) : 0)) #define DE_EXTERNALIZE(dp, dep) \ - (bcopy((dep)->de_Name, (dp)->deName, 11), \ + (memcpy((dp)->deName, (dep)->de_Name, 11), \ (dp)->deAttributes = (dep)->de_Attributes, \ (dp)->deLowerCase = (dep)->de_LowerCase, \ (dp)->deCHundredth = (dep)->de_CHun, \ putushort((dp)->deCTime, (dep)->de_CTime), \ putushort((dp)->deCDate, (dep)->de_CDate), \ putushort((dp)->deADate, (dep)->de_ADate), \ putushort((dp)->deMTime, (dep)->de_MTime), \ putushort((dp)->deMDate, (dep)->de_MDate), \ putushort((dp)->deStartCluster, (dep)->de_StartCluster), \ putulong((dp)->deFileSize, \ ((dep)->de_Attributes & ATTR_DIRECTORY) ? 0 : (dep)->de_FileSize), \ putushort((dp)->deHighClust, (dep)->de_StartCluster >> 16)) #ifdef _KERNEL #define VTODE(vp) ((struct denode *)(vp)->v_data) #define DETOV(de) ((de)->de_vnode) #define DETIMES(dep, acc, mod, cre) do { \ if ((dep)->de_flag & DE_UPDATE) { \ (dep)->de_flag |= DE_MODIFIED; \ timespec2fattime((mod), 0, &(dep)->de_MDate, \ &(dep)->de_MTime, NULL); \ (dep)->de_Attributes |= ATTR_ARCHIVE; \ } \ if ((dep)->de_pmp->pm_flags & MSDOSFSMNT_NOWIN95) { \ (dep)->de_flag &= ~(DE_UPDATE | DE_CREATE | DE_ACCESS); \ break; \ } \ if ((dep)->de_flag & DE_ACCESS) { \ uint16_t adate; \ \ timespec2fattime((acc), 0, &adate, NULL, NULL); \ if (adate != (dep)->de_ADate) { \ (dep)->de_flag |= DE_MODIFIED; \ (dep)->de_ADate = adate; \ } \ } \ if ((dep)->de_flag & DE_CREATE) { \ timespec2fattime((cre), 0, &(dep)->de_CDate, \ &(dep)->de_CTime, &(dep)->de_CHun); \ (dep)->de_flag |= DE_MODIFIED; \ } \ (dep)->de_flag &= ~(DE_UPDATE | DE_CREATE | DE_ACCESS); \ } while (0) /* * This overlays the fid structure (see mount.h) */ struct defid { u_short defid_len; /* length of structure */ u_short defid_pad; /* force long alignment */ uint32_t defid_dirclust; /* cluster this dir entry came from */ uint32_t defid_dirofs; /* offset of entry within the cluster */ #if 0 uint32_t defid_gen; /* generation number */ #endif }; extern struct vop_vector msdosfs_vnodeops; int msdosfs_lookup(struct vop_cachedlookup_args *); int msdosfs_inactive(struct vop_inactive_args *); int msdosfs_reclaim(struct vop_reclaim_args *); /* * Internal service routine prototypes. 
*/ int deget(struct msdosfsmount *, u_long, u_long, struct denode **); int uniqdosname(struct denode *, struct componentname *, u_char *); int readep(struct msdosfsmount *pmp, u_long dirclu, u_long dirofs, struct buf **bpp, struct direntry **epp); int readde(struct denode *dep, struct buf **bpp, struct direntry **epp); int deextend(struct denode *dep, u_long length, struct ucred *cred); int fillinusemap(struct msdosfsmount *pmp); void reinsert(struct denode *dep); int dosdirempty(struct denode *dep); int createde(struct denode *dep, struct denode *ddep, struct denode **depp, struct componentname *cnp); int deupdat(struct denode *dep, int waitfor); int removede(struct denode *pdep, struct denode *dep); int detrunc(struct denode *dep, u_long length, int flags, struct ucred *cred); int doscheckpath( struct denode *source, struct denode *target); #endif /* _KERNEL */ Index: projects/clang500-import/sys/fs/msdosfs/msdosfs_conv.c =================================================================== --- projects/clang500-import/sys/fs/msdosfs/msdosfs_conv.c (revision 319548) +++ projects/clang500-import/sys/fs/msdosfs/msdosfs_conv.c (revision 319549) @@ -1,1078 +1,1078 @@ /* $FreeBSD$ */ /* $NetBSD: msdosfs_conv.c,v 1.25 1997/11/17 15:36:40 ws Exp $ */ /*- * Copyright (C) 1995, 1997 Wolfgang Solfrank. * Copyright (C) 1995, 1997 TooLs GmbH. * All rights reserved. * Original code by Paul Popelka (paulp@uts.amdahl.com) (see below). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Written by Paul Popelka (paulp@uts.amdahl.com) * * You can do anything you want with this software, just don't say you wrote * it, and don't remove this notice. * * This software is provided "as is". * * The author supplies this software to be publicly redistributed on the * understanding that the author is not responsible for the correct * functioning of this software in any circumstances and is not liable for * any damages caused by this software. 
* * October 1992 */ #include #include #include #include #include #include #include #include extern struct iconv_functions *msdosfs_iconv; static int mbsadjpos(const char **, size_t, size_t, int, int, void *handle); static u_char * dos2unixchr(u_char *, const u_char **, size_t *, int, struct msdosfsmount *); static uint16_t unix2doschr(const u_char **, size_t *, struct msdosfsmount *); static u_char * win2unixchr(u_char *, uint16_t, struct msdosfsmount *); static uint16_t unix2winchr(const u_char **, size_t *, int, struct msdosfsmount *); /* * 0 - character disallowed in long file name. * 1 - character should be replaced by '_' in DOS file name, * and generation number inserted. * 2 - character ('.' and ' ') should be skipped in DOS file name, * and generation number inserted. */ static const u_char unix2dos[256] = { /* iso8859-1 -> cp850 */ 0, 0, 0, 0, 0, 0, 0, 0, /* 00-07 */ 0, 0, 0, 0, 0, 0, 0, 0, /* 08-0f */ 0, 0, 0, 0, 0, 0, 0, 0, /* 10-17 */ 0, 0, 0, 0, 0, 0, 0, 0, /* 18-1f */ 2, 0x21, 0, 0x23, 0x24, 0x25, 0x26, 0x27, /* 20-27 */ 0x28, 0x29, 0, 1, 1, 0x2d, 2, 0, /* 28-2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 30-37 */ 0x38, 0x39, 0, 1, 0, 1, 0, 0, /* 38-3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 40-47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 48-4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 50-57 */ 0x58, 0x59, 0x5a, 1, 0, 1, 0x5e, 0x5f, /* 58-5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 60-67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 68-6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 70-77 */ 0x58, 0x59, 0x5a, 0x7b, 0, 0x7d, 0x7e, 0, /* 78-7f */ 0, 0, 0, 0, 0, 0, 0, 0, /* 80-87 */ 0, 0, 0, 0, 0, 0, 0, 0, /* 88-8f */ 0, 0, 0, 0, 0, 0, 0, 0, /* 90-97 */ 0, 0, 0, 0, 0, 0, 0, 0, /* 98-9f */ 0, 0xad, 0xbd, 0x9c, 0xcf, 0xbe, 0xdd, 0xf5, /* a0-a7 */ 0xf9, 0xb8, 0xa6, 0xae, 0xaa, 0xf0, 0xa9, 0xee, /* a8-af */ 0xf8, 0xf1, 0xfd, 0xfc, 0xef, 0xe6, 0xf4, 0xfa, /* b0-b7 */ 0xf7, 0xfb, 0xa7, 0xaf, 0xac, 0xab, 0xf3, 0xa8, /* b8-bf */ 0xb7, 0xb5, 0xb6, 0xc7, 0x8e, 0x8f, 0x92, 0x80, /* c0-c7 */ 0xd4, 0x90, 0xd2, 0xd3, 0xde, 0xd6, 0xd7, 0xd8, /* c8-cf */ 0xd1, 0xa5, 0xe3, 0xe0, 0xe2, 0xe5, 0x99, 0x9e, /* d0-d7 */ 0x9d, 0xeb, 0xe9, 0xea, 0x9a, 0xed, 0xe8, 0xe1, /* d8-df */ 0xb7, 0xb5, 0xb6, 0xc7, 0x8e, 0x8f, 0x92, 0x80, /* e0-e7 */ 0xd4, 0x90, 0xd2, 0xd3, 0xde, 0xd6, 0xd7, 0xd8, /* e8-ef */ 0xd1, 0xa5, 0xe3, 0xe0, 0xe2, 0xe5, 0x99, 0xf6, /* f0-f7 */ 0x9d, 0xeb, 0xe9, 0xea, 0x9a, 0xed, 0xe8, 0x98, /* f8-ff */ }; static const u_char dos2unix[256] = { /* cp850 -> iso8859-1 */ 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, /* 00-07 */ 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, /* 08-0f */ 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, /* 10-17 */ 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, /* 18-1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 20-27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 28-2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 30-37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 38-3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 40-47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 48-4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 50-57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 58-5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 60-67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 68-6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 70-77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 78-7f */ 
0xc7, 0xfc, 0xe9, 0xe2, 0xe4, 0xe0, 0xe5, 0xe7, /* 80-87 */ 0xea, 0xeb, 0xe8, 0xef, 0xee, 0xec, 0xc4, 0xc5, /* 88-8f */ 0xc9, 0xe6, 0xc6, 0xf4, 0xf6, 0xf2, 0xfb, 0xf9, /* 90-97 */ 0xff, 0xd6, 0xdc, 0xf8, 0xa3, 0xd8, 0xd7, 0x3f, /* 98-9f */ 0xe1, 0xed, 0xf3, 0xfa, 0xf1, 0xd1, 0xaa, 0xba, /* a0-a7 */ 0xbf, 0xae, 0xac, 0xbd, 0xbc, 0xa1, 0xab, 0xbb, /* a8-af */ 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0xc1, 0xc2, 0xc0, /* b0-b7 */ 0xa9, 0x3f, 0x3f, 0x3f, 0x3f, 0xa2, 0xa5, 0x3f, /* b8-bf */ 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0xe3, 0xc3, /* c0-c7 */ 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0xa4, /* c8-cf */ 0xf0, 0xd0, 0xca, 0xcb, 0xc8, 0x3f, 0xcd, 0xce, /* d0-d7 */ 0xcf, 0x3f, 0x3f, 0x3f, 0x3f, 0xa6, 0xcc, 0x3f, /* d8-df */ 0xd3, 0xdf, 0xd4, 0xd2, 0xf5, 0xd5, 0xb5, 0xfe, /* e0-e7 */ 0xde, 0xda, 0xdb, 0xd9, 0xfd, 0xdd, 0xaf, 0x3f, /* e8-ef */ 0xad, 0xb1, 0x3f, 0xbe, 0xb6, 0xa7, 0xf7, 0xb8, /* f0-f7 */ 0xb0, 0xa8, 0xb7, 0xb9, 0xb3, 0xb2, 0x3f, 0x3f, /* f8-ff */ }; static const u_char u2l[256] = { /* tolower */ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 00-07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 08-0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 10-17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 18-1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 20-27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 28-2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 30-37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 38-3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 40-47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 48-4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 50-57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 58-5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 60-67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 68-6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 70-77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 78-7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 80-87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 88-8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 90-97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 98-9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* a0-a7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* a8-af */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* b0-b7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* b8-bf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* c0-c7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* c8-cf */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, /* d0-d7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* d8-df */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* e0-e7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* e8-ef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* f0-f7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* f8-ff */ }; static const u_char l2u[256] = { /* toupper */ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 00-07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 08-0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 10-17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 18-1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 20-27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 28-2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 30-37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 38-3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 40-47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 
0x6d, 0x6e, 0x6f, /* 48-4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 50-57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 58-5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 60-67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 68-6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 70-77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 78-7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 80-87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 88-8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 90-97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 98-9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* a0-a7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* a8-af */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* b0-b7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* b8-bf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* c0-c7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* c8-cf */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, /* d0-d7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* d8-df */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* e0-e7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* e8-ef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* f0-f7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* f8-ff */ }; /* * DOS filenames are made of 2 parts, the name part and the extension part. * The name part is 8 characters long and the extension part is 3 * characters long. They may contain trailing blanks if the name or * extension are not long enough to fill their respective fields. */ /* * Convert a DOS filename to a unix filename. And, return the number of * characters in the resulting unix filename excluding the terminating * null. */ int dos2unixfn(u_char dn[11], u_char *un, int lower, struct msdosfsmount *pmp) { size_t i; int thislong = 0; u_char *c, tmpbuf[5]; /* * If first char of the filename is SLOT_E5 (0x05), then the real * first char of the filename should be 0xe5. But, they couldn't * just have a 0xe5 mean 0xe5 because that is used to mean a freed * directory slot. Another dos quirk. */ if (*dn == SLOT_E5) *dn = 0xe5; /* * Copy the name portion into the unix filename string. */ for (i = 8; i > 0 && *dn != ' ';) { c = dos2unixchr(tmpbuf, __DECONST(const u_char **, &dn), &i, lower & LCASE_BASE, pmp); while (*c != '\0') { *un++ = *c++; thislong++; } } dn += i; /* * Now, if there is an extension then put in a period and copy in * the extension. */ if (*dn != ' ') { *un++ = '.'; thislong++; for (i = 3; i > 0 && *dn != ' ';) { c = dos2unixchr(tmpbuf, __DECONST(const u_char **, &dn), &i, lower & LCASE_EXT, pmp); while (*c != '\0') { *un++ = *c++; thislong++; } } } *un++ = 0; return (thislong); } /* * Convert a unix filename to a DOS filename according to Win95 rules. * If applicable and gen is not 0, it is inserted into the converted * filename as a generation number. * Returns * 0 if name couldn't be converted * 1 if the converted name is the same as the original * (no long filename entry necessary for Win95) * 2 if conversion was successful * 3 if conversion was successful and generation number was inserted */ int unix2dosfn(const u_char *un, u_char dn[12], size_t unlen, u_int gen, struct msdosfsmount *pmp) { ssize_t i, j; int l; int conv = 1; const u_char *cp, *dp, *dp1; u_char gentext[6], *wcp; uint16_t c; /* * Fill the dos filename string with blanks. These are DOS's pad * characters. */ for (i = 0; i < 11; i++) dn[i] = ' '; dn[11] = 0; /* * The filenames "." 
and ".." are handled specially, since they * don't follow dos filename rules. */ if (un[0] == '.' && unlen == 1) { dn[0] = '.'; return gen <= 1; } if (un[0] == '.' && un[1] == '.' && unlen == 2) { dn[0] = '.'; dn[1] = '.'; return gen <= 1; } /* * Filenames with only blanks and dots are not allowed! */ for (cp = un, i = unlen; --i >= 0; cp++) if (*cp != ' ' && *cp != '.') break; if (i < 0) return 0; /* * Filenames with some characters are not allowed! */ for (cp = un, i = unlen; i > 0;) if (unix2doschr(&cp, (size_t *)&i, pmp) == 0) return 0; /* * Now find the extension * Note: dot as first char doesn't start extension * and trailing dots and blanks are ignored * Note(2003/7): It seems recent Windows has * defferent rule than this code, that Windows * ignores all dots before extension, and use all * chars as filename except for dots. */ dp = dp1 = NULL; for (cp = un + 1, i = unlen - 1; --i >= 0;) { switch (*cp++) { case '.': if (!dp1) dp1 = cp; break; case ' ': break; default: if (dp1) dp = dp1; dp1 = NULL; break; } } /* * Now convert it (this part is for extension). * As Windows XP do, if it's not ascii char, * this function should return 2 or 3, so that checkng out Unicode name. */ if (dp) { if (dp1) l = dp1 - dp; else l = unlen - (dp - un); for (cp = dp, i = l, j = 8; i > 0 && j < 11; j++) { c = unix2doschr(&cp, (size_t *)&i, pmp); if (c & 0xff00) { dn[j] = c >> 8; if (++j < 11) { dn[j] = c; if (conv != 3) conv = 2; continue; } else { conv = 3; dn[j-1] = ' '; break; } } else { dn[j] = c; } if (((dn[j] & 0x80) || *(cp - 1) != dn[j]) && conv != 3) conv = 2; if (dn[j] == 1) { conv = 3; dn[j] = '_'; } if (dn[j] == 2) { conv = 3; dn[j--] = ' '; } } if (i > 0) conv = 3; dp--; } else { for (dp = cp; *--dp == ' ' || *dp == '.';); dp++; } /* * Now convert the rest of the name */ for (i = dp - un, j = 0; un < dp && j < 8; j++) { c = unix2doschr(&un, &i, pmp); if (c & 0xff00) { dn[j] = c >> 8; if (++j < 8) { dn[j] = c; if (conv != 3) conv = 2; continue; } else { conv = 3; dn[j-1] = ' '; break; } } else { dn[j] = c; } if (((dn[j] & 0x80) || *(un - 1) != dn[j]) && conv != 3) conv = 2; if (dn[j] == 1) { conv = 3; dn[j] = '_'; } if (dn[j] == 2) { conv = 3; dn[j--] = ' '; } } if (un < dp) conv = 3; /* * If we didn't have any chars in filename, * generate a default */ if (!j) dn[0] = '_'; /* * If there wasn't any char dropped, * there is no place for generation numbers */ if (conv != 3) { if (gen > 1) conv = 0; goto done; } /* * Now insert the generation number into the filename part */ if (gen == 0) goto done; for (wcp = gentext + sizeof(gentext); wcp > gentext && gen; gen /= 10) *--wcp = gen % 10 + '0'; if (gen) { conv = 0; goto done; } for (i = 8; dn[--i] == ' ';); i++; if (gentext + sizeof(gentext) - wcp + 1 > 8 - i) i = 8 - (gentext + sizeof(gentext) - wcp + 1); /* * Correct posision to where insert the generation number */ cp = dn; i -= mbsadjpos((const char**)&cp, i, unlen, 1, pmp->pm_flags, pmp->pm_d2u); dn[i++] = '~'; while (wcp < gentext + sizeof(gentext)) dn[i++] = *wcp++; /* * Tail of the filename should be space */ while (i < 8) dn[i++] = ' '; conv = 3; done: /* * The first character cannot be E5, * because that means a deleted entry */ if (dn[0] == 0xe5) dn[0] = SLOT_E5; return conv; } /* * Create a Win95 long name directory entry * Note: assumes that the filename is valid, * i.e. 
doesn't consist solely of blanks and dots */ int unix2winfn(const u_char *un, size_t unlen, struct winentry *wep, int cnt, int chksum, struct msdosfsmount *pmp) { uint8_t *wcp; int i, end; uint16_t code; /* * Drop trailing blanks and dots */ unlen = winLenFixup(un, unlen); /* * Cut *un for this slot */ unlen = mbsadjpos((const char **)&un, unlen, (cnt - 1) * WIN_CHARS, 2, pmp->pm_flags, pmp->pm_u2w); /* * Initialize winentry to some useful default */ - for (wcp = (uint8_t *)wep, i = sizeof(*wep); --i >= 0; *wcp++ = 0xff); + memset(wep, 0xff, sizeof(*wep)); wep->weCnt = cnt; wep->weAttributes = ATTR_WIN95; wep->weReserved1 = 0; wep->weChksum = chksum; wep->weReserved2 = 0; /* * Now convert the filename parts */ end = 0; for (wcp = wep->wePart1, i = sizeof(wep->wePart1)/2; --i >= 0 && !end;) { code = unix2winchr(&un, &unlen, 0, pmp); *wcp++ = code; *wcp++ = code >> 8; if (!code) end = WIN_LAST; } for (wcp = wep->wePart2, i = sizeof(wep->wePart2)/2; --i >= 0 && !end;) { code = unix2winchr(&un, &unlen, 0, pmp); *wcp++ = code; *wcp++ = code >> 8; if (!code) end = WIN_LAST; } for (wcp = wep->wePart3, i = sizeof(wep->wePart3)/2; --i >= 0 && !end;) { code = unix2winchr(&un, &unlen, 0, pmp); *wcp++ = code; *wcp++ = code >> 8; if (!code) end = WIN_LAST; } if (*un == '\0') end = WIN_LAST; wep->weCnt |= end; return !end; } /* * Compare our filename to the one in the Win95 entry * Returns the checksum, or a negative value if no match */ int winChkName(struct mbnambuf *nbp, const u_char *un, size_t unlen, int chksum, struct msdosfsmount *pmp) { size_t len; uint16_t c1, c2; u_char *np; struct dirent dirbuf; /* * We already have winentry in *nbp. */ if (!mbnambuf_flush(nbp, &dirbuf) || dirbuf.d_namlen == 0) return -1; #ifdef MSDOSFS_DEBUG printf("winChkName(): un=%s:%d,d_name=%s:%d\n", un, unlen, dirbuf.d_name, dirbuf.d_namlen); #endif /* * Compare the name parts */ len = dirbuf.d_namlen; if (unlen != len) return -2; for (np = dirbuf.d_name; unlen > 0 && len > 0;) { /* * Comparison must be case insensitive, because FAT does not * allow case-sensitive lookup or creation of files, even * with long file names. */ c1 = unix2winchr(__DECONST(const u_char **, &np), &len, LCASE_BASE, pmp); c2 = unix2winchr(&un, &unlen, LCASE_BASE, pmp); if (c1 != c2) return -2; } return chksum; } /* * Convert Win95 filename to dirbuf.
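 * (For orientation: each winentry carries up to WIN_CHARS (13) UCS-2
 * code units split across wePart1, wePart2 and wePart3; the loops
 * below reassemble each unit from its little-endian on-disk bytes,
 * e.g.
 *     code = (cp[1] << 8) | cp[0];
 * before converting it to the local encoding.)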
* Returns the checksum or -1 if impossible */ int win2unixfn(struct mbnambuf *nbp, struct winentry *wep, int chksum, struct msdosfsmount *pmp) { u_char *c, tmpbuf[5]; uint8_t *cp; uint8_t *np, name[WIN_CHARS * 3 + 1]; uint16_t code; int i; if ((wep->weCnt&WIN_CNT) > howmany(WIN_MAXLEN, WIN_CHARS) || !(wep->weCnt&WIN_CNT)) return -1; /* * First compare checksums */ if (wep->weCnt&WIN_LAST) { chksum = wep->weChksum; } else if (chksum != wep->weChksum) chksum = -1; if (chksum == -1) return -1; /* * Convert the name parts */ np = name; for (cp = wep->wePart1, i = sizeof(wep->wePart1)/2; --i >= 0;) { code = (cp[1] << 8) | cp[0]; switch (code) { case 0: *np = '\0'; if (mbnambuf_write(nbp, name, (wep->weCnt & WIN_CNT) - 1) != 0) return -1; return chksum; case '/': *np = '\0'; return -1; default: c = win2unixchr(tmpbuf, code, pmp); while (*c != '\0') *np++ = *c++; break; } cp += 2; } for (cp = wep->wePart2, i = sizeof(wep->wePart2)/2; --i >= 0;) { code = (cp[1] << 8) | cp[0]; switch (code) { case 0: *np = '\0'; if (mbnambuf_write(nbp, name, (wep->weCnt & WIN_CNT) - 1) != 0) return -1; return chksum; case '/': *np = '\0'; return -1; default: c = win2unixchr(tmpbuf, code, pmp); while (*c != '\0') *np++ = *c++; break; } cp += 2; } for (cp = wep->wePart3, i = sizeof(wep->wePart3)/2; --i >= 0;) { code = (cp[1] << 8) | cp[0]; switch (code) { case 0: *np = '\0'; if (mbnambuf_write(nbp, name, (wep->weCnt & WIN_CNT) - 1) != 0) return -1; return chksum; case '/': *np = '\0'; return -1; default: c = win2unixchr(tmpbuf, code, pmp); while (*c != '\0') *np++ = *c++; break; } cp += 2; } *np = '\0'; if (mbnambuf_write(nbp, name, (wep->weCnt & WIN_CNT) - 1) != 0) return -1; return chksum; } /* * Compute the unrolled checksum of a DOS filename for Win95 LFN use. */ uint8_t winChksum(uint8_t *name) { int i; uint8_t s; for (s = 0, i = 11; --i >= 0; s += *name++) s = (s << 7)|(s >> 1); return (s); } /* * Determine the number of slots necessary for Win95 names */ int winSlotCnt(const u_char *un, size_t unlen, struct msdosfsmount *pmp) { size_t wlen; char wn[WIN_MAXLEN * 2 + 1], *wnp; unlen = winLenFixup(un, unlen); if (pmp->pm_flags & MSDOSFSMNT_KICONV && msdosfs_iconv) { wlen = WIN_MAXLEN * 2; wnp = wn; msdosfs_iconv->conv(pmp->pm_u2w, (const char **)&un, &unlen, &wnp, &wlen); if (unlen > 0) return 0; return howmany(WIN_MAXLEN - wlen/2, WIN_CHARS); } if (unlen > WIN_MAXLEN) return 0; return howmany(unlen, WIN_CHARS); } /* * Strip trailing blanks and dots from a Win95 name and return the * resulting length */ size_t winLenFixup(const u_char *un, size_t unlen) { for (un += unlen; unlen > 0; unlen--) if (*--un != ' ' && *un != '.') break; return unlen; } /* * Advance the multibyte string *instr past as many characters as fit * into an output area of outlen units, each unit up to weight bytes * wide, and return the number of input bytes left over.
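 *
 * A minimal sketch of the non-iconv case, as implemented below: the
 * pointer simply advances byte-wise,
 *     (*instr) += min(inlen, outlen);
 * and the leftover count inlen - min(inlen, outlen) is returned.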
*/ static int mbsadjpos(const char **instr, size_t inlen, size_t outlen, int weight, int flag, void *handle) { char *outp, outstr[outlen * weight + 1]; if (flag & MSDOSFSMNT_KICONV && msdosfs_iconv) { outp = outstr; outlen *= weight; msdosfs_iconv->conv(handle, instr, &inlen, &outp, &outlen); return (inlen); } (*instr) += min(inlen, outlen); return (inlen - min(inlen, outlen)); } /* * Convert DOS char to Local char */ static u_char * dos2unixchr(u_char *outbuf, const u_char **instr, size_t *ilen, int lower, struct msdosfsmount *pmp) { u_char c, *outp; size_t len, olen; outp = outbuf; if (pmp->pm_flags & MSDOSFSMNT_KICONV && msdosfs_iconv) { olen = len = 4; if (lower & (LCASE_BASE | LCASE_EXT)) msdosfs_iconv->convchr_case(pmp->pm_d2u, (const char **)instr, ilen, (char **)&outp, &olen, KICONV_LOWER); else msdosfs_iconv->convchr(pmp->pm_d2u, (const char **)instr, ilen, (char **)&outp, &olen); len -= olen; /* * return '?' if failed to convert */ if (len == 0) { (*ilen)--; (*instr)++; *outp++ = '?'; } } else { (*ilen)--; c = *(*instr)++; c = dos2unix[c]; if (lower & (LCASE_BASE | LCASE_EXT)) c = u2l[c]; *outp++ = c; outbuf[1] = '\0'; } *outp = '\0'; outp = outbuf; return (outp); } /* * Convert Local char to DOS char */ static uint16_t unix2doschr(const u_char **instr, size_t *ilen, struct msdosfsmount *pmp) { u_char c; char *up, *outp, unicode[3], outbuf[3]; uint16_t wc; size_t len, ucslen, unixlen, olen; if (pmp->pm_flags & MSDOSFSMNT_KICONV && msdosfs_iconv) { /* * to hide an invisible character, using a unicode filter */ ucslen = 2; len = *ilen; up = unicode; msdosfs_iconv->convchr(pmp->pm_u2w, (const char **)instr, ilen, &up, &ucslen); unixlen = len - *ilen; /* * cannot be converted */ if (unixlen == 0) { (*ilen)--; (*instr)++; return (0); } /* * return magic number for ascii char */ if (unixlen == 1) { c = *(*instr -1); if (! (c & 0x80)) { c = unix2dos[c]; if (c <= 2) return (c); } } /* * now convert using libiconv */ *instr -= unixlen; *ilen = len; olen = len = 2; outp = outbuf; msdosfs_iconv->convchr_case(pmp->pm_u2d, (const char **)instr, ilen, &outp, &olen, KICONV_FROM_UPPER); len -= olen; /* * cannot be converted, but has unicode char, should return magic number */ if (len == 0) { (*ilen) -= unixlen; (*instr) += unixlen; return (1); } wc = 0; while(len--) wc |= (*(outp - len - 1) & 0xff) << (len << 3); return (wc); } (*ilen)--; c = *(*instr)++; c = l2u[c]; c = unix2dos[c]; return ((uint16_t)c); } /* * Convert Windows char to Local char */ static u_char * win2unixchr(u_char *outbuf, uint16_t wc, struct msdosfsmount *pmp) { u_char *inp, *outp, inbuf[3]; size_t ilen, olen, len; outp = outbuf; if (pmp->pm_flags & MSDOSFSMNT_KICONV && msdosfs_iconv) { inbuf[0] = (u_char)(wc>>8); inbuf[1] = (u_char)wc; inbuf[2] = '\0'; ilen = 2; olen = len = 4; inp = inbuf; msdosfs_iconv->convchr(pmp->pm_w2u, __DECONST(const char **, &inp), &ilen, (char **)&outp, &olen); len -= olen; /* * return '?' if failed to convert */ if (len == 0) *outp++ = '?'; } else { *outp++ = (wc & 0xff00) ? '?' 
: (u_char)(wc & 0xff); } *outp = '\0'; outp = outbuf; return (outp); } /* * Convert Local char to Windows char */ static uint16_t unix2winchr(const u_char **instr, size_t *ilen, int lower, struct msdosfsmount *pmp) { u_char *outp, outbuf[3]; uint16_t wc; size_t olen; if (*ilen == 0) return (0); if (pmp->pm_flags & MSDOSFSMNT_KICONV && msdosfs_iconv) { outp = outbuf; olen = 2; if (lower & (LCASE_BASE | LCASE_EXT)) msdosfs_iconv->convchr_case(pmp->pm_u2w, (const char **)instr, ilen, (char **)&outp, &olen, KICONV_FROM_LOWER); else msdosfs_iconv->convchr(pmp->pm_u2w, (const char **)instr, ilen, (char **)&outp, &olen); /* * return '0' if end of filename */ if (olen == 2) return (0); wc = (outbuf[0]<<8) | outbuf[1]; return (wc); } (*ilen)--; wc = (*instr)[0]; if (lower & (LCASE_BASE | LCASE_EXT)) wc = u2l[wc]; (*instr)++; return (wc); } /* * Initialize the temporary concatenation buffer. */ void mbnambuf_init(struct mbnambuf *nbp) { nbp->nb_len = 0; nbp->nb_last_id = -1; nbp->nb_buf[sizeof(nbp->nb_buf) - 1] = '\0'; } /* * Fill out our concatenation buffer with the given substring, at the offset * specified by its id. Since this function must be called with ids in * descending order, we take advantage of the fact that ASCII substrings are * exactly WIN_CHARS in length. For non-ASCII substrings, we shift all * previous (i.e. higher id) substrings upwards to make room for this one. * This only penalizes portions of substrings that contain more than * WIN_CHARS bytes when they are first encountered. */ int mbnambuf_write(struct mbnambuf *nbp, char *name, int id) { char *slot; size_t count, newlen; if (nbp->nb_len != 0 && id != nbp->nb_last_id - 1) { #ifdef MSDOSFS_DEBUG printf("msdosfs: non-decreasing id: id %d, last id %d\n", id, nbp->nb_last_id); #endif return (EINVAL); } /* Will store this substring in a WIN_CHARS-aligned slot. */ slot = &nbp->nb_buf[id * WIN_CHARS]; count = strlen(name); newlen = nbp->nb_len + count; if (newlen > WIN_MAXLEN || newlen > MAXNAMLEN) { #ifdef MSDOSFS_DEBUG printf("msdosfs: file name length %zu too large\n", newlen); #endif return (ENAMETOOLONG); } /* Shift suffix upwards by the amount length exceeds WIN_CHARS. */ if (count > WIN_CHARS && nbp->nb_len != 0) { if ((id * WIN_CHARS + count + nbp->nb_len) > sizeof(nbp->nb_buf)) return (ENAMETOOLONG); - bcopy(slot + WIN_CHARS, slot + count, nbp->nb_len); + memmove(slot + count, slot + WIN_CHARS, nbp->nb_len); } /* Copy in the substring to its slot and update length so far. */ - bcopy(name, slot, count); + memcpy(slot, name, count); nbp->nb_len = newlen; nbp->nb_last_id = id; return (0); } /* * Take the completed string and use it to setup the struct dirent. * Be sure to always nul-terminate the d_name and then copy the string * from our buffer. Note that this function assumes the full string has * been reassembled in the buffer. If it's called before all substrings * have been written via mbnambuf_write(), the result will be incorrect. 
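 *
 * A typical calling sequence while decoding one long-name run might
 * look like this (illustrative sketch only):
 *     mbnambuf_init(&nb);
 *     ... win2unixfn(&nb, wep, chksum, pmp) for each winentry,
 *         highest id first, which feeds mbnambuf_write() ...
 *     mbnambuf_flush(&nb, &dirbuf);    (assembles dirbuf.d_name)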
*/ char * mbnambuf_flush(struct mbnambuf *nbp, struct dirent *dp) { if (nbp->nb_len > sizeof(dp->d_name) - 1) { mbnambuf_init(nbp); return (NULL); } - bcopy(&nbp->nb_buf[0], dp->d_name, nbp->nb_len); + memcpy(dp->d_name, &nbp->nb_buf[0], nbp->nb_len); dp->d_name[nbp->nb_len] = '\0'; dp->d_namlen = nbp->nb_len; mbnambuf_init(nbp); return (dp->d_name); } Index: projects/clang500-import/sys/fs/msdosfs/msdosfs_denode.c =================================================================== --- projects/clang500-import/sys/fs/msdosfs/msdosfs_denode.c (revision 319548) +++ projects/clang500-import/sys/fs/msdosfs/msdosfs_denode.c (revision 319549) @@ -1,615 +1,615 @@ /* $FreeBSD$ */ /* $NetBSD: msdosfs_denode.c,v 1.28 1998/02/10 14:10:00 mrg Exp $ */ /*- * Copyright (C) 1994, 1995, 1997 Wolfgang Solfrank. * Copyright (C) 1994, 1995, 1997 TooLs GmbH. * All rights reserved. * Original code by Paul Popelka (paulp@uts.amdahl.com) (see below). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Written by Paul Popelka (paulp@uts.amdahl.com) * * You can do anything you want with this software, just don't say you wrote * it, and don't remove this notice. * * This software is provided "as is". * * The author supplies this software to be publicly redistributed on the * understanding that the author is not responsible for the correct * functioning of this software in any circumstances and is not liable for * any damages caused by this software. * * October 1992 */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_MSDOSFSNODE, "msdosfs_node", "MSDOSFS vnode private part"); static int de_vncmpf(struct vnode *vp, void *arg) { struct denode *de; uint64_t *a; a = arg; de = VTODE(vp); return (de->de_inode != *a); } /* * If deget() succeeds it returns with the gotten denode locked(). * * pmp - address of msdosfsmount structure of the filesystem containing * the denode of interest. 
The address of * the msdosfsmount structure is used. * dirclust - which cluster bp contains, if dirclust is 0 (root directory) * diroffset is relative to the beginning of the root directory, * otherwise it is cluster relative. * diroffset - offset past the beginning of the cluster of the denode we want * depp - returns the address of the gotten denode. */ int deget(struct msdosfsmount *pmp, u_long dirclust, u_long diroffset, struct denode **depp) { int error; uint64_t inode; struct mount *mntp = pmp->pm_mountp; struct direntry *direntptr; struct denode *ldep; struct vnode *nvp, *xvp; struct buf *bp; #ifdef MSDOSFS_DEBUG printf("deget(pmp %p, dirclust %lu, diroffset %lx, depp %p)\n", pmp, dirclust, diroffset, depp); #endif /* * On FAT32 filesystems, root is a (more or less) normal * directory */ if (FAT32(pmp) && dirclust == MSDOSFSROOT) dirclust = pmp->pm_rootdirblk; /* * See if the denode is in the denode cache. Use the location of * the directory entry to compute the hash value. For subdir use * address of "." entry. For root dir (if not FAT32) use cluster * MSDOSFSROOT, offset MSDOSFSROOT_OFS * * NOTE: The check for de_refcnt > 0 below ensures that the denode being * examined does not represent an unlinked but still open file. * These files are not to be accessible even when the directory * entry that represented the file happens to be reused while the * deleted file is still open. */ inode = (uint64_t)pmp->pm_bpcluster * dirclust + diroffset; error = vfs_hash_get(mntp, inode, LK_EXCLUSIVE, curthread, &nvp, de_vncmpf, &inode); if (error) return (error); if (nvp != NULL) { *depp = VTODE(nvp); KASSERT((*depp)->de_dirclust == dirclust, ("wrong dirclust")); KASSERT((*depp)->de_diroffset == diroffset, ("wrong diroffset")); return (0); } ldep = malloc(sizeof(struct denode), M_MSDOSFSNODE, M_WAITOK | M_ZERO); /* * Directory entry was not in cache, have to create a vnode and * copy it from the passed disk buffer. */ /* getnewvnode() does a VREF() on the vnode */ error = getnewvnode("msdosfs", mntp, &msdosfs_vnodeops, &nvp); if (error) { *depp = NULL; free(ldep, M_MSDOSFSNODE); return error; } nvp->v_data = ldep; ldep->de_vnode = nvp; ldep->de_flag = 0; ldep->de_dirclust = dirclust; ldep->de_diroffset = diroffset; ldep->de_inode = inode; lockmgr(nvp->v_vnlock, LK_EXCLUSIVE, NULL); fc_purge(ldep, 0); /* init the FAT cache for this denode */ error = insmntque(nvp, mntp); if (error != 0) { free(ldep, M_MSDOSFSNODE); *depp = NULL; return (error); } error = vfs_hash_insert(nvp, inode, LK_EXCLUSIVE, curthread, &xvp, de_vncmpf, &inode); if (error) { *depp = NULL; return (error); } if (xvp != NULL) { *depp = xvp->v_data; return (0); } ldep->de_pmp = pmp; ldep->de_refcnt = 1; /* * Copy the directory entry into the denode area of the vnode. */ if ((dirclust == MSDOSFSROOT || (FAT32(pmp) && dirclust == pmp->pm_rootdirblk)) && diroffset == MSDOSFSROOT_OFS) { /* * Directory entry for the root directory. There isn't one, * so we manufacture one. We should probably rummage * through the root directory and find a label entry (if it * exists), and then use the time and date from that entry * as the time and date for the root denode.
*/ nvp->v_vflag |= VV_ROOT; /* should be further down XXX */ ldep->de_Attributes = ATTR_DIRECTORY; ldep->de_LowerCase = 0; if (FAT32(pmp)) ldep->de_StartCluster = pmp->pm_rootdirblk; /* de_FileSize will be filled in further down */ else { ldep->de_StartCluster = MSDOSFSROOT; ldep->de_FileSize = pmp->pm_rootdirsize * DEV_BSIZE; } /* * fill in time and date so that fattime2timespec() doesn't * spit up when called from msdosfs_getattr() with root * denode */ ldep->de_CHun = 0; ldep->de_CTime = 0x0000; /* 00:00:00 */ ldep->de_CDate = (0 << DD_YEAR_SHIFT) | (1 << DD_MONTH_SHIFT) | (1 << DD_DAY_SHIFT); /* Jan 1, 1980 */ ldep->de_ADate = ldep->de_CDate; ldep->de_MTime = ldep->de_CTime; ldep->de_MDate = ldep->de_CDate; /* leave the other fields as garbage */ } else { error = readep(pmp, dirclust, diroffset, &bp, &direntptr); if (error) { /* * The denode does not contain anything useful, so * it would be wrong to leave it on its hash chain. * Arrange for vput() to just forget about it. */ ldep->de_Name[0] = SLOT_DELETED; vput(nvp); *depp = NULL; return (error); } (void)DE_INTERNALIZE(ldep, direntptr); brelse(bp); } /* * Fill in a few fields of the vnode and finish filling in the * denode. Then return the address of the found denode. */ if (ldep->de_Attributes & ATTR_DIRECTORY) { /* * Since DOS directory entries that describe directories * have 0 in the filesize field, we take this opportunity * to find out the length of the directory and plug it into * the denode structure. */ u_long size; /* * XXX it sometimes happens that the "." entry has cluster * number 0 when it shouldn't. Use the actual cluster number * instead of what is written in directory entry. */ if (diroffset == 0 && ldep->de_StartCluster != dirclust) { #ifdef MSDOSFS_DEBUG printf("deget(): \".\" entry at clust %lu != %lu\n", dirclust, ldep->de_StartCluster); #endif ldep->de_StartCluster = dirclust; } nvp->v_type = VDIR; if (ldep->de_StartCluster != MSDOSFSROOT) { error = pcbmap(ldep, 0xffff, 0, &size, 0); if (error == E2BIG) { ldep->de_FileSize = de_cn2off(pmp, size); error = 0; } else { #ifdef MSDOSFS_DEBUG printf("deget(): pcbmap returned %d\n", error); #endif } } } else nvp->v_type = VREG; ldep->de_modrev = init_va_filerev(); *depp = ldep; return (0); } int deupdat(struct denode *dep, int waitfor) { struct direntry dir; struct timespec ts; struct buf *bp; struct direntry *dirp; int error; if (DETOV(dep)->v_mount->mnt_flag & MNT_RDONLY) { dep->de_flag &= ~(DE_UPDATE | DE_CREATE | DE_ACCESS | DE_MODIFIED); return (0); } getnanotime(&ts); DETIMES(dep, &ts, &ts, &ts); if ((dep->de_flag & DE_MODIFIED) == 0 && waitfor == 0) return (0); dep->de_flag &= ~DE_MODIFIED; if (DETOV(dep)->v_vflag & VV_ROOT) return (EINVAL); if (dep->de_refcnt <= 0) return (0); error = readde(dep, &bp, &dirp); if (error) return (error); DE_EXTERNALIZE(&dir, dep); if (bcmp(dirp, &dir, sizeof(dir)) == 0) { if (waitfor == 0 || (bp->b_flags & B_DELWRI) == 0) { brelse(bp); return (0); } } else *dirp = dir; if ((DETOV(dep)->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) bp->b_flags |= B_CLUSTEROK; if (waitfor) error = bwrite(bp); else if (vm_page_count_severe() || buf_dirty_count_severe()) bawrite(bp); else bdwrite(bp); return (error); } /* * Truncate the file described by dep to the length specified by length. 
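 *
 * (A note on the bzero()/bcopy() to memset()/memcpy() conversion made
 * throughout this change: the replacements are behavior-preserving,
 * only the argument order differs:
 *     bcopy(src, dst, len)  ==  memcpy(dst, src, len)
 *     bzero(ptr, len)       ==  memset(ptr, 0, len)
 * with memmove() substituted where source and destination may
 * overlap.)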
*/ int detrunc(struct denode *dep, u_long length, int flags, struct ucred *cred) { int error; int allerror; u_long eofentry; u_long chaintofree; daddr_t bn; int boff; int isadir = dep->de_Attributes & ATTR_DIRECTORY; struct buf *bp; struct msdosfsmount *pmp = dep->de_pmp; #ifdef MSDOSFS_DEBUG printf("detrunc(): file %s, length %lu, flags %x\n", dep->de_Name, length, flags); #endif /* * Disallow attempts to truncate the root directory since it is of * fixed size. That's just the way dos filesystems are. We use * the VROOT bit in the vnode because checking for the directory * bit and a startcluster of 0 in the denode is not adequate to * recognize the root directory at this point in a file or * directory's life. */ if ((DETOV(dep)->v_vflag & VV_ROOT) && !FAT32(pmp)) { #ifdef MSDOSFS_DEBUG printf("detrunc(): can't truncate root directory, clust %ld, offset %ld\n", dep->de_dirclust, dep->de_diroffset); #endif return (EINVAL); } if (dep->de_FileSize < length) { vnode_pager_setsize(DETOV(dep), length); return deextend(dep, length, cred); } /* * If the desired length is 0 then remember the starting cluster of * the file and set the StartCluster field in the directory entry * to 0. If the desired length is not zero, then get the number of * the last cluster in the shortened file. Then get the number of * the first cluster in the part of the file that is to be freed. * Then set the next cluster pointer in the last cluster of the * file to CLUST_EOFE. */ if (length == 0) { chaintofree = dep->de_StartCluster; dep->de_StartCluster = 0; eofentry = ~0; } else { error = pcbmap(dep, de_clcount(pmp, length) - 1, 0, &eofentry, 0); if (error) { #ifdef MSDOSFS_DEBUG printf("detrunc(): pcbmap fails %d\n", error); #endif return (error); } } fc_purge(dep, de_clcount(pmp, length)); /* * If the new length is not a multiple of the cluster size then we * must zero the tail end of the new last cluster in case it * becomes part of the file again because of a seek. */ if ((boff = length & pmp->pm_crbomask) != 0) { if (isadir) { bn = cntobn(pmp, eofentry); error = bread(pmp->pm_devvp, bn, pmp->pm_bpcluster, NOCRED, &bp); if (error) { brelse(bp); #ifdef MSDOSFS_DEBUG printf("detrunc(): bread fails %d\n", error); #endif return (error); } - bzero(bp->b_data + boff, pmp->pm_bpcluster - boff); + memset(bp->b_data + boff, 0, pmp->pm_bpcluster - boff); if (flags & IO_SYNC) bwrite(bp); else bdwrite(bp); } } /* * Write out the updated directory entry. Even if the update fails * we free the trailing clusters. */ dep->de_FileSize = length; if (!isadir) dep->de_flag |= DE_UPDATE | DE_MODIFIED; allerror = vtruncbuf(DETOV(dep), cred, length, pmp->pm_bpcluster); #ifdef MSDOSFS_DEBUG if (allerror) printf("detrunc(): vtruncbuf error %d\n", allerror); #endif error = deupdat(dep, !DOINGASYNC((DETOV(dep)))); if (error != 0 && allerror == 0) allerror = error; #ifdef MSDOSFS_DEBUG printf("detrunc(): allerror %d, eofentry %lu\n", allerror, eofentry); #endif /* * If we need to break the cluster chain for the file then do it * now. */ if (eofentry != ~0) { error = fatentry(FAT_GET_AND_SET, pmp, eofentry, &chaintofree, CLUST_EOFE); if (error) { #ifdef MSDOSFS_DEBUG printf("detrunc(): fatentry errors %d\n", error); #endif return (error); } fc_setcache(dep, FC_LASTFC, de_cluster(pmp, length - 1), eofentry); } /* * Now free the clusters removed from the file because of the * truncation. 
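 * (chaintofree was recorded above, before the chain was cut; the
 * MSDOSFSEOF() test skips the free when the shortened file already
 * ended at the truncation point and nothing follows.)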
*/ if (chaintofree != 0 && !MSDOSFSEOF(pmp, chaintofree)) freeclusterchain(pmp, chaintofree); return (allerror); } /* * Extend the file described by dep to length specified by length. */ int deextend(struct denode *dep, u_long length, struct ucred *cred) { struct msdosfsmount *pmp = dep->de_pmp; u_long count; int error; /* * The root of a DOS filesystem cannot be extended. */ if ((DETOV(dep)->v_vflag & VV_ROOT) && !FAT32(pmp)) return (EINVAL); /* * Directories cannot be extended. */ if (dep->de_Attributes & ATTR_DIRECTORY) return (EISDIR); if (length <= dep->de_FileSize) panic("deextend: file too large"); /* * Compute the number of clusters to allocate. */ count = de_clcount(pmp, length) - de_clcount(pmp, dep->de_FileSize); if (count > 0) { if (count > pmp->pm_freeclustercount) return (ENOSPC); error = extendfile(dep, count, NULL, NULL, DE_CLEAR); if (error) { /* truncate the added clusters away again */ (void) detrunc(dep, dep->de_FileSize, 0, cred); return (error); } } dep->de_FileSize = length; dep->de_flag |= DE_UPDATE | DE_MODIFIED; return (deupdat(dep, !DOINGASYNC(DETOV(dep)))); } /* * Move a denode to its correct hash queue after the file it represents has * been moved to a new directory. */ void reinsert(struct denode *dep) { struct vnode *vp; /* * Fix up the denode cache. If the denode is for a directory, * there is nothing to do since the hash is based on the starting * cluster of the directory file and that hasn't changed. If for a * file the hash is based on the location of the directory entry, * so we must remove it from the cache and re-enter it with the * hash based on the new location of the directory entry. */ #if 0 if (dep->de_Attributes & ATTR_DIRECTORY) return; #endif vp = DETOV(dep); dep->de_inode = (uint64_t)dep->de_pmp->pm_bpcluster * dep->de_dirclust + dep->de_diroffset; vfs_hash_rehash(vp, dep->de_inode); } int msdosfs_reclaim(struct vop_reclaim_args *ap) { struct vnode *vp = ap->a_vp; struct denode *dep = VTODE(vp); #ifdef MSDOSFS_DEBUG printf("msdosfs_reclaim(): dep %p, file %s, refcnt %ld\n", dep, dep->de_Name, dep->de_refcnt); #endif /* * Destroy the vm object and flush associated pages. */ vnode_destroy_vobject(vp); /* * Remove the denode from its hash chain. */ vfs_hash_remove(vp); /* * Purge old data structures associated with the denode. */ #if 0 /* XXX */ dep->de_flag = 0; #endif free(dep, M_MSDOSFSNODE); vp->v_data = NULL; return (0); } int msdosfs_inactive(struct vop_inactive_args *ap) { struct vnode *vp = ap->a_vp; struct denode *dep = VTODE(vp); int error = 0; #ifdef MSDOSFS_DEBUG printf("msdosfs_inactive(): dep %p, de_Name[0] %x\n", dep, dep->de_Name[0]); #endif /* * Ignore denodes related to stale file handles. */ if (dep->de_Name[0] == SLOT_DELETED || dep->de_Name[0] == SLOT_EMPTY) goto out; /* * If the file has been deleted and it is on a read/write * filesystem, then truncate the file, and mark the directory slot * as empty. (This may not be necessary for the dos filesystem.) */ #ifdef MSDOSFS_DEBUG printf("msdosfs_inactive(): dep %p, refcnt %ld, mntflag %x, MNT_RDONLY %x\n", dep, dep->de_refcnt, vp->v_mount->mnt_flag, MNT_RDONLY); #endif if (dep->de_refcnt <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { error = detrunc(dep, (u_long) 0, 0, NOCRED); dep->de_flag |= DE_UPDATE; dep->de_Name[0] = SLOT_DELETED; } deupdat(dep, 0); out: /* * If we are done with the denode, reclaim it * so that it can be reused immediately. 
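 * (As the code below shows, vrecycle() is only attempted when the
 * slot is deleted or empty; a denode still backed by a live
 * directory entry stays cached for reuse.)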
*/ #ifdef MSDOSFS_DEBUG printf("msdosfs_inactive(): v_usecount %d, de_Name[0] %x\n", vrefcnt(vp), dep->de_Name[0]); #endif if (dep->de_Name[0] == SLOT_DELETED || dep->de_Name[0] == SLOT_EMPTY) vrecycle(vp); return (error); } Index: projects/clang500-import/sys/fs/msdosfs/msdosfs_fat.c =================================================================== --- projects/clang500-import/sys/fs/msdosfs/msdosfs_fat.c (revision 319548) +++ projects/clang500-import/sys/fs/msdosfs/msdosfs_fat.c (revision 319549) @@ -1,1163 +1,1163 @@ /* $FreeBSD$ */ /* $NetBSD: msdosfs_fat.c,v 1.28 1997/11/17 15:36:49 ws Exp $ */ /*- * Copyright (C) 1994, 1995, 1997 Wolfgang Solfrank. * Copyright (C) 1994, 1995, 1997 TooLs GmbH. * All rights reserved. * Original code by Paul Popelka (paulp@uts.amdahl.com) (see below). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Written by Paul Popelka (paulp@uts.amdahl.com) * * You can do anything you want with this software, just don't say you wrote * it, and don't remove this notice. * * This software is provided "as is". * * The author supplies this software to be publicly redistributed on the * understanding that the author is not responsible for the correct * functioning of this software in any circumstances and is not liable for * any damages caused by this software. 
* * October 1992 */ #include #include #include #include #include #include #include #include #include #include #define FULL_RUN ((u_int)0xffffffff) static int chainalloc(struct msdosfsmount *pmp, u_long start, u_long count, u_long fillwith, u_long *retcluster, u_long *got); static int chainlength(struct msdosfsmount *pmp, u_long start, u_long count); static void fatblock(struct msdosfsmount *pmp, u_long ofs, u_long *bnp, u_long *sizep, u_long *bop); static int fatchain(struct msdosfsmount *pmp, u_long start, u_long count, u_long fillwith); static void fc_lookup(struct denode *dep, u_long findcn, u_long *frcnp, u_long *fsrcnp); static void updatefats(struct msdosfsmount *pmp, struct buf *bp, u_long fatbn); static __inline void usemap_alloc(struct msdosfsmount *pmp, u_long cn); static __inline void usemap_free(struct msdosfsmount *pmp, u_long cn); static int clusteralloc1(struct msdosfsmount *pmp, u_long start, u_long count, u_long fillwith, u_long *retcluster, u_long *got); static void fatblock(struct msdosfsmount *pmp, u_long ofs, u_long *bnp, u_long *sizep, u_long *bop) { u_long bn, size; bn = ofs / pmp->pm_fatblocksize * pmp->pm_fatblocksec; size = min(pmp->pm_fatblocksec, pmp->pm_FATsecs - bn) * DEV_BSIZE; bn += pmp->pm_fatblk + pmp->pm_curfat * pmp->pm_FATsecs; if (bnp) *bnp = bn; if (sizep) *sizep = size; if (bop) *bop = ofs % pmp->pm_fatblocksize; } /* * Map the logical cluster number of a file into a physical disk sector * that is filesystem relative. * * dep - address of denode representing the file of interest * findcn - file relative cluster whose filesystem relative cluster number * and/or block number are/is to be found * bnp - address of where to place the filesystem relative block number. * If this pointer is null then don't return this quantity. * cnp - address of where to place the filesystem relative cluster number. * If this pointer is null then don't return this quantity. * sp - pointer to returned block size * * NOTE: Either bnp or cnp must be non-null. * This function has one side effect. If the requested file relative cluster * is beyond the end of file, then the actual number of clusters in the file * is returned in *cnp. This is useful for determining how long a directory is. * If cnp is null, nothing is returned. */ int pcbmap(struct denode *dep, u_long findcn, daddr_t *bnp, u_long *cnp, int *sp) { int error; u_long i; u_long cn; u_long prevcn = 0; /* XXX: prevcn could be used uninitialized */ u_long byteoffset; u_long bn; u_long bo; struct buf *bp = NULL; u_long bp_bn = -1; struct msdosfsmount *pmp = dep->de_pmp; u_long bsize; KASSERT(bnp != NULL || cnp != NULL || sp != NULL, ("pcbmap: extra call")); ASSERT_VOP_ELOCKED(DETOV(dep), "pcbmap"); cn = dep->de_StartCluster; /* * The "file" that makes up the root directory is contiguous, * permanently allocated, of fixed size, and is not made up of * clusters. If the cluster number is beyond the end of the root * directory, then return the number of clusters in the file.
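 *
 * (For reference when reading the FAT walk below: FAT12 packs two
 * entries into three bytes, FAT16 stores one 16-bit entry per
 * cluster, and FAT32 one 32-bit entry of which only the bits in
 * pm_fatmask are significant.)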
*/ if (cn == MSDOSFSROOT) { if (dep->de_Attributes & ATTR_DIRECTORY) { if (de_cn2off(pmp, findcn) >= dep->de_FileSize) { if (cnp) *cnp = de_bn2cn(pmp, pmp->pm_rootdirsize); return (E2BIG); } if (bnp) *bnp = pmp->pm_rootdirblk + de_cn2bn(pmp, findcn); if (cnp) *cnp = MSDOSFSROOT; if (sp) *sp = min(pmp->pm_bpcluster, dep->de_FileSize - de_cn2off(pmp, findcn)); return (0); } else { /* just an empty file */ if (cnp) *cnp = 0; return (E2BIG); } } /* * All other files do I/O in cluster sized blocks */ if (sp) *sp = pmp->pm_bpcluster; /* * Rummage around in the FAT cache, maybe we can avoid tromping * through every FAT entry for the file. And, keep track of how far * off the cache was from where we wanted to be. */ i = 0; fc_lookup(dep, findcn, &i, &cn); /* * Handle all other files or directories the normal way. */ for (; i < findcn; i++) { /* * Stop with all reserved clusters, not just with EOF. */ if ((cn | ~pmp->pm_fatmask) >= CLUST_RSRVD) goto hiteof; byteoffset = FATOFS(pmp, cn); fatblock(pmp, byteoffset, &bn, &bsize, &bo); if (bn != bp_bn) { if (bp) brelse(bp); error = bread(pmp->pm_devvp, bn, bsize, NOCRED, &bp); if (error) { brelse(bp); return (error); } bp_bn = bn; } prevcn = cn; if (bo >= bsize) { if (bp) brelse(bp); return (EIO); } if (FAT32(pmp)) cn = getulong(&bp->b_data[bo]); else cn = getushort(&bp->b_data[bo]); if (FAT12(pmp) && (prevcn & 1)) cn >>= 4; cn &= pmp->pm_fatmask; /* * Force the special cluster numbers * to be the same for all cluster sizes * to let the rest of msdosfs handle * all cases the same. */ if ((cn | ~pmp->pm_fatmask) >= CLUST_RSRVD) cn |= ~pmp->pm_fatmask; } if (!MSDOSFSEOF(pmp, cn)) { if (bp) brelse(bp); if (bnp) *bnp = cntobn(pmp, cn); if (cnp) *cnp = cn; fc_setcache(dep, FC_LASTMAP, i, cn); return (0); } hiteof:; if (cnp) *cnp = i; if (bp) brelse(bp); /* update last file cluster entry in the FAT cache */ fc_setcache(dep, FC_LASTFC, i - 1, prevcn); return (E2BIG); } /* * Find the closest entry in the FAT cache to the cluster we are looking * for. */ static void fc_lookup(struct denode *dep, u_long findcn, u_long *frcnp, u_long *fsrcnp) { int i; u_long cn; struct fatcache *closest = NULL; ASSERT_VOP_LOCKED(DETOV(dep), "fc_lookup"); for (i = 0; i < FC_SIZE; i++) { cn = dep->de_fc[i].fc_frcn; if (cn != FCE_EMPTY && cn <= findcn) { if (closest == NULL || cn > closest->fc_frcn) closest = &dep->de_fc[i]; } } if (closest) { *frcnp = closest->fc_frcn; *fsrcnp = closest->fc_fsrcn; } } /* * Purge the FAT cache in denode dep of all entries relating to file * relative cluster frcn and beyond. */ void fc_purge(struct denode *dep, u_int frcn) { int i; struct fatcache *fcp; ASSERT_VOP_ELOCKED(DETOV(dep), "fc_purge"); fcp = dep->de_fc; for (i = 0; i < FC_SIZE; i++, fcp++) { if (fcp->fc_frcn >= frcn) fcp->fc_frcn = FCE_EMPTY; } } /* * Update the FAT. * If mirroring the FAT, update all copies, with the first copy as last. * Else update only the current FAT (ignoring the others). * * pmp - msdosfsmount structure for filesystem to update * bp - addr of modified FAT block * fatbn - block number relative to begin of filesystem of the modified FAT block. */ static void updatefats(struct msdosfsmount *pmp, struct buf *bp, u_long fatbn) { struct buf *bpn; int cleanfat, i; #ifdef MSDOSFS_DEBUG printf("updatefats(pmp %p, bp %p, fatbn %lu)\n", pmp, bp, fatbn); #endif if (pmp->pm_flags & MSDOSFS_FATMIRROR) { /* * Now copy the block(s) of the modified FAT to the other copies of * the FAT and write them out. This is faster than reading in the * other FATs and then writing them back out. 
This could tie up * the FAT for quite a while, preventing others from accessing it. * To prevent us from going after the FAT quite so much we use * delayed writes, unless they specified "synchronous" when the * filesystem was mounted. If sync is asked for, then use * bwrite()s and really slow things down. */ if (fatbn != pmp->pm_fatblk || FAT12(pmp)) cleanfat = 0; else if (FAT16(pmp)) cleanfat = 16; else cleanfat = 32; for (i = 1; i < pmp->pm_FATs; i++) { fatbn += pmp->pm_FATsecs; /* getblk() never fails */ bpn = getblk(pmp->pm_devvp, fatbn, bp->b_bcount, 0, 0, 0); - bcopy(bp->b_data, bpn->b_data, bp->b_bcount); + memcpy(bpn->b_data, bp->b_data, bp->b_bcount); /* Force the clean bit on in the other copies. */ if (cleanfat == 16) ((uint8_t *)bpn->b_data)[3] |= 0x80; else if (cleanfat == 32) ((uint8_t *)bpn->b_data)[7] |= 0x08; if (pmp->pm_mountp->mnt_flag & MNT_SYNCHRONOUS) bwrite(bpn); else bdwrite(bpn); } } /* * Write out the first (or current) FAT last. */ if (pmp->pm_mountp->mnt_flag & MNT_SYNCHRONOUS) bwrite(bp); else bdwrite(bp); } /* * Updating entries in 12 bit FATs is a pain in the butt. * * The following picture shows where nibbles go when moving from a 12 bit * cluster number into the appropriate bytes in the FAT. * * byte m byte m+1 byte m+2 * +----+----+ +----+----+ +----+----+ * | 0 1 | | 2 3 | | 4 5 | FAT bytes * +----+----+ +----+----+ +----+----+ * * +----+----+----+ +----+----+----+ * | 3 0 1 | | 4 5 2 | * +----+----+----+ +----+----+----+ * cluster n cluster n+1 * * Where n is even. m = n + (n >> 1) * */ static __inline void usemap_alloc(struct msdosfsmount *pmp, u_long cn) { MSDOSFS_ASSERT_MP_LOCKED(pmp); KASSERT(cn <= pmp->pm_maxcluster, ("cn too large %lu %lu", cn, pmp->pm_maxcluster)); KASSERT((pmp->pm_flags & MSDOSFSMNT_RONLY) == 0, ("usemap_alloc on ro msdosfs mount")); KASSERT((pmp->pm_inusemap[cn / N_INUSEBITS] & (1 << (cn % N_INUSEBITS))) == 0, ("Allocating used sector %ld %ld %x", cn, cn % N_INUSEBITS, (unsigned)pmp->pm_inusemap[cn / N_INUSEBITS])); pmp->pm_inusemap[cn / N_INUSEBITS] |= 1 << (cn % N_INUSEBITS); KASSERT(pmp->pm_freeclustercount > 0, ("usemap_alloc: too little")); pmp->pm_freeclustercount--; pmp->pm_flags |= MSDOSFS_FSIMOD; } static __inline void usemap_free(struct msdosfsmount *pmp, u_long cn) { MSDOSFS_ASSERT_MP_LOCKED(pmp); KASSERT(cn <= pmp->pm_maxcluster, ("cn too large %lu %lu", cn, pmp->pm_maxcluster)); KASSERT((pmp->pm_flags & MSDOSFSMNT_RONLY) == 0, ("usemap_free on ro msdosfs mount")); pmp->pm_freeclustercount++; pmp->pm_flags |= MSDOSFS_FSIMOD; KASSERT((pmp->pm_inusemap[cn / N_INUSEBITS] & (1 << (cn % N_INUSEBITS))) != 0, ("Freeing unused sector %ld %ld %x", cn, cn % N_INUSEBITS, (unsigned)pmp->pm_inusemap[cn / N_INUSEBITS])); pmp->pm_inusemap[cn / N_INUSEBITS] &= ~(1 << (cn % N_INUSEBITS)); } int clusterfree(struct msdosfsmount *pmp, u_long cluster, u_long *oldcnp) { int error; u_long oldcn; error = fatentry(FAT_GET_AND_SET, pmp, cluster, &oldcn, MSDOSFSFREE); if (error) return (error); /* * If the cluster was successfully marked free, then update * the count of free clusters, and turn off the "allocated" * bit in the "in use" cluster bit map. */ MSDOSFS_LOCK_MP(pmp); usemap_free(pmp, cluster); MSDOSFS_UNLOCK_MP(pmp); if (oldcnp) *oldcnp = oldcn; return (0); } /* * Get or Set or 'Get and Set' the cluster'th entry in the FAT. * * function - whether to get or set a FAT entry * pmp - address of the msdosfsmount structure for the filesystem * whose FAT is to be manipulated.
* cn - which cluster is of interest * oldcontents - address of a word that is to receive the contents of the * cluster'th entry if this is a get function * newcontents - the new value to be written into the cluster'th element of * the FAT if this is a set function. * * This function can also be used to free a cluster by setting the FAT entry * for a cluster to 0. * * All copies of the FAT are updated if this is a set function. NOTE: If * fatentry() marks a cluster as free it does not update the inusemap in * the msdosfsmount structure. This is left to the caller. */ int fatentry(int function, struct msdosfsmount *pmp, u_long cn, u_long *oldcontents, u_long newcontents) { int error; u_long readcn; u_long bn, bo, bsize, byteoffset; struct buf *bp; #ifdef MSDOSFS_DEBUG printf("fatentry(func %d, pmp %p, clust %lu, oldcon %p, newcon %lx)\n", function, pmp, cn, oldcontents, newcontents); #endif #ifdef DIAGNOSTIC /* * Be sure they asked us to do something. */ if ((function & (FAT_SET | FAT_GET)) == 0) { #ifdef MSDOSFS_DEBUG printf("fatentry(): function code doesn't specify get or set\n"); #endif return (EINVAL); } /* * If they asked us to return a cluster number but didn't tell us * where to put it, give them an error. */ if ((function & FAT_GET) && oldcontents == NULL) { #ifdef MSDOSFS_DEBUG printf("fatentry(): get function with no place to put result\n"); #endif return (EINVAL); } #endif /* * Be sure the requested cluster is in the filesystem. */ if (cn < CLUST_FIRST || cn > pmp->pm_maxcluster) return (EINVAL); byteoffset = FATOFS(pmp, cn); fatblock(pmp, byteoffset, &bn, &bsize, &bo); error = bread(pmp->pm_devvp, bn, bsize, NOCRED, &bp); if (error) { brelse(bp); return (error); } if (function & FAT_GET) { if (FAT32(pmp)) readcn = getulong(&bp->b_data[bo]); else readcn = getushort(&bp->b_data[bo]); if (FAT12(pmp) && (cn & 1)) readcn >>= 4; readcn &= pmp->pm_fatmask; /* map reserved FAT entries to same values for all FATs */ if ((readcn | ~pmp->pm_fatmask) >= CLUST_RSRVD) readcn |= ~pmp->pm_fatmask; *oldcontents = readcn; } if (function & FAT_SET) { switch (pmp->pm_fatmask) { case FAT12_MASK: readcn = getushort(&bp->b_data[bo]); if (cn & 1) { readcn &= 0x000f; readcn |= newcontents << 4; } else { readcn &= 0xf000; readcn |= newcontents & 0xfff; } putushort(&bp->b_data[bo], readcn); break; case FAT16_MASK: putushort(&bp->b_data[bo], newcontents); break; case FAT32_MASK: /* * According to spec we have to retain the * high order bits of the FAT entry. */ readcn = getulong(&bp->b_data[bo]); readcn &= ~FAT32_MASK; readcn |= newcontents & FAT32_MASK; putulong(&bp->b_data[bo], readcn); break; } updatefats(pmp, bp, bn); bp = NULL; pmp->pm_fmod = 1; } if (bp) brelse(bp); return (0); } /* * Update a contiguous cluster chain * * pmp - mount point * start - first cluster of chain * count - number of clusters in chain * fillwith - what to write into FAT entry of last cluster */ static int fatchain(struct msdosfsmount *pmp, u_long start, u_long count, u_long fillwith) { int error; u_long bn, bo, bsize, byteoffset, readcn, newc; struct buf *bp; #ifdef MSDOSFS_DEBUG printf("fatchain(pmp %p, start %lu, count %lu, fillwith %lx)\n", pmp, start, count, fillwith); #endif /* * Be sure the clusters are in the filesystem.
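 * (CLUST_FIRST is 2; cluster numbers 0 and 1 are reserved by the
 * FAT format, so a valid chain must lie within
 * [CLUST_FIRST, pm_maxcluster].)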
*/ if (start < CLUST_FIRST || start + count - 1 > pmp->pm_maxcluster) return (EINVAL); while (count > 0) { byteoffset = FATOFS(pmp, start); fatblock(pmp, byteoffset, &bn, &bsize, &bo); error = bread(pmp->pm_devvp, bn, bsize, NOCRED, &bp); if (error) { brelse(bp); return (error); } while (count > 0) { start++; newc = --count > 0 ? start : fillwith; switch (pmp->pm_fatmask) { case FAT12_MASK: readcn = getushort(&bp->b_data[bo]); if (start & 1) { readcn &= 0xf000; readcn |= newc & 0xfff; } else { readcn &= 0x000f; readcn |= newc << 4; } putushort(&bp->b_data[bo], readcn); bo++; if (!(start & 1)) bo++; break; case FAT16_MASK: putushort(&bp->b_data[bo], newc); bo += 2; break; case FAT32_MASK: readcn = getulong(&bp->b_data[bo]); readcn &= ~pmp->pm_fatmask; readcn |= newc & pmp->pm_fatmask; putulong(&bp->b_data[bo], readcn); bo += 4; break; } if (bo >= bsize) break; } updatefats(pmp, bp, bn); } pmp->pm_fmod = 1; return (0); } /* * Check the length of a free cluster chain starting at start. * * pmp - mount point * start - start of chain * count - maximum interesting length */ static int chainlength(struct msdosfsmount *pmp, u_long start, u_long count) { u_long idx, max_idx; u_int map; u_long len; MSDOSFS_ASSERT_MP_LOCKED(pmp); if (start > pmp->pm_maxcluster) return (0); max_idx = pmp->pm_maxcluster / N_INUSEBITS; idx = start / N_INUSEBITS; start %= N_INUSEBITS; map = pmp->pm_inusemap[idx]; map &= ~((1 << start) - 1); if (map) { len = ffs(map) - 1 - start; len = MIN(len, count); if (start + len > pmp->pm_maxcluster) len = pmp->pm_maxcluster - start + 1; return (len); } len = N_INUSEBITS - start; if (len >= count) { len = count; if (start + len > pmp->pm_maxcluster) len = pmp->pm_maxcluster - start + 1; return (len); } while (++idx <= max_idx) { if (len >= count) break; map = pmp->pm_inusemap[idx]; if (map) { len += ffs(map) - 1; break; } len += N_INUSEBITS; } len = MIN(len, count); if (start + len > pmp->pm_maxcluster) len = pmp->pm_maxcluster - start + 1; return (len); } /* * Allocate contiguous free clusters. * * pmp - mount point. * start - start of cluster chain. * count - number of clusters to allocate. * fillwith - put this value into the FAT entry for the * last allocated cluster. * retcluster - put the first allocated cluster's number here. * got - how many clusters were actually allocated. */ static int chainalloc(struct msdosfsmount *pmp, u_long start, u_long count, u_long fillwith, u_long *retcluster, u_long *got) { int error; u_long cl, n; MSDOSFS_ASSERT_MP_LOCKED(pmp); KASSERT((pmp->pm_flags & MSDOSFSMNT_RONLY) == 0, ("chainalloc on ro msdosfs mount")); for (cl = start, n = count; n-- > 0;) usemap_alloc(pmp, cl++); pmp->pm_nxtfree = start + count; if (pmp->pm_nxtfree > pmp->pm_maxcluster) pmp->pm_nxtfree = CLUST_FIRST; pmp->pm_flags |= MSDOSFS_FSIMOD; error = fatchain(pmp, start, count, fillwith); if (error != 0) { for (cl = start, n = count; n-- > 0;) usemap_free(pmp, cl++); return (error); } #ifdef MSDOSFS_DEBUG printf("clusteralloc(): allocated cluster chain at %lu (%lu clusters)\n", start, count); #endif if (retcluster) *retcluster = start; if (got) *got = count; return (0); } /* * Allocate contiguous free clusters. * * pmp - mount point. * start - preferred start of cluster chain. * count - number of clusters requested. * fillwith - put this value into the FAT entry for the * last allocated cluster. * retcluster - put the first allocated cluster's number here. * got - how many clusters were actually allocated.
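* * (Usage sketch, taken from extendfile() later in this file: error = clusteralloc(pmp, cn, count, CLUST_EOFE, &cn, &got); where cn is 0 when the file is empty and there is no preferred starting cluster, and got reports how much of the request was satisfied contiguously so the caller can loop for the remainder.)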
*/ int clusteralloc(struct msdosfsmount *pmp, u_long start, u_long count, u_long fillwith, u_long *retcluster, u_long *got) { int error; MSDOSFS_LOCK_MP(pmp); error = clusteralloc1(pmp, start, count, fillwith, retcluster, got); MSDOSFS_UNLOCK_MP(pmp); return (error); } static int clusteralloc1(struct msdosfsmount *pmp, u_long start, u_long count, u_long fillwith, u_long *retcluster, u_long *got) { u_long idx; u_long len, newst, foundl, cn, l; u_long foundcn = 0; /* XXX: foundcn could be used uninitialized */ u_int map; MSDOSFS_ASSERT_MP_LOCKED(pmp); #ifdef MSDOSFS_DEBUG printf("clusteralloc(): find %lu clusters\n", count); #endif if (start) { if ((len = chainlength(pmp, start, count)) >= count) return (chainalloc(pmp, start, count, fillwith, retcluster, got)); } else len = 0; newst = pmp->pm_nxtfree; foundl = 0; for (cn = newst; cn <= pmp->pm_maxcluster;) { idx = cn / N_INUSEBITS; map = pmp->pm_inusemap[idx]; map |= (1 << (cn % N_INUSEBITS)) - 1; if (map != FULL_RUN) { cn = idx * N_INUSEBITS + ffs(map ^ FULL_RUN) - 1; if ((l = chainlength(pmp, cn, count)) >= count) return (chainalloc(pmp, cn, count, fillwith, retcluster, got)); if (l > foundl) { foundcn = cn; foundl = l; } cn += l + 1; continue; } cn += N_INUSEBITS - cn % N_INUSEBITS; } for (cn = 0; cn < newst;) { idx = cn / N_INUSEBITS; map = pmp->pm_inusemap[idx]; map |= (1 << (cn % N_INUSEBITS)) - 1; if (map != FULL_RUN) { cn = idx * N_INUSEBITS + ffs(map ^ FULL_RUN) - 1; if ((l = chainlength(pmp, cn, count)) >= count) return (chainalloc(pmp, cn, count, fillwith, retcluster, got)); if (l > foundl) { foundcn = cn; foundl = l; } cn += l + 1; continue; } cn += N_INUSEBITS - cn % N_INUSEBITS; } if (!foundl) return (ENOSPC); if (len) return (chainalloc(pmp, start, len, fillwith, retcluster, got)); else return (chainalloc(pmp, foundcn, foundl, fillwith, retcluster, got)); } /* * Free a chain of clusters. * * pmp - address of the msdosfs mount structure for the filesystem * containing the cluster chain to be freed. * startcluster - number of the 1st cluster in the chain of clusters to be * freed. */ int freeclusterchain(struct msdosfsmount *pmp, u_long cluster) { int error; struct buf *bp = NULL; u_long bn, bo, bsize, byteoffset; u_long readcn, lbn = -1; MSDOSFS_LOCK_MP(pmp); while (cluster >= CLUST_FIRST && cluster <= pmp->pm_maxcluster) { byteoffset = FATOFS(pmp, cluster); fatblock(pmp, byteoffset, &bn, &bsize, &bo); if (lbn != bn) { if (bp) updatefats(pmp, bp, lbn); error = bread(pmp->pm_devvp, bn, bsize, NOCRED, &bp); if (error) { brelse(bp); MSDOSFS_UNLOCK_MP(pmp); return (error); } lbn = bn; } usemap_free(pmp, cluster); switch (pmp->pm_fatmask) { case FAT12_MASK: readcn = getushort(&bp->b_data[bo]); if (cluster & 1) { cluster = readcn >> 4; readcn &= 0x000f; readcn |= MSDOSFSFREE << 4; } else { cluster = readcn; readcn &= 0xf000; readcn |= MSDOSFSFREE & 0xfff; } putushort(&bp->b_data[bo], readcn); break; case FAT16_MASK: cluster = getushort(&bp->b_data[bo]); putushort(&bp->b_data[bo], MSDOSFSFREE); break; case FAT32_MASK: cluster = getulong(&bp->b_data[bo]); putulong(&bp->b_data[bo], (MSDOSFSFREE & FAT32_MASK) | (cluster & ~FAT32_MASK)); break; } cluster &= pmp->pm_fatmask; if ((cluster | ~pmp->pm_fatmask) >= CLUST_RSRVD) cluster |= pmp->pm_fatmask; } if (bp) updatefats(pmp, bp, bn); MSDOSFS_UNLOCK_MP(pmp); return (0); } /* * Read in FAT blocks looking for free clusters. For every free cluster * found turn off its corresponding bit in the pm_inusemap.
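* * (For reference, the in-use bitmap convention used by usemap_alloc() and usemap_free() above: cluster cn is considered allocated iff pmp->pm_inusemap[cn / N_INUSEBITS] & (1 << (cn % N_INUSEBITS)) is non-zero; this routine clears exactly those bits whose FAT entries read back as CLUST_FREE.)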
*/ int fillinusemap(struct msdosfsmount *pmp) { struct buf *bp = NULL; u_long cn, readcn; int error; u_long bn, bo, bsize, byteoffset; MSDOSFS_ASSERT_MP_LOCKED(pmp); /* * Mark all clusters in use, we mark the free ones in the FAT scan * loop further down. */ for (cn = 0; cn < (pmp->pm_maxcluster + N_INUSEBITS) / N_INUSEBITS; cn++) pmp->pm_inusemap[cn] = FULL_RUN; /* * Figure how many free clusters are in the filesystem by ripping * through the FAT counting the number of entries whose content is * zero. These represent free clusters. */ pmp->pm_freeclustercount = 0; for (cn = CLUST_FIRST; cn <= pmp->pm_maxcluster; cn++) { byteoffset = FATOFS(pmp, cn); bo = byteoffset % pmp->pm_fatblocksize; if (!bo || !bp) { /* Read new FAT block */ if (bp) brelse(bp); fatblock(pmp, byteoffset, &bn, &bsize, NULL); error = bread(pmp->pm_devvp, bn, bsize, NOCRED, &bp); if (error) { brelse(bp); return (error); } } if (FAT32(pmp)) readcn = getulong(&bp->b_data[bo]); else readcn = getushort(&bp->b_data[bo]); if (FAT12(pmp) && (cn & 1)) readcn >>= 4; readcn &= pmp->pm_fatmask; if (readcn == CLUST_FREE) usemap_free(pmp, cn); } if (bp != NULL) brelse(bp); for (cn = pmp->pm_maxcluster + 1; cn < (pmp->pm_maxcluster + N_INUSEBITS) / N_INUSEBITS * N_INUSEBITS; cn++) pmp->pm_inusemap[cn / N_INUSEBITS] |= 1 << (cn % N_INUSEBITS); return (0); } /* * Allocate a new cluster and chain it onto the end of the file. * * dep - the file to extend * count - number of clusters to allocate * bpp - where to return the address of the buf header for the first new * file block * ncp - where to put cluster number of the first newly allocated cluster * If this pointer is 0, do not return the cluster number. * flags - see fat.h * * NOTE: This function is not responsible for turning on the DE_UPDATE bit of * the de_flag field of the denode and it does not change the de_FileSize * field. This is left for the caller to do. */ int extendfile(struct denode *dep, u_long count, struct buf **bpp, u_long *ncp, int flags) { int error; u_long frcn; u_long cn, got; struct msdosfsmount *pmp = dep->de_pmp; struct buf *bp; daddr_t blkno; /* * Don't try to extend the root directory */ if (dep->de_StartCluster == MSDOSFSROOT && (dep->de_Attributes & ATTR_DIRECTORY)) { #ifdef MSDOSFS_DEBUG printf("extendfile(): attempt to extend root directory\n"); #endif return (ENOSPC); } /* * If the "file's last cluster" cache entry is empty, and the file * is not empty, then fill the cache entry by calling pcbmap(). */ if (dep->de_fc[FC_LASTFC].fc_frcn == FCE_EMPTY && dep->de_StartCluster != 0) { error = pcbmap(dep, 0xffff, 0, &cn, 0); /* we expect it to return E2BIG */ if (error != E2BIG) return (error); } dep->de_fc[FC_NEXTTOLASTFC].fc_frcn = dep->de_fc[FC_LASTFC].fc_frcn; dep->de_fc[FC_NEXTTOLASTFC].fc_fsrcn = dep->de_fc[FC_LASTFC].fc_fsrcn; while (count > 0) { /* * Allocate a new cluster chain and cat onto the end of the * file. * If the file is empty we make de_StartCluster point * to the new block. Note that de_StartCluster being * 0 is sufficient to be sure the file is empty since * we exclude attempts to extend the root directory * above, and the root dir is the only file with a * startcluster of 0 that has blocks allocated (sort * of). */ if (dep->de_StartCluster == 0) cn = 0; else cn = dep->de_fc[FC_LASTFC].fc_fsrcn + 1; error = clusteralloc(pmp, cn, count, CLUST_EOFE, &cn, &got); if (error) return (error); count -= got; /* * Give them the filesystem relative cluster number if they want * it.
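* (ncp may be passed as NULL by callers that do not care; note that it is also NULLed after the first chain below, so for a multi-chain extension only the first allocated cluster is ever reported.)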
*/ if (ncp) { *ncp = cn; ncp = NULL; } if (dep->de_StartCluster == 0) { dep->de_StartCluster = cn; frcn = 0; } else { error = fatentry(FAT_SET, pmp, dep->de_fc[FC_LASTFC].fc_fsrcn, 0, cn); if (error) { clusterfree(pmp, cn, NULL); return (error); } frcn = dep->de_fc[FC_LASTFC].fc_frcn + 1; } /* * Update the "last cluster of the file" entry in the * denode's FAT cache. */ fc_setcache(dep, FC_LASTFC, frcn + got - 1, cn + got - 1); if (flags & DE_CLEAR) { while (got-- > 0) { /* * Get the buf header for the new block of the file. */ if (dep->de_Attributes & ATTR_DIRECTORY) bp = getblk(pmp->pm_devvp, cntobn(pmp, cn++), pmp->pm_bpcluster, 0, 0, 0); else { bp = getblk(DETOV(dep), frcn++, pmp->pm_bpcluster, 0, 0, 0); /* * Do the bmap now, as in msdosfs_write */ if (pcbmap(dep, bp->b_lblkno, &blkno, 0, 0)) bp->b_blkno = -1; if (bp->b_blkno == -1) panic("extendfile: pcbmap"); else bp->b_blkno = blkno; } vfs_bio_clrbuf(bp); if (bpp) { *bpp = bp; bpp = NULL; } else bdwrite(bp); } } } return (0); } /*- * Routine to mark a FAT16 or FAT32 volume as "clean" or "dirty" by * manipulating the upper bit of the FAT entry for cluster 1. Note that * this bit is not defined for FAT12 volumes, which are always assumed to * be clean. * * The fatentry() routine only works on cluster numbers that a file could * occupy, so it won't manipulate the entry for cluster 1. So we have to do * it here. The code was stolen from fatentry() and tailored for cluster 1. * * Inputs: * pmp The MS-DOS volume to mark * dirty Non-zero if the volume should be marked dirty; zero if it * should be marked clean * * Result: * 0 Success * EROFS Volume is read-only * ? (other errors from called routines) */ int markvoldirty(struct msdosfsmount *pmp, int dirty) { struct buf *bp; u_long bn, bo, bsize, byteoffset, fatval; int error; /* * FAT12 does not support a "clean" bit, so don't do anything for * FAT12. */ if (FAT12(pmp)) return (0); /* Can't change the bit on a read-only filesystem. */ if (pmp->pm_flags & MSDOSFSMNT_RONLY) return (EROFS); /* * Fetch the block containing the FAT entry. It is given by the * pseudo-cluster 1. */ byteoffset = FATOFS(pmp, 1); fatblock(pmp, byteoffset, &bn, &bsize, &bo); error = bread(pmp->pm_devvp, bn, bsize, NOCRED, &bp); if (error) { brelse(bp); return (error); } /* * Get the current value of the FAT entry and set/clear the relevant * bit. Dirty means clear the "clean" bit; clean means set the * "clean" bit. */ if (FAT32(pmp)) { /* FAT32 uses bit 27. */ fatval = getulong(&bp->b_data[bo]); if (dirty) fatval &= 0xF7FFFFFF; else fatval |= 0x08000000; putulong(&bp->b_data[bo], fatval); } else { /* Must be FAT16; use bit 15. */ fatval = getushort(&bp->b_data[bo]); if (dirty) fatval &= 0x7FFF; else fatval |= 0x8000; putushort(&bp->b_data[bo], fatval); } /* Write out the modified FAT block synchronously. */ return (bwrite(bp)); } Index: projects/clang500-import/sys/fs/msdosfs/msdosfs_vnops.c =================================================================== --- projects/clang500-import/sys/fs/msdosfs/msdosfs_vnops.c (revision 319548) +++ projects/clang500-import/sys/fs/msdosfs/msdosfs_vnops.c (revision 319549) @@ -1,1963 +1,1963 @@ /* $FreeBSD$ */ /* $NetBSD: msdosfs_vnops.c,v 1.68 1998/02/10 14:10:04 mrg Exp $ */ /*- * Copyright (C) 1994, 1995, 1997 Wolfgang Solfrank. * Copyright (C) 1994, 1995, 1997 TooLs GmbH. * All rights reserved. * Original code by Paul Popelka (paulp@uts.amdahl.com) (see below). 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Written by Paul Popelka (paulp@uts.amdahl.com) * * You can do anything you want with this software, just don't say you wrote * it, and don't remove this notice. * * This software is provided "as is". * * The author supplies this software to be publicly redistributed on the * understanding that the author is not responsible for the correct * functioning of this software in any circumstances and is not liable for * any damages caused by this software. * * October 1992 */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define DOS_FILESIZE_MAX 0xffffffff /* * Prototypes for MSDOSFS vnode operations */ static vop_create_t msdosfs_create; static vop_mknod_t msdosfs_mknod; static vop_open_t msdosfs_open; static vop_close_t msdosfs_close; static vop_access_t msdosfs_access; static vop_getattr_t msdosfs_getattr; static vop_setattr_t msdosfs_setattr; static vop_read_t msdosfs_read; static vop_write_t msdosfs_write; static vop_fsync_t msdosfs_fsync; static vop_remove_t msdosfs_remove; static vop_link_t msdosfs_link; static vop_rename_t msdosfs_rename; static vop_mkdir_t msdosfs_mkdir; static vop_rmdir_t msdosfs_rmdir; static vop_symlink_t msdosfs_symlink; static vop_readdir_t msdosfs_readdir; static vop_bmap_t msdosfs_bmap; static vop_getpages_t msdosfs_getpages; static vop_strategy_t msdosfs_strategy; static vop_print_t msdosfs_print; static vop_pathconf_t msdosfs_pathconf; static vop_vptofh_t msdosfs_vptofh; /* * Some general notes: * * In the ufs filesystem the inodes, superblocks, and indirect blocks are * read/written using the vnode for the filesystem. Blocks that represent * the contents of a file are read/written using the vnode for the file * (including directories when they are read/written as files). 
This * presents problems for the dos filesystem because data that should be in * an inode (if dos had them) resides in the directory itself. Since we * must update directory entries without the benefit of having the vnode * for the directory we must use the vnode for the filesystem. This means * that when a directory is actually read/written (via read, write, or * readdir, or seek) we must use the vnode for the filesystem instead of * the vnode for the directory as would happen in ufs. This is to insure we * retrieve the correct block from the buffer cache since the hash value is * based upon the vnode address and the desired block number. */ /* * Create a regular file. On entry the directory to contain the file being * created is locked. We must release before we return. We must also free * the pathname buffer pointed at by cnp->cn_pnbuf, always on error, or * only if the SAVESTART bit in cn_flags is clear on success. */ static int msdosfs_create(struct vop_create_args *ap) { struct componentname *cnp = ap->a_cnp; struct denode ndirent; struct denode *dep; struct denode *pdep = VTODE(ap->a_dvp); struct timespec ts; int error; #ifdef MSDOSFS_DEBUG printf("msdosfs_create(cnp %p, vap %p\n", cnp, ap->a_vap); #endif /* * If this is the root directory and there is no space left we * can't do anything. This is because the root directory can not * change size. */ if (pdep->de_StartCluster == MSDOSFSROOT && pdep->de_fndoffset >= pdep->de_FileSize) { error = ENOSPC; goto bad; } /* * Create a directory entry for the file, then call createde() to * have it installed. NOTE: DOS files are always executable. We * use the absence of the owner write bit to make the file * readonly. */ #ifdef DIAGNOSTIC if ((cnp->cn_flags & HASBUF) == 0) panic("msdosfs_create: no name"); #endif - bzero(&ndirent, sizeof(ndirent)); + memset(&ndirent, 0, sizeof(ndirent)); error = uniqdosname(pdep, cnp, ndirent.de_Name); if (error) goto bad; ndirent.de_Attributes = ATTR_ARCHIVE; ndirent.de_LowerCase = 0; ndirent.de_StartCluster = 0; ndirent.de_FileSize = 0; ndirent.de_pmp = pdep->de_pmp; ndirent.de_flag = DE_ACCESS | DE_CREATE | DE_UPDATE; getnanotime(&ts); DETIMES(&ndirent, &ts, &ts, &ts); error = createde(&ndirent, pdep, &dep, cnp); if (error) goto bad; *ap->a_vpp = DETOV(dep); if ((cnp->cn_flags & MAKEENTRY) != 0) cache_enter(ap->a_dvp, *ap->a_vpp, cnp); return (0); bad: return (error); } static int msdosfs_mknod(struct vop_mknod_args *ap) { return (EINVAL); } static int msdosfs_open(struct vop_open_args *ap) { struct denode *dep = VTODE(ap->a_vp); vnode_create_vobject(ap->a_vp, dep->de_FileSize, ap->a_td); return 0; } static int msdosfs_close(struct vop_close_args *ap) { struct vnode *vp = ap->a_vp; struct denode *dep = VTODE(vp); struct timespec ts; VI_LOCK(vp); if (vp->v_usecount > 1) { getnanotime(&ts); DETIMES(dep, &ts, &ts, &ts); } VI_UNLOCK(vp); return 0; } static int msdosfs_access(struct vop_access_args *ap) { struct vnode *vp = ap->a_vp; struct denode *dep = VTODE(ap->a_vp); struct msdosfsmount *pmp = dep->de_pmp; mode_t file_mode; accmode_t accmode = ap->a_accmode; file_mode = S_IRWXU|S_IRWXG|S_IRWXO; file_mode &= (vp->v_type == VDIR ? pmp->pm_dirmask : pmp->pm_mask); /* * Disallow writing to directories and regular files if the * filesystem is read-only. 
*/ if (accmode & VWRITE) { switch (vp->v_type) { case VREG: case VDIR: if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); break; default: break; } } return (vaccess(vp->v_type, file_mode, pmp->pm_uid, pmp->pm_gid, ap->a_accmode, ap->a_cred, NULL)); } static int msdosfs_getattr(struct vop_getattr_args *ap) { struct denode *dep = VTODE(ap->a_vp); struct msdosfsmount *pmp = dep->de_pmp; struct vattr *vap = ap->a_vap; mode_t mode; struct timespec ts; u_long dirsperblk = pmp->pm_BytesPerSec / sizeof(struct direntry); uint64_t fileid; getnanotime(&ts); DETIMES(dep, &ts, &ts, &ts); vap->va_fsid = dev2udev(pmp->pm_dev); /* * The following computation of the fileid must be the same as that * used in msdosfs_readdir() to compute d_fileno. If not, pwd * doesn't work. */ if (dep->de_Attributes & ATTR_DIRECTORY) { fileid = (uint64_t)cntobn(pmp, dep->de_StartCluster) * dirsperblk; if (dep->de_StartCluster == MSDOSFSROOT) fileid = 1; } else { fileid = (uint64_t)cntobn(pmp, dep->de_dirclust) * dirsperblk; if (dep->de_dirclust == MSDOSFSROOT) fileid = (uint64_t)roottobn(pmp, 0) * dirsperblk; fileid += (uoff_t)dep->de_diroffset / sizeof(struct direntry); } if (pmp->pm_flags & MSDOSFS_LARGEFS) vap->va_fileid = msdosfs_fileno_map(pmp->pm_mountp, fileid); else vap->va_fileid = (long)fileid; mode = S_IRWXU|S_IRWXG|S_IRWXO; vap->va_mode = mode & (ap->a_vp->v_type == VDIR ? pmp->pm_dirmask : pmp->pm_mask); vap->va_uid = pmp->pm_uid; vap->va_gid = pmp->pm_gid; vap->va_nlink = 1; vap->va_rdev = NODEV; vap->va_size = dep->de_FileSize; fattime2timespec(dep->de_MDate, dep->de_MTime, 0, 0, &vap->va_mtime); vap->va_ctime = vap->va_mtime; if (pmp->pm_flags & MSDOSFSMNT_LONGNAME) { fattime2timespec(dep->de_ADate, 0, 0, 0, &vap->va_atime); fattime2timespec(dep->de_CDate, dep->de_CTime, dep->de_CHun, 0, &vap->va_birthtime); } else { vap->va_atime = vap->va_mtime; vap->va_birthtime.tv_sec = -1; vap->va_birthtime.tv_nsec = 0; } vap->va_flags = 0; if (dep->de_Attributes & ATTR_ARCHIVE) vap->va_flags |= UF_ARCHIVE; if (dep->de_Attributes & ATTR_HIDDEN) vap->va_flags |= UF_HIDDEN; if (dep->de_Attributes & ATTR_READONLY) vap->va_flags |= UF_READONLY; if (dep->de_Attributes & ATTR_SYSTEM) vap->va_flags |= UF_SYSTEM; vap->va_gen = 0; vap->va_blocksize = pmp->pm_bpcluster; vap->va_bytes = (dep->de_FileSize + pmp->pm_crbomask) & ~pmp->pm_crbomask; vap->va_type = ap->a_vp->v_type; vap->va_filerev = dep->de_modrev; return (0); } static int msdosfs_setattr(struct vop_setattr_args *ap) { struct vnode *vp = ap->a_vp; struct denode *dep = VTODE(ap->a_vp); struct msdosfsmount *pmp = dep->de_pmp; struct vattr *vap = ap->a_vap; struct ucred *cred = ap->a_cred; struct thread *td = curthread; int error = 0; #ifdef MSDOSFS_DEBUG printf("msdosfs_setattr(): vp %p, vap %p, cred %p\n", ap->a_vp, vap, cred); #endif /* * Check for unsettable attributes. 
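* (Attributes msdosfs cannot represent must arrive as VNOVAL, or VNON for va_type; a request naming any of the fields tested below is rejected with EINVAL rather than silently ignored.)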
*/ if ((vap->va_type != VNON) || (vap->va_nlink != VNOVAL) || (vap->va_fsid != VNOVAL) || (vap->va_fileid != VNOVAL) || (vap->va_blocksize != VNOVAL) || (vap->va_rdev != VNOVAL) || (vap->va_bytes != VNOVAL) || (vap->va_gen != VNOVAL)) { #ifdef MSDOSFS_DEBUG printf("msdosfs_setattr(): returning EINVAL\n"); printf(" va_type %d, va_nlink %x, va_fsid %lx, va_fileid %lx\n", vap->va_type, vap->va_nlink, vap->va_fsid, vap->va_fileid); printf(" va_blocksize %lx, va_rdev %x, va_bytes %qx, va_gen %lx\n", vap->va_blocksize, vap->va_rdev, vap->va_bytes, vap->va_gen); printf(" va_uid %x, va_gid %x\n", vap->va_uid, vap->va_gid); #endif return (EINVAL); } /* * We don't allow setting attributes on the root directory. * The special case for the root directory is because before * FAT32, the root directory didn't have an entry for itself * (and was otherwise special). With FAT32, the root * directory is not so special, but still doesn't have an * entry for itself. */ if (vp->v_vflag & VV_ROOT) return (EINVAL); if (vap->va_flags != VNOVAL) { if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); if (cred->cr_uid != pmp->pm_uid) { error = priv_check_cred(cred, PRIV_VFS_ADMIN, 0); if (error) return (error); } /* * We are very inconsistent about handling unsupported * attributes. We ignored the access time and the * read and execute bits. We were strict for the other * attributes. */ if (vap->va_flags & ~(UF_ARCHIVE | UF_HIDDEN | UF_READONLY | UF_SYSTEM)) return EOPNOTSUPP; if (vap->va_flags & UF_ARCHIVE) dep->de_Attributes |= ATTR_ARCHIVE; else dep->de_Attributes &= ~ATTR_ARCHIVE; if (vap->va_flags & UF_HIDDEN) dep->de_Attributes |= ATTR_HIDDEN; else dep->de_Attributes &= ~ATTR_HIDDEN; /* We don't allow changing the readonly bit on directories. */ if (vp->v_type != VDIR) { if (vap->va_flags & UF_READONLY) dep->de_Attributes |= ATTR_READONLY; else dep->de_Attributes &= ~ATTR_READONLY; } if (vap->va_flags & UF_SYSTEM) dep->de_Attributes |= ATTR_SYSTEM; else dep->de_Attributes &= ~ATTR_SYSTEM; dep->de_flag |= DE_MODIFIED; } if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) { uid_t uid; gid_t gid; if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); uid = vap->va_uid; if (uid == (uid_t)VNOVAL) uid = pmp->pm_uid; gid = vap->va_gid; if (gid == (gid_t)VNOVAL) gid = pmp->pm_gid; if (cred->cr_uid != pmp->pm_uid || uid != pmp->pm_uid || (gid != pmp->pm_gid && !groupmember(gid, cred))) { error = priv_check_cred(cred, PRIV_VFS_CHOWN, 0); if (error) return (error); } if (uid != pmp->pm_uid || gid != pmp->pm_gid) return EINVAL; } if (vap->va_size != VNOVAL) { switch (vp->v_type) { case VDIR: return (EISDIR); case VREG: /* * Truncation is only supported for regular files, * Disallow it if the filesystem is read-only. */ if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); break; default: /* * According to POSIX, the result is unspecified * for file types other than regular files, * directories and shared memory objects. We * don't support any file types except regular * files and directories in this file system, so * this (default) case is unreachable and can do * anything. Keep falling through to detrunc() * for now. 
*/ break; } error = detrunc(dep, vap->va_size, 0, cred); if (error) return error; } if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) { if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); error = vn_utimes_perm(vp, vap, cred, td); if (error != 0) return (error); if ((pmp->pm_flags & MSDOSFSMNT_NOWIN95) == 0 && vap->va_atime.tv_sec != VNOVAL) { dep->de_flag &= ~DE_ACCESS; timespec2fattime(&vap->va_atime, 0, &dep->de_ADate, NULL, NULL); } if (vap->va_mtime.tv_sec != VNOVAL) { dep->de_flag &= ~DE_UPDATE; timespec2fattime(&vap->va_mtime, 0, &dep->de_MDate, &dep->de_MTime, NULL); } /* * We don't set the archive bit when modifying the time of * a directory to emulate the Windows/DOS behavior. */ if (vp->v_type != VDIR) dep->de_Attributes |= ATTR_ARCHIVE; dep->de_flag |= DE_MODIFIED; } /* * DOS files only have the ability to have their writability * attribute set, so we use the owner write bit to set the readonly * attribute. */ if (vap->va_mode != (mode_t)VNOVAL) { if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); if (cred->cr_uid != pmp->pm_uid) { error = priv_check_cred(cred, PRIV_VFS_ADMIN, 0); if (error) return (error); } if (vp->v_type != VDIR) { /* We ignore the read and execute bits. */ if (vap->va_mode & VWRITE) dep->de_Attributes &= ~ATTR_READONLY; else dep->de_Attributes |= ATTR_READONLY; dep->de_Attributes |= ATTR_ARCHIVE; dep->de_flag |= DE_MODIFIED; } } return (deupdat(dep, 0)); } static int msdosfs_read(struct vop_read_args *ap) { int error = 0; int blsize; int isadir; ssize_t orig_resid; u_int n; u_long diff; u_long on; daddr_t lbn; daddr_t rablock; int rasize; int seqcount; struct buf *bp; struct vnode *vp = ap->a_vp; struct denode *dep = VTODE(vp); struct msdosfsmount *pmp = dep->de_pmp; struct uio *uio = ap->a_uio; /* * If they didn't ask for any data, then we are done. */ orig_resid = uio->uio_resid; if (orig_resid == 0) return (0); /* * The caller is supposed to ensure that * uio->uio_offset >= 0 and uio->uio_resid >= 0. * We don't need to check for large offsets as in ffs because * dep->de_FileSize <= DOS_FILESIZE_MAX < OFF_MAX, so large * offsets cannot cause overflow even in theory. */ seqcount = ap->a_ioflag >> IO_SEQSHIFT; isadir = dep->de_Attributes & ATTR_DIRECTORY; do { if (uio->uio_offset >= dep->de_FileSize) break; lbn = de_cluster(pmp, uio->uio_offset); rablock = lbn + 1; blsize = pmp->pm_bpcluster; on = uio->uio_offset & pmp->pm_crbomask; /* * If we are operating on a directory file then be sure to * do i/o with the vnode for the filesystem instead of the * vnode for the directory. */ if (isadir) { /* convert cluster # to block # */ error = pcbmap(dep, lbn, &lbn, 0, &blsize); if (error == E2BIG) { error = EINVAL; break; } else if (error) break; error = bread(pmp->pm_devvp, lbn, blsize, NOCRED, &bp); } else if (de_cn2off(pmp, rablock) >= dep->de_FileSize) { error = bread(vp, lbn, blsize, NOCRED, &bp); } else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) { error = cluster_read(vp, dep->de_FileSize, lbn, blsize, NOCRED, on + uio->uio_resid, seqcount, 0, &bp); } else if (seqcount > 1) { rasize = blsize; error = breadn(vp, lbn, blsize, &rablock, &rasize, 1, NOCRED, &bp); } else { error = bread(vp, lbn, blsize, NOCRED, &bp); } if (error) { brelse(bp); break; } diff = pmp->pm_bpcluster - on; n = diff > uio->uio_resid ? 
uio->uio_resid : diff; diff = dep->de_FileSize - uio->uio_offset; if (diff < n) n = diff; diff = blsize - bp->b_resid; if (diff < n) n = diff; error = vn_io_fault_uiomove(bp->b_data + on, (int) n, uio); brelse(bp); } while (error == 0 && uio->uio_resid > 0 && n != 0); if (!isadir && (error == 0 || uio->uio_resid != orig_resid) && (vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) dep->de_flag |= DE_ACCESS; return (error); } /* * Write data to a file or directory. */ static int msdosfs_write(struct vop_write_args *ap) { int n; int croffset; ssize_t resid; u_long osize; int error = 0; u_long count; int seqcount; daddr_t bn, lastcn; struct buf *bp; int ioflag = ap->a_ioflag; struct uio *uio = ap->a_uio; struct vnode *vp = ap->a_vp; struct vnode *thisvp; struct denode *dep = VTODE(vp); struct msdosfsmount *pmp = dep->de_pmp; struct ucred *cred = ap->a_cred; #ifdef MSDOSFS_DEBUG printf("msdosfs_write(vp %p, uio %p, ioflag %x, cred %p\n", vp, uio, ioflag, cred); printf("msdosfs_write(): diroff %lu, dirclust %lu, startcluster %lu\n", dep->de_diroffset, dep->de_dirclust, dep->de_StartCluster); #endif switch (vp->v_type) { case VREG: if (ioflag & IO_APPEND) uio->uio_offset = dep->de_FileSize; thisvp = vp; break; case VDIR: return EISDIR; default: panic("msdosfs_write(): bad file type"); } /* * This is needed (unlike in ffs_write()) because we extend the * file outside of the loop but we don't want to extend the file * for writes of 0 bytes. */ if (uio->uio_resid == 0) return (0); /* * The caller is supposed to ensure that * uio->uio_offset >= 0 and uio->uio_resid >= 0. */ if ((uoff_t)uio->uio_offset + uio->uio_resid > DOS_FILESIZE_MAX) return (EFBIG); /* * If they've exceeded their filesize limit, tell them about it. */ if (vn_rlimit_fsize(vp, uio, uio->uio_td)) return (EFBIG); /* * If the offset we are starting the write at is beyond the end of * the file, then they've done a seek. Unix filesystems allow * files with holes in them, DOS doesn't so we must fill the hole * with zeroed blocks. */ if (uio->uio_offset > dep->de_FileSize) { error = deextend(dep, uio->uio_offset, cred); if (error) return (error); } /* * Remember some values in case the write fails. */ resid = uio->uio_resid; osize = dep->de_FileSize; /* * If we write beyond the end of the file, extend it to its ultimate * size ahead of the time to hopefully get a contiguous area. */ if (uio->uio_offset + resid > osize) { count = de_clcount(pmp, uio->uio_offset + resid) - de_clcount(pmp, osize); error = extendfile(dep, count, NULL, NULL, 0); if (error && (error != ENOSPC || (ioflag & IO_UNIT))) goto errexit; lastcn = dep->de_fc[FC_LASTFC].fc_frcn; } else lastcn = de_clcount(pmp, osize) - 1; seqcount = ioflag >> IO_SEQSHIFT; do { if (de_cluster(pmp, uio->uio_offset) > lastcn) { error = ENOSPC; break; } croffset = uio->uio_offset & pmp->pm_crbomask; n = min(uio->uio_resid, pmp->pm_bpcluster - croffset); if (uio->uio_offset + n > dep->de_FileSize) { dep->de_FileSize = uio->uio_offset + n; /* The object size needs to be set before buffer is allocated */ vnode_pager_setsize(vp, dep->de_FileSize); } bn = de_cluster(pmp, uio->uio_offset); if ((uio->uio_offset & pmp->pm_crbomask) == 0 && (de_cluster(pmp, uio->uio_offset + uio->uio_resid) > de_cluster(pmp, uio->uio_offset) || uio->uio_offset + uio->uio_resid >= dep->de_FileSize)) { /* * If either the whole cluster gets written, * or we write the cluster from its start beyond EOF, * then no need to read data from disk. 
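* (A worked instance, assuming a 4K cluster: writing 4096 bytes at offset 8192 gives croffset == 0 and de_cluster(pmp, 8192 + 4096) == 3 > de_cluster(pmp, 8192) == 2, so the whole cluster is overwritten and the buffer can be obtained with getblk() and cleared instead of being read from disk.)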
*/ bp = getblk(thisvp, bn, pmp->pm_bpcluster, 0, 0, 0); /* * This call to vfs_bio_clrbuf() ensures that * even if vn_io_fault_uiomove() below faults, * garbage from the newly instantiated buffer * is not exposed to the userspace via mmap(). */ vfs_bio_clrbuf(bp); /* * Do the bmap now, since pcbmap needs buffers * for the FAT table. (see msdosfs_strategy) */ if (bp->b_blkno == bp->b_lblkno) { error = pcbmap(dep, bp->b_lblkno, &bn, 0, 0); if (error) bp->b_blkno = -1; else bp->b_blkno = bn; } if (bp->b_blkno == -1) { brelse(bp); if (!error) error = EIO; /* XXX */ break; } } else { /* * The block we need to write into exists, so read it in. */ error = bread(thisvp, bn, pmp->pm_bpcluster, cred, &bp); if (error) { brelse(bp); break; } } /* * Should these vnode_pager_* functions be done on dir * files? */ /* * Copy the data from user space into the buf header. */ error = vn_io_fault_uiomove(bp->b_data + croffset, n, uio); if (error) { brelse(bp); break; } /* Prepare for clustered writes in some else clauses. */ if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) bp->b_flags |= B_CLUSTEROK; /* * If IO_SYNC, then each buffer is written synchronously. * Otherwise, if we have a severe page deficiency then * write the buffer asynchronously. Otherwise, if on a * cluster boundary then write the buffer asynchronously, * combining it with contiguous clusters if permitted and * possible, since we don't expect more writes into this * buffer soon. Otherwise, do a delayed write because we * expect more writes into this buffer soon. */ if (ioflag & IO_SYNC) (void)bwrite(bp); else if (vm_page_count_severe() || buf_dirty_count_severe()) bawrite(bp); else if (n + croffset == pmp->pm_bpcluster) { if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) cluster_write(vp, bp, dep->de_FileSize, seqcount, 0); else bawrite(bp); } else bdwrite(bp); dep->de_flag |= DE_UPDATE; } while (error == 0 && uio->uio_resid > 0); /* * If the write failed and they want us to, truncate the file back * to the size it was before the write was attempted. */ errexit: if (error) { if (ioflag & IO_UNIT) { detrunc(dep, osize, ioflag & IO_SYNC, NOCRED); uio->uio_offset -= resid - uio->uio_resid; uio->uio_resid = resid; } else { detrunc(dep, dep->de_FileSize, ioflag & IO_SYNC, NOCRED); if (uio->uio_resid != resid) error = 0; } } else if (ioflag & IO_SYNC) error = deupdat(dep, 1); return (error); } /* * Flush the blocks of a file to disk. */ static int msdosfs_fsync(struct vop_fsync_args *ap) { struct vnode *devvp; int allerror, error; vop_stdfsync(ap); /* * If the syncing request comes from fsync(2), sync the entire * FAT and any other metadata that happens to be on devvp. We * need this mainly for the FAT. We write the FAT sloppily, and * syncing it all now is the best we can easily do to get all * directory entries associated with the file (not just the file) * fully synced. The other metadata includes critical metadata * for all directory entries, but only in the MNT_ASYNC case. We * will soon sync all metadata in the file's directory entry. * Non-critical metadata for associated directory entries only * gets synced accidentally, as in most file systems. 
*/ if (ap->a_waitfor == MNT_WAIT) { devvp = VTODE(ap->a_vp)->de_pmp->pm_devvp; vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); allerror = VOP_FSYNC(devvp, MNT_WAIT, ap->a_td); VOP_UNLOCK(devvp, 0); } else allerror = 0; error = deupdat(VTODE(ap->a_vp), ap->a_waitfor == MNT_WAIT); if (allerror == 0) allerror = error; return (allerror); } static int msdosfs_remove(struct vop_remove_args *ap) { struct denode *dep = VTODE(ap->a_vp); struct denode *ddep = VTODE(ap->a_dvp); int error; if (ap->a_vp->v_type == VDIR) error = EPERM; else error = removede(ddep, dep); #ifdef MSDOSFS_DEBUG printf("msdosfs_remove(), dep %p, v_usecount %d\n", dep, ap->a_vp->v_usecount); #endif return (error); } /* * DOS filesystems don't know what links are. */ static int msdosfs_link(struct vop_link_args *ap) { return (EOPNOTSUPP); } /* * Renames on files require moving the denode to a new hash queue since the * denode's location is used to compute which hash queue to put the file * in. Unless it is a rename in place. For example "mv a b". * * What follows is the basic algorithm: * * if (file move) { * if (dest file exists) { * remove dest file * } * if (dest and src in same directory) { * rewrite name in existing directory slot * } else { * write new entry in dest directory * update offset and dirclust in denode * move denode to new hash chain * clear old directory entry * } * } else { * directory move * if (dest directory exists) { * if (dest is not empty) { * return ENOTEMPTY * } * remove dest directory * } * if (dest and src in same directory) { * rewrite name in existing entry * } else { * be sure dest is not a child of src directory * write entry in dest directory * update "." and ".." in moved directory * clear old directory entry for moved directory * } * } * * On entry: * source's parent directory is unlocked * source file or directory is unlocked * destination's parent directory is locked * destination file or directory is locked if it exists * * On exit: * all denodes should be released */ static int msdosfs_rename(struct vop_rename_args *ap) { struct vnode *tdvp = ap->a_tdvp; struct vnode *fvp = ap->a_fvp; struct vnode *fdvp = ap->a_fdvp; struct vnode *tvp = ap->a_tvp; struct componentname *tcnp = ap->a_tcnp; struct componentname *fcnp = ap->a_fcnp; struct denode *ip, *xp, *dp, *zp; u_char toname[12], oldname[11]; u_long from_diroffset, to_diroffset; u_char to_count; int doingdirectory = 0, newparent = 0; int error; u_long cn, pcl; daddr_t bn; struct msdosfsmount *pmp; struct direntry *dotdotp; struct buf *bp; pmp = VFSTOMSDOSFS(fdvp->v_mount); #ifdef DIAGNOSTIC if ((tcnp->cn_flags & HASBUF) == 0 || (fcnp->cn_flags & HASBUF) == 0) panic("msdosfs_rename: no name"); #endif /* * Check for cross-device rename. */ if (fvp->v_mount != tdvp->v_mount || (tvp && fvp->v_mount != tvp->v_mount)) { error = EXDEV; abortit: if (tdvp == tvp) vrele(tdvp); else vput(tdvp); if (tvp) vput(tvp); vrele(fdvp); vrele(fvp); return (error); } /* * If source and dest are the same, do nothing. */ if (tvp == fvp) { error = 0; goto abortit; } error = vn_lock(fvp, LK_EXCLUSIVE); if (error) goto abortit; dp = VTODE(fdvp); ip = VTODE(fvp); /* * Be sure we are not renaming ".", "..", or an alias of ".". This * leads to a crippled directory tree. It's pretty tough to do a * "ls" or "pwd" with the "." directory entry missing, and "cd .." * doesn't work if the ".." entry is missing. */ if (ip->de_Attributes & ATTR_DIRECTORY) { /* * Avoid ".", "..", and aliases of "." for obvious reasons. 
*/ if ((fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') || dp == ip || (fcnp->cn_flags & ISDOTDOT) || (tcnp->cn_flags & ISDOTDOT) || (ip->de_flag & DE_RENAME)) { VOP_UNLOCK(fvp, 0); error = EINVAL; goto abortit; } ip->de_flag |= DE_RENAME; doingdirectory++; } /* * When the target exists, both the directory * and target vnodes are returned locked. */ dp = VTODE(tdvp); xp = tvp ? VTODE(tvp) : NULL; /* * Remember direntry place to use for destination */ to_diroffset = dp->de_fndoffset; to_count = dp->de_fndcnt; /* * If ".." must be changed (ie the directory gets a new * parent) then the source directory must not be in the * directory hierarchy above the target, as this would * orphan everything below the source directory. Also * the user must have write permission in the source so * as to be able to change "..". We must repeat the call * to namei, as the parent directory is unlocked by the * call to doscheckpath(). */ error = VOP_ACCESS(fvp, VWRITE, tcnp->cn_cred, tcnp->cn_thread); VOP_UNLOCK(fvp, 0); if (VTODE(fdvp)->de_StartCluster != VTODE(tdvp)->de_StartCluster) newparent = 1; if (doingdirectory && newparent) { if (error) /* write access check above */ goto bad; if (xp != NULL) vput(tvp); /* * doscheckpath() vput()'s dp, * so we have to do a relookup afterwards */ error = doscheckpath(ip, dp); if (error) goto out; if ((tcnp->cn_flags & SAVESTART) == 0) panic("msdosfs_rename: lost to startdir"); error = relookup(tdvp, &tvp, tcnp); if (error) goto out; dp = VTODE(tdvp); xp = tvp ? VTODE(tvp) : NULL; } if (xp != NULL) { /* * Target must be empty if a directory and have no links * to it. Also, ensure source and target are compatible * (both directories, or both not directories). */ if (xp->de_Attributes & ATTR_DIRECTORY) { if (!dosdirempty(xp)) { error = ENOTEMPTY; goto bad; } if (!doingdirectory) { error = ENOTDIR; goto bad; } cache_purge(tdvp); } else if (doingdirectory) { error = EISDIR; goto bad; } error = removede(dp, xp); if (error) goto bad; vput(tvp); xp = NULL; } /* * Convert the filename in tcnp into a dos filename. We copy this * into the denode and directory entry for the destination * file/directory. */ error = uniqdosname(VTODE(tdvp), tcnp, toname); if (error) goto abortit; /* * Since from wasn't locked at various places above, * have to do a relookup here. */ fcnp->cn_flags &= ~MODMASK; fcnp->cn_flags |= LOCKPARENT | LOCKLEAF; if ((fcnp->cn_flags & SAVESTART) == 0) panic("msdosfs_rename: lost from startdir"); if (!newparent) VOP_UNLOCK(tdvp, 0); if (relookup(fdvp, &fvp, fcnp) == 0) vrele(fdvp); if (fvp == NULL) { /* * From name has disappeared. */ if (doingdirectory) panic("rename: lost dir entry"); if (newparent) VOP_UNLOCK(tdvp, 0); vrele(tdvp); vrele(ap->a_fvp); return 0; } xp = VTODE(fvp); zp = VTODE(fdvp); from_diroffset = zp->de_fndoffset; /* * Ensure that the directory entry still exists and has not * changed till now. If the source is a file the entry may * have been unlinked or renamed. In either case there is * no further work to be done. If the source is a directory * then it cannot have been rmdir'ed or renamed; this is * prohibited by the DE_RENAME flag. */ if (xp != ip) { if (doingdirectory) panic("rename: lost dir entry"); VOP_UNLOCK(fvp, 0); if (newparent) VOP_UNLOCK(fdvp, 0); vrele(ap->a_fvp); xp = NULL; } else { vrele(fvp); xp = NULL; /* * First write a new entry in the destination * directory and mark the entry in the source directory * as deleted. Then move the denode to the correct hash * chain for its new location in the filesystem. 
And, if * we moved a directory, then update its .. entry to point * to the new parent directory. */ - bcopy(ip->de_Name, oldname, 11); - bcopy(toname, ip->de_Name, 11); /* update denode */ + memcpy(oldname, ip->de_Name, 11); + memcpy(ip->de_Name, toname, 11); /* update denode */ dp->de_fndoffset = to_diroffset; dp->de_fndcnt = to_count; error = createde(ip, dp, (struct denode **)0, tcnp); if (error) { - bcopy(oldname, ip->de_Name, 11); + memcpy(ip->de_Name, oldname, 11); if (newparent) VOP_UNLOCK(fdvp, 0); VOP_UNLOCK(fvp, 0); goto bad; } /* * If ip is for a directory, then its name should always * be "." since it is for the directory entry in the * directory itself (msdosfs_lookup() always translates * to the "." entry so as to get a unique denode, except * for the root directory there are different * complications). However, we just corrupted its name * to pass the correct name to createde(). Undo this. */ if ((ip->de_Attributes & ATTR_DIRECTORY) != 0) - bcopy(oldname, ip->de_Name, 11); + memcpy(ip->de_Name, oldname, 11); ip->de_refcnt++; zp->de_fndoffset = from_diroffset; error = removede(zp, ip); if (error) { /* XXX should downgrade to ro here, fs is corrupt */ if (newparent) VOP_UNLOCK(fdvp, 0); VOP_UNLOCK(fvp, 0); goto bad; } if (!doingdirectory) { error = pcbmap(dp, de_cluster(pmp, to_diroffset), 0, &ip->de_dirclust, 0); if (error) { /* XXX should downgrade to ro here, fs is corrupt */ if (newparent) VOP_UNLOCK(fdvp, 0); VOP_UNLOCK(fvp, 0); goto bad; } if (ip->de_dirclust == MSDOSFSROOT) ip->de_diroffset = to_diroffset; else ip->de_diroffset = to_diroffset & pmp->pm_crbomask; } reinsert(ip); if (newparent) VOP_UNLOCK(fdvp, 0); } /* * If we moved a directory to a new parent directory, then we must * fixup the ".." entry in the moved directory. */ if (doingdirectory && newparent) { cn = ip->de_StartCluster; if (cn == MSDOSFSROOT) { /* this should never happen */ panic("msdosfs_rename(): updating .. in root directory?"); } else bn = cntobn(pmp, cn); error = bread(pmp->pm_devvp, bn, pmp->pm_bpcluster, NOCRED, &bp); if (error) { /* XXX should downgrade to ro here, fs is corrupt */ brelse(bp); VOP_UNLOCK(fvp, 0); goto bad; } dotdotp = (struct direntry *)bp->b_data + 1; pcl = dp->de_StartCluster; if (FAT32(pmp) && pcl == pmp->pm_rootdirblk) pcl = MSDOSFSROOT; putushort(dotdotp->deStartCluster, pcl); if (FAT32(pmp)) putushort(dotdotp->deHighClust, pcl >> 16); if (DOINGASYNC(fvp)) bdwrite(bp); else if ((error = bwrite(bp)) != 0) { /* XXX should downgrade to ro here, fs is corrupt */ VOP_UNLOCK(fvp, 0); goto bad; } } /* * The msdosfs lookup is case insensitive. Several aliases may * be inserted for a single directory entry. As a consequence, * name cache purge done by lookup for fvp when DELETE op for * namei is specified, might not be enough to expunge all * namecache entries that were installed for this direntry. */ cache_purge(fvp); VOP_UNLOCK(fvp, 0); bad: if (xp) vput(tvp); vput(tdvp); out: ip->de_flag &= ~DE_RENAME; vrele(fdvp); vrele(fvp); return (error); } static struct { struct direntry dot; struct direntry dotdot; } dosdirtemplate = { { ". ", /* the . entry */ ATTR_DIRECTORY, /* file attribute */ 0, /* reserved */ 0, { 0, 0 }, { 0, 0 }, /* create time & date */ { 0, 0 }, /* access date */ { 0, 0 }, /* high bits of start cluster */ { 210, 4 }, { 210, 4 }, /* modify time & date */ { 0, 0 }, /* startcluster */ { 0, 0, 0, 0 } /* filesize */ }, { ".. ", /* the ..
entry */ ATTR_DIRECTORY, /* file attribute */ 0, /* reserved */ 0, { 0, 0 }, { 0, 0 }, /* create time & date */ { 0, 0 }, /* access date */ { 0, 0 }, /* high bits of start cluster */ { 210, 4 }, { 210, 4 }, /* modify time & date */ { 0, 0 }, /* startcluster */ { 0, 0, 0, 0 } /* filesize */ } }; static int msdosfs_mkdir(struct vop_mkdir_args *ap) { struct componentname *cnp = ap->a_cnp; struct denode *dep; struct denode *pdep = VTODE(ap->a_dvp); struct direntry *denp; struct msdosfsmount *pmp = pdep->de_pmp; struct buf *bp; u_long newcluster, pcl; int bn; int error; struct denode ndirent; struct timespec ts; /* * If this is the root directory and there is no space left we * can't do anything. This is because the root directory can not * change size. */ if (pdep->de_StartCluster == MSDOSFSROOT && pdep->de_fndoffset >= pdep->de_FileSize) { error = ENOSPC; goto bad2; } /* * Allocate a cluster to hold the about to be created directory. */ error = clusteralloc(pmp, 0, 1, CLUST_EOFE, &newcluster, NULL); if (error) goto bad2; - bzero(&ndirent, sizeof(ndirent)); + memset(&ndirent, 0, sizeof(ndirent)); ndirent.de_pmp = pmp; ndirent.de_flag = DE_ACCESS | DE_CREATE | DE_UPDATE; getnanotime(&ts); DETIMES(&ndirent, &ts, &ts, &ts); /* * Now fill the cluster with the "." and ".." entries. And write * the cluster to disk. This way it is there for the parent * directory to be pointing at if there were a crash. */ bn = cntobn(pmp, newcluster); /* always succeeds */ bp = getblk(pmp->pm_devvp, bn, pmp->pm_bpcluster, 0, 0, 0); - bzero(bp->b_data, pmp->pm_bpcluster); - bcopy(&dosdirtemplate, bp->b_data, sizeof dosdirtemplate); + memset(bp->b_data, 0, pmp->pm_bpcluster); + memcpy(bp->b_data, &dosdirtemplate, sizeof dosdirtemplate); denp = (struct direntry *)bp->b_data; putushort(denp[0].deStartCluster, newcluster); putushort(denp[0].deCDate, ndirent.de_CDate); putushort(denp[0].deCTime, ndirent.de_CTime); denp[0].deCHundredth = ndirent.de_CHun; putushort(denp[0].deADate, ndirent.de_ADate); putushort(denp[0].deMDate, ndirent.de_MDate); putushort(denp[0].deMTime, ndirent.de_MTime); pcl = pdep->de_StartCluster; /* * Although the root directory has a non-magic starting cluster * number for FAT32, chkdsk and fsck_msdosfs still require * references to it in dotdot entries to be magic. */ if (FAT32(pmp) && pcl == pmp->pm_rootdirblk) pcl = MSDOSFSROOT; putushort(denp[1].deStartCluster, pcl); putushort(denp[1].deCDate, ndirent.de_CDate); putushort(denp[1].deCTime, ndirent.de_CTime); denp[1].deCHundredth = ndirent.de_CHun; putushort(denp[1].deADate, ndirent.de_ADate); putushort(denp[1].deMDate, ndirent.de_MDate); putushort(denp[1].deMTime, ndirent.de_MTime); if (FAT32(pmp)) { putushort(denp[0].deHighClust, newcluster >> 16); putushort(denp[1].deHighClust, pcl >> 16); } if (DOINGASYNC(ap->a_dvp)) bdwrite(bp); else if ((error = bwrite(bp)) != 0) goto bad; /* * Now build up a directory entry pointing to the newly allocated * cluster. This will be written to an empty slot in the parent * directory. 
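* (createde() below performs the slot search and copy-in; on failure the error path releases the just-allocated cluster via clusterfree(), so nothing is leaked for a half-built directory.)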
*/ #ifdef DIAGNOSTIC if ((cnp->cn_flags & HASBUF) == 0) panic("msdosfs_mkdir: no name"); #endif error = uniqdosname(pdep, cnp, ndirent.de_Name); if (error) goto bad; ndirent.de_Attributes = ATTR_DIRECTORY; ndirent.de_LowerCase = 0; ndirent.de_StartCluster = newcluster; ndirent.de_FileSize = 0; error = createde(&ndirent, pdep, &dep, cnp); if (error) goto bad; *ap->a_vpp = DETOV(dep); return (0); bad: clusterfree(pmp, newcluster, NULL); bad2: return (error); } static int msdosfs_rmdir(struct vop_rmdir_args *ap) { struct vnode *vp = ap->a_vp; struct vnode *dvp = ap->a_dvp; struct componentname *cnp = ap->a_cnp; struct denode *ip, *dp; int error; ip = VTODE(vp); dp = VTODE(dvp); /* * Verify the directory is empty (and valid). * (Rmdir ".." won't be valid since * ".." will contain a reference to * the current directory and thus be * non-empty.) */ error = 0; if (!dosdirempty(ip) || ip->de_flag & DE_RENAME) { error = ENOTEMPTY; goto out; } /* * Delete the entry from the directory. For dos filesystems this * gets rid of the directory entry on disk, the in memory copy * still exists but the de_refcnt is <= 0. This prevents it from * being found by deget(). When the vput() on dep is done we give * up access and eventually msdosfs_reclaim() will be called which * will remove it from the denode cache. */ error = removede(dp, ip); if (error) goto out; /* * This is where we decrement the link count in the parent * directory. Since dos filesystems don't do this we just purge * the name cache. */ cache_purge(dvp); /* * Truncate the directory that is being deleted. */ error = detrunc(ip, (u_long)0, IO_SYNC, cnp->cn_cred); cache_purge(vp); out: return (error); } /* * DOS filesystems don't know what symlinks are. */ static int msdosfs_symlink(struct vop_symlink_args *ap) { return (EOPNOTSUPP); } static int msdosfs_readdir(struct vop_readdir_args *ap) { struct mbnambuf nb; int error = 0; int diff; long n; int blsize; long on; u_long cn; uint64_t fileno; u_long dirsperblk; long bias = 0; daddr_t bn, lbn; struct buf *bp; struct denode *dep = VTODE(ap->a_vp); struct msdosfsmount *pmp = dep->de_pmp; struct direntry *dentp; struct dirent dirbuf; struct uio *uio = ap->a_uio; u_long *cookies = NULL; int ncookies = 0; off_t offset, off; int chksum = -1; #ifdef MSDOSFS_DEBUG printf("msdosfs_readdir(): vp %p, uio %p, cred %p, eofflagp %p\n", ap->a_vp, uio, ap->a_cred, ap->a_eofflag); #endif /* * msdosfs_readdir() won't operate properly on regular files since * it does i/o only with the filesystem vnode, and hence can * retrieve the wrong block from the buffer cache for a plain file. * So, fail attempts to readdir() on a plain file. */ if ((dep->de_Attributes & ATTR_DIRECTORY) == 0) return (ENOTDIR); /* * To be safe, initialize dirbuf */ - bzero(dirbuf.d_name, sizeof(dirbuf.d_name)); + memset(dirbuf.d_name, 0, sizeof(dirbuf.d_name)); /* * If the user buffer is smaller than the size of one dos directory * entry or the file offset is not a multiple of the size of a * directory entry, then we fail the read. */ off = offset = uio->uio_offset; if (uio->uio_resid < sizeof(struct direntry) || (offset & (sizeof(struct direntry) - 1))) return (EINVAL); if (ap->a_ncookies) { ncookies = uio->uio_resid / 16; cookies = malloc(ncookies * sizeof(u_long), M_TEMP, M_WAITOK); *ap->a_cookies = cookies; *ap->a_ncookies = ncookies; } dirsperblk = pmp->pm_BytesPerSec / sizeof(struct direntry); /* * If they are reading from the root directory then, we simulate * the . and .. entries since these don't exist in the root * directory. 
We also set the offset bias to make up for having to * simulate these entries. By this I mean that at file offset 64 we * read the first entry in the root directory that lives on disk. */ if (dep->de_StartCluster == MSDOSFSROOT || (FAT32(pmp) && dep->de_StartCluster == pmp->pm_rootdirblk)) { #if 0 printf("msdosfs_readdir(): going after . or .. in root dir, offset %d\n", offset); #endif bias = 2 * sizeof(struct direntry); if (offset < bias) { for (n = (int)offset / sizeof(struct direntry); n < 2; n++) { if (FAT32(pmp)) fileno = (uint64_t)cntobn(pmp, pmp->pm_rootdirblk) * dirsperblk; else fileno = 1; if (pmp->pm_flags & MSDOSFS_LARGEFS) { dirbuf.d_fileno = msdosfs_fileno_map(pmp->pm_mountp, fileno); } else { dirbuf.d_fileno = (uint32_t)fileno; } dirbuf.d_type = DT_DIR; switch (n) { case 0: dirbuf.d_namlen = 1; strcpy(dirbuf.d_name, "."); break; case 1: dirbuf.d_namlen = 2; strcpy(dirbuf.d_name, ".."); break; } dirbuf.d_reclen = GENERIC_DIRSIZ(&dirbuf); if (uio->uio_resid < dirbuf.d_reclen) goto out; error = uiomove(&dirbuf, dirbuf.d_reclen, uio); if (error) goto out; offset += sizeof(struct direntry); off = offset; if (cookies) { *cookies++ = offset; if (--ncookies <= 0) goto out; } } } } mbnambuf_init(&nb); off = offset; while (uio->uio_resid > 0) { lbn = de_cluster(pmp, offset - bias); on = (offset - bias) & pmp->pm_crbomask; n = min(pmp->pm_bpcluster - on, uio->uio_resid); diff = dep->de_FileSize - (offset - bias); if (diff <= 0) break; n = min(n, diff); error = pcbmap(dep, lbn, &bn, &cn, &blsize); if (error) break; error = bread(pmp->pm_devvp, bn, blsize, NOCRED, &bp); if (error) { brelse(bp); return (error); } n = min(n, blsize - bp->b_resid); if (n == 0) { brelse(bp); return (EIO); } /* * Convert from dos directory entries to fs-independent * directory entries. */ for (dentp = (struct direntry *)(bp->b_data + on); (char *)dentp < bp->b_data + on + n; dentp++, offset += sizeof(struct direntry)) { #if 0 printf("rd: dentp %08x prev %08x crnt %08x deName %02x attr %02x\n", dentp, prev, crnt, dentp->deName[0], dentp->deAttributes); #endif /* * If this is an unused entry, we can stop. */ if (dentp->deName[0] == SLOT_EMPTY) { brelse(bp); goto out; } /* * Skip deleted entries. */ if (dentp->deName[0] == SLOT_DELETED) { chksum = -1; mbnambuf_init(&nb); continue; } /* * Handle Win95 long directory entries */ if (dentp->deAttributes == ATTR_WIN95) { if (pmp->pm_flags & MSDOSFSMNT_SHORTNAME) continue; chksum = win2unixfn(&nb, (struct winentry *)dentp, chksum, pmp); continue; } /* * Skip volume labels */ if (dentp->deAttributes & ATTR_VOLUME) { chksum = -1; mbnambuf_init(&nb); continue; } /* * This computation of d_fileno must match * the computation of va_fileid in * msdosfs_getattr. */ if (dentp->deAttributes & ATTR_DIRECTORY) { fileno = getushort(dentp->deStartCluster); if (FAT32(pmp)) fileno |= getushort(dentp->deHighClust) << 16; /* if this is the root directory */ if (fileno == MSDOSFSROOT) if (FAT32(pmp)) fileno = (uint64_t)cntobn(pmp, pmp->pm_rootdirblk) * dirsperblk; else fileno = 1; else fileno = (uint64_t)cntobn(pmp, fileno) * dirsperblk; dirbuf.d_type = DT_DIR; } else { fileno = (uoff_t)offset / sizeof(struct direntry); dirbuf.d_type = DT_REG; } if (pmp->pm_flags & MSDOSFS_LARGEFS) { dirbuf.d_fileno = msdosfs_fileno_map(pmp->pm_mountp, fileno); } else dirbuf.d_fileno = (uint32_t)fileno; if (chksum != winChksum(dentp->deName)) { dirbuf.d_namlen = dos2unixfn(dentp->deName, (u_char *)dirbuf.d_name, dentp->deLowerCase | ((pmp->pm_flags & MSDOSFSMNT_SHORTNAME) ? 
(LCASE_BASE | LCASE_EXT) : 0), pmp); mbnambuf_init(&nb); } else mbnambuf_flush(&nb, &dirbuf); chksum = -1; dirbuf.d_reclen = GENERIC_DIRSIZ(&dirbuf); if (uio->uio_resid < dirbuf.d_reclen) { brelse(bp); goto out; } error = uiomove(&dirbuf, dirbuf.d_reclen, uio); if (error) { brelse(bp); goto out; } if (cookies) { *cookies++ = offset + sizeof(struct direntry); if (--ncookies <= 0) { brelse(bp); goto out; } } off = offset + sizeof(struct direntry); } brelse(bp); } out: /* Subtract unused cookies */ if (ap->a_ncookies) *ap->a_ncookies -= ncookies; uio->uio_offset = off; /* * Set the eofflag (NFS uses it) */ if (ap->a_eofflag) { if (dep->de_FileSize - (offset - bias) <= 0) *ap->a_eofflag = 1; else *ap->a_eofflag = 0; } return (error); } /*- * a_vp - pointer to the file's vnode * a_bn - logical block number within the file (cluster number for us) * a_bop - where to return the bufobj of the special file containing the fs * a_bnp - where to return the "physical" block number corresponding to a_bn * (relative to the special file; units are blocks of size DEV_BSIZE) * a_runp - where to return the "run past" a_bn. This is the count of logical * blocks whose physical blocks (together with a_bn's physical block) * are contiguous. * a_runb - where to return the "run before" a_bn. */ static int msdosfs_bmap(struct vop_bmap_args *ap) { struct denode *dep; struct mount *mp; struct msdosfsmount *pmp; struct vnode *vp; daddr_t runbn; u_long cn; int bnpercn, error, maxio, maxrun, run; vp = ap->a_vp; dep = VTODE(vp); pmp = dep->de_pmp; if (ap->a_bop != NULL) *ap->a_bop = &pmp->pm_devvp->v_bufobj; if (ap->a_bnp == NULL) return (0); if (ap->a_runp != NULL) *ap->a_runp = 0; if (ap->a_runb != NULL) *ap->a_runb = 0; cn = ap->a_bn; if (cn != ap->a_bn) return (EFBIG); error = pcbmap(dep, cn, ap->a_bnp, NULL, NULL); if (error != 0 || (ap->a_runp == NULL && ap->a_runb == NULL)) return (error); mp = vp->v_mount; maxio = mp->mnt_iosize_max / mp->mnt_stat.f_iosize; bnpercn = de_cn2bn(pmp, 1); if (ap->a_runp != NULL) { maxrun = ulmin(maxio - 1, pmp->pm_maxcluster - cn); for (run = 1; run <= maxrun; run++) { if (pcbmap(dep, cn + run, &runbn, NULL, NULL) != 0 || runbn != *ap->a_bnp + run * bnpercn) break; } *ap->a_runp = run - 1; } if (ap->a_runb != NULL) { maxrun = ulmin(maxio - 1, cn); for (run = 1; run < maxrun; run++) { if (pcbmap(dep, cn - run, &runbn, NULL, NULL) != 0 || runbn != *ap->a_bnp - run * bnpercn) break; } *ap->a_runb = run - 1; } return (0); } SYSCTL_NODE(_vfs, OID_AUTO, msdosfs, CTLFLAG_RW, 0, "msdos filesystem"); static int use_buf_pager = 1; SYSCTL_INT(_vfs_msdosfs, OID_AUTO, use_buf_pager, CTLFLAG_RWTUN, &use_buf_pager, 0, "Use buffer pager instead of bmap"); static daddr_t msdosfs_gbp_getblkno(struct vnode *vp, vm_ooffset_t off) { return (de_cluster(VTODE(vp)->de_pmp, off)); } static int msdosfs_gbp_getblksz(struct vnode *vp, daddr_t lbn) { return (VTODE(vp)->de_pmp->pm_bpcluster); } static int msdosfs_getpages(struct vop_getpages_args *ap) { if (use_buf_pager) return (vfs_bio_getpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind, ap->a_rahead, msdosfs_gbp_getblkno, msdosfs_gbp_getblksz)); return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind, ap->a_rahead, NULL, NULL)); } static int msdosfs_strategy(struct vop_strategy_args *ap) { struct buf *bp = ap->a_bp; struct denode *dep = VTODE(ap->a_vp); struct bufobj *bo; int error = 0; daddr_t blkno; /* * If we don't already know the filesystem relative block number * then get it using pcbmap(). 
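 * (The buffer cache initializes b_blkno to b_lblkno, so the
 * b_blkno == b_lblkno test below identifies a buffer whose physical
 * block number has not been resolved yet.)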
If pcbmap() returns the block * number as -1 then we've got a hole in the file. DOS filesystems * don't allow files with holes, so we shouldn't ever see this. */ if (bp->b_blkno == bp->b_lblkno) { error = pcbmap(dep, bp->b_lblkno, &blkno, 0, 0); bp->b_blkno = blkno; if (error) { bp->b_error = error; bp->b_ioflags |= BIO_ERROR; bufdone(bp); return (0); } if ((long)bp->b_blkno == -1) vfs_bio_clrbuf(bp); } if (bp->b_blkno == -1) { bufdone(bp); return (0); } /* * Read/write the block from/to the disk that contains the desired * file block. */ bp->b_iooffset = dbtob(bp->b_blkno); bo = dep->de_pmp->pm_bo; BO_STRATEGY(bo, bp); return (0); } static int msdosfs_print(struct vop_print_args *ap) { struct denode *dep = VTODE(ap->a_vp); printf("\tstartcluster %lu, dircluster %lu, diroffset %lu, ", dep->de_StartCluster, dep->de_dirclust, dep->de_diroffset); printf("on dev %s\n", devtoname(dep->de_pmp->pm_dev)); return (0); } static int msdosfs_pathconf(struct vop_pathconf_args *ap) { struct msdosfsmount *pmp = VTODE(ap->a_vp)->de_pmp; switch (ap->a_name) { case _PC_LINK_MAX: *ap->a_retval = 1; return (0); case _PC_NAME_MAX: *ap->a_retval = pmp->pm_flags & MSDOSFSMNT_LONGNAME ? WIN_MAXLEN : 12; return (0); case _PC_PATH_MAX: *ap->a_retval = PATH_MAX; return (0); case _PC_CHOWN_RESTRICTED: *ap->a_retval = 1; return (0); case _PC_NO_TRUNC: *ap->a_retval = 0; return (0); default: return (EINVAL); } /* NOTREACHED */ } static int msdosfs_vptofh(struct vop_vptofh_args *ap) { struct denode *dep; struct defid *defhp; dep = VTODE(ap->a_vp); defhp = (struct defid *)ap->a_fhp; defhp->defid_len = sizeof(struct defid); defhp->defid_dirclust = dep->de_dirclust; defhp->defid_dirofs = dep->de_diroffset; /* defhp->defid_gen = dep->de_gen; */ return (0); } /* Global vfs data structures for msdosfs */ struct vop_vector msdosfs_vnodeops = { .vop_default = &default_vnodeops, .vop_access = msdosfs_access, .vop_bmap = msdosfs_bmap, .vop_getpages = msdosfs_getpages, .vop_cachedlookup = msdosfs_lookup, .vop_open = msdosfs_open, .vop_close = msdosfs_close, .vop_create = msdosfs_create, .vop_fsync = msdosfs_fsync, .vop_fdatasync = vop_stdfdatasync_buf, .vop_getattr = msdosfs_getattr, .vop_inactive = msdosfs_inactive, .vop_link = msdosfs_link, .vop_lookup = vfs_cache_lookup, .vop_mkdir = msdosfs_mkdir, .vop_mknod = msdosfs_mknod, .vop_pathconf = msdosfs_pathconf, .vop_print = msdosfs_print, .vop_read = msdosfs_read, .vop_readdir = msdosfs_readdir, .vop_reclaim = msdosfs_reclaim, .vop_remove = msdosfs_remove, .vop_rename = msdosfs_rename, .vop_rmdir = msdosfs_rmdir, .vop_setattr = msdosfs_setattr, .vop_strategy = msdosfs_strategy, .vop_symlink = msdosfs_symlink, .vop_write = msdosfs_write, .vop_vptofh = msdosfs_vptofh, }; Index: projects/clang500-import/sys/kern/kern_proc.c =================================================================== --- projects/clang500-import/sys/kern/kern_proc.c (revision 319548) +++ projects/clang500-import/sys/kern/kern_proc.c (revision 319549) @@ -1,3125 +1,3127 @@ /*- * Copyright (c) 1982, 1986, 1989, 1991, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)kern_proc.c 8.7 (Berkeley) 2/14/95 */ #include __FBSDID("$FreeBSD$"); #include "opt_compat.h" #include "opt_ddb.h" #include "opt_ktrace.h" #include "opt_kstack_pages.h" #include "opt_stack.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif #include #include #include #include #include #include #include #include #ifdef COMPAT_FREEBSD32 #include #include #endif SDT_PROVIDER_DEFINE(proc); SDT_PROBE_DEFINE4(proc, , ctor, entry, "struct proc *", "int", "void *", "int"); SDT_PROBE_DEFINE4(proc, , ctor, return, "struct proc *", "int", "void *", "int"); SDT_PROBE_DEFINE4(proc, , dtor, entry, "struct proc *", "int", "void *", "struct thread *"); SDT_PROBE_DEFINE3(proc, , dtor, return, "struct proc *", "int", "void *"); SDT_PROBE_DEFINE3(proc, , init, entry, "struct proc *", "int", "int"); SDT_PROBE_DEFINE3(proc, , init, return, "struct proc *", "int", "int"); MALLOC_DEFINE(M_PGRP, "pgrp", "process group header"); MALLOC_DEFINE(M_SESSION, "session", "session header"); static MALLOC_DEFINE(M_PROC, "proc", "Proc structures"); MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures"); static void doenterpgrp(struct proc *, struct pgrp *); static void orphanpg(struct pgrp *pg); static void fill_kinfo_aggregate(struct proc *p, struct kinfo_proc *kp); static void fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp); static void fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp, int preferthread); static void pgadjustjobc(struct pgrp *pgrp, int entering); static void pgdelete(struct pgrp *); static int proc_ctor(void *mem, int size, void *arg, int flags); static void proc_dtor(void *mem, int size, void *arg); static int proc_init(void *mem, int size, int flags); static void proc_fini(void *mem, int size); static void pargs_free(struct pargs *pa); static struct proc *zpfind_locked(pid_t pid); /* * Other process lists */ struct pidhashhead *pidhashtbl; u_long pidhash; struct pgrphashhead *pgrphashtbl; u_long pgrphash; struct proclist allproc; struct proclist zombproc; struct sx allproc_lock; struct sx proctree_lock; struct mtx ppeers_lock; uma_zone_t proc_zone; /* * 
The offset of various fields in struct proc and struct thread. * These are used by kernel debuggers to enumerate kernel threads and * processes. */ const int proc_off_p_pid = offsetof(struct proc, p_pid); const int proc_off_p_comm = offsetof(struct proc, p_comm); const int proc_off_p_list = offsetof(struct proc, p_list); const int proc_off_p_threads = offsetof(struct proc, p_threads); const int thread_off_td_tid = offsetof(struct thread, td_tid); const int thread_off_td_name = offsetof(struct thread, td_name); const int thread_off_td_oncpu = offsetof(struct thread, td_oncpu); const int thread_off_td_pcb = offsetof(struct thread, td_pcb); const int thread_off_td_plist = offsetof(struct thread, td_plist); int kstack_pages = KSTACK_PAGES; SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0, "Kernel stack size in pages"); static int vmmap_skip_res_cnt = 0; SYSCTL_INT(_kern, OID_AUTO, proc_vmmap_skip_resident_count, CTLFLAG_RW, &vmmap_skip_res_cnt, 0, "Skip calculation of the pages resident count in kern.proc.vmmap"); CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE); #ifdef COMPAT_FREEBSD32 CTASSERT(sizeof(struct kinfo_proc32) == KINFO_PROC32_SIZE); #endif /* * Initialize global process hashing structures. */ void procinit(void) { sx_init(&allproc_lock, "allproc"); sx_init(&proctree_lock, "proctree"); mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF); LIST_INIT(&allproc); LIST_INIT(&zombproc); pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash); pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash); proc_zone = uma_zcreate("PROC", sched_sizeof_proc(), proc_ctor, proc_dtor, proc_init, proc_fini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); uihashinit(); } /* * Prepare a proc for use. */ static int proc_ctor(void *mem, int size, void *arg, int flags) { struct proc *p; struct thread *td; p = (struct proc *)mem; SDT_PROBE4(proc, , ctor , entry, p, size, arg, flags); EVENTHANDLER_INVOKE(process_ctor, p); SDT_PROBE4(proc, , ctor , return, p, size, arg, flags); td = FIRST_THREAD_IN_PROC(p); if (td != NULL) { /* Make sure all thread constructors are executed */ EVENTHANDLER_INVOKE(thread_ctor, td); } return (0); } /* * Reclaim a proc after use. */ static void proc_dtor(void *mem, int size, void *arg) { struct proc *p; struct thread *td; /* INVARIANTS checks go here */ p = (struct proc *)mem; td = FIRST_THREAD_IN_PROC(p); SDT_PROBE4(proc, , dtor, entry, p, size, arg, td); if (td != NULL) { #ifdef INVARIANTS KASSERT((p->p_numthreads == 1), ("bad number of threads in exiting process")); KASSERT(STAILQ_EMPTY(&p->p_ktr), ("proc_dtor: non-empty p_ktr")); #endif /* Free all OSD associated to this thread. */ osd_thread_exit(td); + td_softdep_cleanup(td); + MPASS(td->td_su == NULL); /* Make sure all thread destructors are executed */ EVENTHANDLER_INVOKE(thread_dtor, td); } EVENTHANDLER_INVOKE(process_dtor, p); if (p->p_ksi != NULL) KASSERT(! KSI_ONQ(p->p_ksi), ("SIGCHLD queue")); SDT_PROBE3(proc, , dtor, return, p, size, arg); } /* * Initialize type-stable parts of a proc (when newly created). 
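 * Since proc_zone is created with UMA_ZONE_NOFREE (see procinit()),
 * everything set up here survives free/alloc cycles and is never
 * torn down; proc_fini() panics if it is ever reached.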
*/ static int proc_init(void *mem, int size, int flags) { struct proc *p; p = (struct proc *)mem; SDT_PROBE3(proc, , init, entry, p, size, flags); mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK | MTX_NEW); mtx_init(&p->p_slock, "process slock", NULL, MTX_SPIN | MTX_NEW); mtx_init(&p->p_statmtx, "pstatl", NULL, MTX_SPIN | MTX_NEW); mtx_init(&p->p_itimmtx, "pitiml", NULL, MTX_SPIN | MTX_NEW); mtx_init(&p->p_profmtx, "pprofl", NULL, MTX_SPIN | MTX_NEW); cv_init(&p->p_pwait, "ppwait"); cv_init(&p->p_dbgwait, "dbgwait"); TAILQ_INIT(&p->p_threads); /* all threads in proc */ EVENTHANDLER_INVOKE(process_init, p); p->p_stats = pstats_alloc(); p->p_pgrp = NULL; SDT_PROBE3(proc, , init, return, p, size, flags); return (0); } /* * UMA should ensure that this function is never called. * Freeing a proc structure would violate type stability. */ static void proc_fini(void *mem, int size) { #ifdef notnow struct proc *p; p = (struct proc *)mem; EVENTHANDLER_INVOKE(process_fini, p); pstats_free(p->p_stats); thread_free(FIRST_THREAD_IN_PROC(p)); mtx_destroy(&p->p_mtx); if (p->p_ksi != NULL) ksiginfo_free(p->p_ksi); #else panic("proc reclaimed"); #endif } /* * Is p an inferior of the current process? */ int inferior(struct proc *p) { sx_assert(&proctree_lock, SX_LOCKED); PROC_LOCK_ASSERT(p, MA_OWNED); for (; p != curproc; p = proc_realparent(p)) { if (p->p_pid == 0) return (0); } return (1); } struct proc * pfind_locked(pid_t pid) { struct proc *p; sx_assert(&allproc_lock, SX_LOCKED); LIST_FOREACH(p, PIDHASH(pid), p_hash) { if (p->p_pid == pid) { PROC_LOCK(p); if (p->p_state == PRS_NEW) { PROC_UNLOCK(p); p = NULL; } break; } } return (p); } /* * Locate a process by number; return only "live" processes -- i.e., neither * zombies nor newly born but incompletely initialized processes. By not * returning processes in the PRS_NEW state, we allow callers to avoid * testing for that condition to avoid dereferencing p_ucred, et al. */ struct proc * pfind(pid_t pid) { struct proc *p; sx_slock(&allproc_lock); p = pfind_locked(pid); sx_sunlock(&allproc_lock); return (p); } static struct proc * pfind_tid_locked(pid_t tid) { struct proc *p; struct thread *td; sx_assert(&allproc_lock, SX_LOCKED); FOREACH_PROC_IN_SYSTEM(p) { PROC_LOCK(p); if (p->p_state == PRS_NEW) { PROC_UNLOCK(p); continue; } FOREACH_THREAD_IN_PROC(p, td) { if (td->td_tid == tid) goto found; } PROC_UNLOCK(p); } found: return (p); } /* * Locate a process group by number. * The caller must hold proctree_lock. */ struct pgrp * pgfind(pid_t pgid) { struct pgrp *pgrp; sx_assert(&proctree_lock, SX_LOCKED); LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) { if (pgrp->pg_id == pgid) { PGRP_LOCK(pgrp); return (pgrp); } } return (NULL); } /* * Locate process and do additional manipulations, depending on flags. 
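 * An illustrative (hypothetical) caller:
 *
 *	error = pget(pid, PGET_CANSEE | PGET_HOLD, &p);
 *	if (error == 0) {
 *		... p is held but not locked here ...
 *		PRELE(p);
 *	}
 *
 * Without PGET_HOLD, *pp is instead returned with its process lock
 * still held.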
*/ int pget(pid_t pid, int flags, struct proc **pp) { struct proc *p; int error; sx_slock(&allproc_lock); if (pid <= PID_MAX) { p = pfind_locked(pid); if (p == NULL && (flags & PGET_NOTWEXIT) == 0) p = zpfind_locked(pid); } else if ((flags & PGET_NOTID) == 0) { p = pfind_tid_locked(pid); } else { p = NULL; } sx_sunlock(&allproc_lock); if (p == NULL) return (ESRCH); if ((flags & PGET_CANSEE) != 0) { error = p_cansee(curthread, p); if (error != 0) goto errout; } if ((flags & PGET_CANDEBUG) != 0) { error = p_candebug(curthread, p); if (error != 0) goto errout; } if ((flags & PGET_ISCURRENT) != 0 && curproc != p) { error = EPERM; goto errout; } if ((flags & PGET_NOTWEXIT) != 0 && (p->p_flag & P_WEXIT) != 0) { error = ESRCH; goto errout; } if ((flags & PGET_NOTINEXEC) != 0 && (p->p_flag & P_INEXEC) != 0) { /* * XXXRW: Not clear ESRCH is the right error during proc * execve(). */ error = ESRCH; goto errout; } if ((flags & PGET_HOLD) != 0) { _PHOLD(p); PROC_UNLOCK(p); } *pp = p; return (0); errout: PROC_UNLOCK(p); return (error); } /* * Create a new process group. * pgid must be equal to the pid of p. * Begin a new session if required. */ int enterpgrp(struct proc *p, pid_t pgid, struct pgrp *pgrp, struct session *sess) { sx_assert(&proctree_lock, SX_XLOCKED); KASSERT(pgrp != NULL, ("enterpgrp: pgrp == NULL")); KASSERT(p->p_pid == pgid, ("enterpgrp: new pgrp and pid != pgid")); KASSERT(pgfind(pgid) == NULL, ("enterpgrp: pgrp with pgid exists")); KASSERT(!SESS_LEADER(p), ("enterpgrp: session leader attempted setpgrp")); mtx_init(&pgrp->pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK); if (sess != NULL) { /* * new session */ mtx_init(&sess->s_mtx, "session", NULL, MTX_DEF); PROC_LOCK(p); p->p_flag &= ~P_CONTROLT; PROC_UNLOCK(p); PGRP_LOCK(pgrp); sess->s_leader = p; sess->s_sid = p->p_pid; refcount_init(&sess->s_count, 1); sess->s_ttyvp = NULL; sess->s_ttydp = NULL; sess->s_ttyp = NULL; bcopy(p->p_session->s_login, sess->s_login, sizeof(sess->s_login)); pgrp->pg_session = sess; KASSERT(p == curproc, ("enterpgrp: mksession and p != curproc")); } else { pgrp->pg_session = p->p_session; sess_hold(pgrp->pg_session); PGRP_LOCK(pgrp); } pgrp->pg_id = pgid; LIST_INIT(&pgrp->pg_members); /* * As we have an exclusive lock of proctree_lock, * this should not deadlock. */ LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash); pgrp->pg_jobc = 0; SLIST_INIT(&pgrp->pg_sigiolst); PGRP_UNLOCK(pgrp); doenterpgrp(p, pgrp); return (0); } /* * Move p to an existing process group */ int enterthispgrp(struct proc *p, struct pgrp *pgrp) { sx_assert(&proctree_lock, SX_XLOCKED); PROC_LOCK_ASSERT(p, MA_NOTOWNED); PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED); PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED); SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED); KASSERT(pgrp->pg_session == p->p_session, ("%s: pgrp's session %p, p->p_session %p.\n", __func__, pgrp->pg_session, p->p_session)); KASSERT(pgrp != p->p_pgrp, ("%s: p belongs to pgrp.", __func__)); doenterpgrp(p, pgrp); return (0); } /* * Move p to a process group */ static void doenterpgrp(struct proc *p, struct pgrp *pgrp) { struct pgrp *savepgrp; sx_assert(&proctree_lock, SX_XLOCKED); PROC_LOCK_ASSERT(p, MA_NOTOWNED); PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED); PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED); SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED); savepgrp = p->p_pgrp; /* * Adjust eligibility of affected pgrps to participate in job control. * Increment eligibility counts before decrementing, otherwise we * could reach 0 spuriously during the first call. 
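 * (fixjobc() also adjusts the counts of the children's process
 * groups; if the decrement pass ran first, a child's group count
 * could transiently hit zero and the group would be treated as
 * orphaned even though the increment pass would requalify it.)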
*/ fixjobc(p, pgrp, 1); fixjobc(p, p->p_pgrp, 0); PGRP_LOCK(pgrp); PGRP_LOCK(savepgrp); PROC_LOCK(p); LIST_REMOVE(p, p_pglist); p->p_pgrp = pgrp; PROC_UNLOCK(p); LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist); PGRP_UNLOCK(savepgrp); PGRP_UNLOCK(pgrp); if (LIST_EMPTY(&savepgrp->pg_members)) pgdelete(savepgrp); } /* * remove process from process group */ int leavepgrp(struct proc *p) { struct pgrp *savepgrp; sx_assert(&proctree_lock, SX_XLOCKED); savepgrp = p->p_pgrp; PGRP_LOCK(savepgrp); PROC_LOCK(p); LIST_REMOVE(p, p_pglist); p->p_pgrp = NULL; PROC_UNLOCK(p); PGRP_UNLOCK(savepgrp); if (LIST_EMPTY(&savepgrp->pg_members)) pgdelete(savepgrp); return (0); } /* * delete a process group */ static void pgdelete(struct pgrp *pgrp) { struct session *savesess; struct tty *tp; sx_assert(&proctree_lock, SX_XLOCKED); PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED); SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED); /* * Reset any sigio structures pointing to us as a result of * F_SETOWN with our pgid. */ funsetownlst(&pgrp->pg_sigiolst); PGRP_LOCK(pgrp); tp = pgrp->pg_session->s_ttyp; LIST_REMOVE(pgrp, pg_hash); savesess = pgrp->pg_session; PGRP_UNLOCK(pgrp); /* Remove the reference to the pgrp before deallocating it. */ if (tp != NULL) { tty_lock(tp); tty_rel_pgrp(tp, pgrp); } mtx_destroy(&pgrp->pg_mtx); free(pgrp, M_PGRP); sess_release(savesess); } static void pgadjustjobc(struct pgrp *pgrp, int entering) { PGRP_LOCK(pgrp); if (entering) pgrp->pg_jobc++; else { --pgrp->pg_jobc; if (pgrp->pg_jobc == 0) orphanpg(pgrp); } PGRP_UNLOCK(pgrp); } /* * Adjust pgrp jobc counters when specified process changes process group. * We count the number of processes in each process group that "qualify" * the group for terminal job control (those with a parent in a different * process group of the same session). If that count reaches zero, the * process group becomes orphaned. Check both the specified process' * process group and that of its children. * entering == 0 => p is leaving specified group. * entering == 1 => p is entering specified group. */ void fixjobc(struct proc *p, struct pgrp *pgrp, int entering) { struct pgrp *hispgrp; struct session *mysession; struct proc *q; sx_assert(&proctree_lock, SX_LOCKED); PROC_LOCK_ASSERT(p, MA_NOTOWNED); PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED); SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED); /* * Check p's parent to see whether p qualifies its own process * group; if so, adjust count for p's process group. */ mysession = pgrp->pg_session; if ((hispgrp = p->p_pptr->p_pgrp) != pgrp && hispgrp->pg_session == mysession) pgadjustjobc(pgrp, entering); /* * Check this process' children to see whether they qualify * their process groups; if so, adjust counts for children's * process groups. */ LIST_FOREACH(q, &p->p_children, p_sibling) { hispgrp = q->p_pgrp; if (hispgrp == pgrp || hispgrp->pg_session != mysession) continue; if (q->p_state == PRS_ZOMBIE) continue; pgadjustjobc(hispgrp, entering); } } void killjobc(void) { struct session *sp; struct tty *tp; struct proc *p; struct vnode *ttyvp; p = curproc; MPASS(p->p_flag & P_WEXIT); /* * Do a quick check to see if there is anything to do with the * proctree_lock held. pgrp and LIST_EMPTY checks are for fixjobc(). */ PROC_LOCK(p); if (!SESS_LEADER(p) && (p->p_pgrp == p->p_pptr->p_pgrp) && LIST_EMPTY(&p->p_children)) { PROC_UNLOCK(p); return; } PROC_UNLOCK(p); sx_xlock(&proctree_lock); if (SESS_LEADER(p)) { sp = p->p_session; /* * s_ttyp is not zero'd; we use this to indicate that * the session once had a controlling terminal. 
(for * logging and informational purposes) */ SESS_LOCK(sp); ttyvp = sp->s_ttyvp; tp = sp->s_ttyp; sp->s_ttyvp = NULL; sp->s_ttydp = NULL; sp->s_leader = NULL; SESS_UNLOCK(sp); /* * Signal foreground pgrp and revoke access to * controlling terminal if it has not been revoked * already. * * Because the TTY may have been revoked in the mean * time and could already have a new session associated * with it, make sure we don't send a SIGHUP to a * foreground process group that does not belong to this * session. */ if (tp != NULL) { tty_lock(tp); if (tp->t_session == sp) tty_signal_pgrp(tp, SIGHUP); tty_unlock(tp); } if (ttyvp != NULL) { sx_xunlock(&proctree_lock); if (vn_lock(ttyvp, LK_EXCLUSIVE) == 0) { VOP_REVOKE(ttyvp, REVOKEALL); VOP_UNLOCK(ttyvp, 0); } vrele(ttyvp); sx_xlock(&proctree_lock); } } fixjobc(p, p->p_pgrp, 0); sx_xunlock(&proctree_lock); } /* * A process group has become orphaned; * if there are any stopped processes in the group, * hang-up all process in that group. */ static void orphanpg(struct pgrp *pg) { struct proc *p; PGRP_LOCK_ASSERT(pg, MA_OWNED); LIST_FOREACH(p, &pg->pg_members, p_pglist) { PROC_LOCK(p); if (P_SHOULDSTOP(p) == P_STOPPED_SIG) { PROC_UNLOCK(p); LIST_FOREACH(p, &pg->pg_members, p_pglist) { PROC_LOCK(p); kern_psignal(p, SIGHUP); kern_psignal(p, SIGCONT); PROC_UNLOCK(p); } return; } PROC_UNLOCK(p); } } void sess_hold(struct session *s) { refcount_acquire(&s->s_count); } void sess_release(struct session *s) { if (refcount_release(&s->s_count)) { if (s->s_ttyp != NULL) { tty_lock(s->s_ttyp); tty_rel_sess(s->s_ttyp, s); } mtx_destroy(&s->s_mtx); free(s, M_SESSION); } } #ifdef DDB DB_SHOW_COMMAND(pgrpdump, pgrpdump) { struct pgrp *pgrp; struct proc *p; int i; for (i = 0; i <= pgrphash; i++) { if (!LIST_EMPTY(&pgrphashtbl[i])) { printf("\tindx %d\n", i); LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) { printf( "\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n", (void *)pgrp, (long)pgrp->pg_id, (void *)pgrp->pg_session, pgrp->pg_session->s_count, (void *)LIST_FIRST(&pgrp->pg_members)); LIST_FOREACH(p, &pgrp->pg_members, p_pglist) { printf("\t\tpid %ld addr %p pgrp %p\n", (long)p->p_pid, (void *)p, (void *)p->p_pgrp); } } } } } #endif /* DDB */ /* * Calculate the kinfo_proc members which contain process-wide * informations. * Must be called with the target process locked. */ static void fill_kinfo_aggregate(struct proc *p, struct kinfo_proc *kp) { struct thread *td; PROC_LOCK_ASSERT(p, MA_OWNED); kp->ki_estcpu = 0; kp->ki_pctcpu = 0; FOREACH_THREAD_IN_PROC(p, td) { thread_lock(td); kp->ki_pctcpu += sched_pctcpu(td); kp->ki_estcpu += sched_estcpu(td); thread_unlock(td); } } /* * Clear kinfo_proc and fill in any information that is common * to all threads in the process. * Must be called with the target process locked. */ static void fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp) { struct thread *td0; struct tty *tp; struct session *sp; struct ucred *cred; struct sigacts *ps; struct timeval boottime; /* For proc_realparent. 
*/ sx_assert(&proctree_lock, SX_LOCKED); PROC_LOCK_ASSERT(p, MA_OWNED); bzero(kp, sizeof(*kp)); kp->ki_structsize = sizeof(*kp); kp->ki_paddr = p; kp->ki_addr =/* p->p_addr; */0; /* XXX */ kp->ki_args = p->p_args; kp->ki_textvp = p->p_textvp; #ifdef KTRACE kp->ki_tracep = p->p_tracevp; kp->ki_traceflag = p->p_traceflag; #endif kp->ki_fd = p->p_fd; kp->ki_vmspace = p->p_vmspace; kp->ki_flag = p->p_flag; kp->ki_flag2 = p->p_flag2; cred = p->p_ucred; if (cred) { kp->ki_uid = cred->cr_uid; kp->ki_ruid = cred->cr_ruid; kp->ki_svuid = cred->cr_svuid; kp->ki_cr_flags = 0; if (cred->cr_flags & CRED_FLAG_CAPMODE) kp->ki_cr_flags |= KI_CRF_CAPABILITY_MODE; /* XXX bde doesn't like KI_NGROUPS */ if (cred->cr_ngroups > KI_NGROUPS) { kp->ki_ngroups = KI_NGROUPS; kp->ki_cr_flags |= KI_CRF_GRP_OVERFLOW; } else kp->ki_ngroups = cred->cr_ngroups; bcopy(cred->cr_groups, kp->ki_groups, kp->ki_ngroups * sizeof(gid_t)); kp->ki_rgid = cred->cr_rgid; kp->ki_svgid = cred->cr_svgid; /* If jailed(cred), emulate the old P_JAILED flag. */ if (jailed(cred)) { kp->ki_flag |= P_JAILED; /* If inside the jail, use 0 as a jail ID. */ if (cred->cr_prison != curthread->td_ucred->cr_prison) kp->ki_jid = cred->cr_prison->pr_id; } strlcpy(kp->ki_loginclass, cred->cr_loginclass->lc_name, sizeof(kp->ki_loginclass)); } ps = p->p_sigacts; if (ps) { mtx_lock(&ps->ps_mtx); kp->ki_sigignore = ps->ps_sigignore; kp->ki_sigcatch = ps->ps_sigcatch; mtx_unlock(&ps->ps_mtx); } if (p->p_state != PRS_NEW && p->p_state != PRS_ZOMBIE && p->p_vmspace != NULL) { struct vmspace *vm = p->p_vmspace; kp->ki_size = vm->vm_map.size; kp->ki_rssize = vmspace_resident_count(vm); /*XXX*/ FOREACH_THREAD_IN_PROC(p, td0) { if (!TD_IS_SWAPPED(td0)) kp->ki_rssize += td0->td_kstack_pages; } kp->ki_swrss = vm->vm_swrss; kp->ki_tsize = vm->vm_tsize; kp->ki_dsize = vm->vm_dsize; kp->ki_ssize = vm->vm_ssize; } else if (p->p_state == PRS_ZOMBIE) kp->ki_stat = SZOMB; if (kp->ki_flag & P_INMEM) kp->ki_sflag = PS_INMEM; else kp->ki_sflag = 0; /* Calculate legacy swtime as seconds since 'swtick'. */ kp->ki_swtime = (ticks - p->p_swtick) / hz; kp->ki_pid = p->p_pid; kp->ki_nice = p->p_nice; kp->ki_fibnum = p->p_fibnum; kp->ki_start = p->p_stats->p_start; getboottime(&boottime); timevaladd(&kp->ki_start, &boottime); PROC_STATLOCK(p); rufetch(p, &kp->ki_rusage); kp->ki_runtime = cputick2usec(p->p_rux.rux_runtime); calcru(p, &kp->ki_rusage.ru_utime, &kp->ki_rusage.ru_stime); PROC_STATUNLOCK(p); calccru(p, &kp->ki_childutime, &kp->ki_childstime); /* Some callers want child times in a single value. */ kp->ki_childtime = kp->ki_childstime; timevaladd(&kp->ki_childtime, &kp->ki_childutime); FOREACH_THREAD_IN_PROC(p, td0) kp->ki_cow += td0->td_cow; tp = NULL; if (p->p_pgrp) { kp->ki_pgid = p->p_pgrp->pg_id; kp->ki_jobc = p->p_pgrp->pg_jobc; sp = p->p_pgrp->pg_session; if (sp != NULL) { kp->ki_sid = sp->s_sid; SESS_LOCK(sp); strlcpy(kp->ki_login, sp->s_login, sizeof(kp->ki_login)); if (sp->s_ttyvp) kp->ki_kiflag |= KI_CTTY; if (SESS_LEADER(p)) kp->ki_kiflag |= KI_SLEADER; /* XXX proctree_lock */ tp = sp->s_ttyp; SESS_UNLOCK(sp); } } if ((p->p_flag & P_CONTROLT) && tp != NULL) { kp->ki_tdev = tty_udev(tp); kp->ki_tdev_freebsd11 = kp->ki_tdev; /* truncate */ kp->ki_tpgid = tp->t_pgrp ? 
tp->t_pgrp->pg_id : NO_PID; if (tp->t_session) kp->ki_tsid = tp->t_session->s_sid; } else { kp->ki_tdev = NODEV; kp->ki_tdev_freebsd11 = kp->ki_tdev; /* truncate */ } if (p->p_comm[0] != '\0') strlcpy(kp->ki_comm, p->p_comm, sizeof(kp->ki_comm)); if (p->p_sysent && p->p_sysent->sv_name != NULL && p->p_sysent->sv_name[0] != '\0') strlcpy(kp->ki_emul, p->p_sysent->sv_name, sizeof(kp->ki_emul)); kp->ki_siglist = p->p_siglist; kp->ki_xstat = KW_EXITCODE(p->p_xexit, p->p_xsig); kp->ki_acflag = p->p_acflag; kp->ki_lock = p->p_lock; if (p->p_pptr) { kp->ki_ppid = proc_realparent(p)->p_pid; if (p->p_flag & P_TRACED) kp->ki_tracer = p->p_pptr->p_pid; } } /* * Fill in information that is thread specific. Must be called with * target process locked. If 'preferthread' is set, overwrite certain * process-related fields that are maintained for both threads and * processes. */ static void fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp, int preferthread) { struct proc *p; p = td->td_proc; kp->ki_tdaddr = td; PROC_LOCK_ASSERT(p, MA_OWNED); if (preferthread) PROC_STATLOCK(p); thread_lock(td); if (td->td_wmesg != NULL) strlcpy(kp->ki_wmesg, td->td_wmesg, sizeof(kp->ki_wmesg)); else bzero(kp->ki_wmesg, sizeof(kp->ki_wmesg)); if (strlcpy(kp->ki_tdname, td->td_name, sizeof(kp->ki_tdname)) >= sizeof(kp->ki_tdname)) { strlcpy(kp->ki_moretdname, td->td_name + sizeof(kp->ki_tdname) - 1, sizeof(kp->ki_moretdname)); } else { bzero(kp->ki_moretdname, sizeof(kp->ki_moretdname)); } if (TD_ON_LOCK(td)) { kp->ki_kiflag |= KI_LOCKBLOCK; strlcpy(kp->ki_lockname, td->td_lockname, sizeof(kp->ki_lockname)); } else { kp->ki_kiflag &= ~KI_LOCKBLOCK; bzero(kp->ki_lockname, sizeof(kp->ki_lockname)); } if (p->p_state == PRS_NORMAL) { /* approximate. */ if (TD_ON_RUNQ(td) || TD_CAN_RUN(td) || TD_IS_RUNNING(td)) { kp->ki_stat = SRUN; } else if (P_SHOULDSTOP(p)) { kp->ki_stat = SSTOP; } else if (TD_IS_SLEEPING(td)) { kp->ki_stat = SSLEEP; } else if (TD_ON_LOCK(td)) { kp->ki_stat = SLOCK; } else { kp->ki_stat = SWAIT; } } else if (p->p_state == PRS_ZOMBIE) { kp->ki_stat = SZOMB; } else { kp->ki_stat = SIDL; } /* Things in the thread */ kp->ki_wchan = td->td_wchan; kp->ki_pri.pri_level = td->td_priority; kp->ki_pri.pri_native = td->td_base_pri; /* * Note: legacy fields; clamp at the old NOCPU value and/or * the maximum u_char CPU value. */ if (td->td_lastcpu == NOCPU) kp->ki_lastcpu_old = NOCPU_OLD; else if (td->td_lastcpu > MAXCPU_OLD) kp->ki_lastcpu_old = MAXCPU_OLD; else kp->ki_lastcpu_old = td->td_lastcpu; if (td->td_oncpu == NOCPU) kp->ki_oncpu_old = NOCPU_OLD; else if (td->td_oncpu > MAXCPU_OLD) kp->ki_oncpu_old = MAXCPU_OLD; else kp->ki_oncpu_old = td->td_oncpu; kp->ki_lastcpu = td->td_lastcpu; kp->ki_oncpu = td->td_oncpu; kp->ki_tdflags = td->td_flags; kp->ki_tid = td->td_tid; kp->ki_numthreads = p->p_numthreads; kp->ki_pcb = td->td_pcb; kp->ki_kstack = (void *)td->td_kstack; kp->ki_slptime = (ticks - td->td_slptick) / hz; kp->ki_pri.pri_class = td->td_pri_class; kp->ki_pri.pri_user = td->td_user_pri; if (preferthread) { rufetchtd(td, &kp->ki_rusage); kp->ki_runtime = cputick2usec(td->td_rux.rux_runtime); kp->ki_pctcpu = sched_pctcpu(td); kp->ki_estcpu = sched_estcpu(td); kp->ki_cow = td->td_cow; } /* We can't get this anymore but ps etc never used it anyway. */ kp->ki_rqindex = 0; if (preferthread) kp->ki_siglist = td->td_siglist; kp->ki_sigmask = td->td_sigmask; thread_unlock(td); if (preferthread) PROC_STATUNLOCK(p); } /* * Fill in a kinfo_proc structure for the specified process. 
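 * This combines fill_kinfo_proc_only(), fill_kinfo_thread() on the
 * process' first thread (with preferthread == 0), and
 * fill_kinfo_aggregate().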
* Must be called with the target process locked. */ void fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp) { MPASS(FIRST_THREAD_IN_PROC(p) != NULL); fill_kinfo_proc_only(p, kp); fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), kp, 0); fill_kinfo_aggregate(p, kp); } struct pstats * pstats_alloc(void) { return (malloc(sizeof(struct pstats), M_SUBPROC, M_ZERO|M_WAITOK)); } /* * Copy parts of p_stats; zero the rest of p_stats (statistics). */ void pstats_fork(struct pstats *src, struct pstats *dst) { bzero(&dst->pstat_startzero, __rangeof(struct pstats, pstat_startzero, pstat_endzero)); bcopy(&src->pstat_startcopy, &dst->pstat_startcopy, __rangeof(struct pstats, pstat_startcopy, pstat_endcopy)); } void pstats_free(struct pstats *ps) { free(ps, M_SUBPROC); } static struct proc * zpfind_locked(pid_t pid) { struct proc *p; sx_assert(&allproc_lock, SX_LOCKED); LIST_FOREACH(p, &zombproc, p_list) { if (p->p_pid == pid) { PROC_LOCK(p); break; } } return (p); } /* * Locate a zombie process by number */ struct proc * zpfind(pid_t pid) { struct proc *p; sx_slock(&allproc_lock); p = zpfind_locked(pid); sx_sunlock(&allproc_lock); return (p); } #ifdef COMPAT_FREEBSD32 /* * This function is typically used to copy out the kernel address, so * it can be replaced by assignment of zero. */ static inline uint32_t ptr32_trim(void *ptr) { uintptr_t uptr; uptr = (uintptr_t)ptr; return ((uptr > UINT_MAX) ? 0 : uptr); } #define PTRTRIM_CP(src,dst,fld) \ do { (dst).fld = ptr32_trim((src).fld); } while (0) static void freebsd32_kinfo_proc_out(const struct kinfo_proc *ki, struct kinfo_proc32 *ki32) { int i; bzero(ki32, sizeof(struct kinfo_proc32)); ki32->ki_structsize = sizeof(struct kinfo_proc32); CP(*ki, *ki32, ki_layout); PTRTRIM_CP(*ki, *ki32, ki_args); PTRTRIM_CP(*ki, *ki32, ki_paddr); PTRTRIM_CP(*ki, *ki32, ki_addr); PTRTRIM_CP(*ki, *ki32, ki_tracep); PTRTRIM_CP(*ki, *ki32, ki_textvp); PTRTRIM_CP(*ki, *ki32, ki_fd); PTRTRIM_CP(*ki, *ki32, ki_vmspace); PTRTRIM_CP(*ki, *ki32, ki_wchan); CP(*ki, *ki32, ki_pid); CP(*ki, *ki32, ki_ppid); CP(*ki, *ki32, ki_pgid); CP(*ki, *ki32, ki_tpgid); CP(*ki, *ki32, ki_sid); CP(*ki, *ki32, ki_tsid); CP(*ki, *ki32, ki_jobc); CP(*ki, *ki32, ki_tdev); CP(*ki, *ki32, ki_tdev_freebsd11); CP(*ki, *ki32, ki_siglist); CP(*ki, *ki32, ki_sigmask); CP(*ki, *ki32, ki_sigignore); CP(*ki, *ki32, ki_sigcatch); CP(*ki, *ki32, ki_uid); CP(*ki, *ki32, ki_ruid); CP(*ki, *ki32, ki_svuid); CP(*ki, *ki32, ki_rgid); CP(*ki, *ki32, ki_svgid); CP(*ki, *ki32, ki_ngroups); for (i = 0; i < KI_NGROUPS; i++) CP(*ki, *ki32, ki_groups[i]); CP(*ki, *ki32, ki_size); CP(*ki, *ki32, ki_rssize); CP(*ki, *ki32, ki_swrss); CP(*ki, *ki32, ki_tsize); CP(*ki, *ki32, ki_dsize); CP(*ki, *ki32, ki_ssize); CP(*ki, *ki32, ki_xstat); CP(*ki, *ki32, ki_acflag); CP(*ki, *ki32, ki_pctcpu); CP(*ki, *ki32, ki_estcpu); CP(*ki, *ki32, ki_slptime); CP(*ki, *ki32, ki_swtime); CP(*ki, *ki32, ki_cow); CP(*ki, *ki32, ki_runtime); TV_CP(*ki, *ki32, ki_start); TV_CP(*ki, *ki32, ki_childtime); CP(*ki, *ki32, ki_flag); CP(*ki, *ki32, ki_kiflag); CP(*ki, *ki32, ki_traceflag); CP(*ki, *ki32, ki_stat); CP(*ki, *ki32, ki_nice); CP(*ki, *ki32, ki_lock); CP(*ki, *ki32, ki_rqindex); CP(*ki, *ki32, ki_oncpu); CP(*ki, *ki32, ki_lastcpu); /* XXX TODO: wrap cpu value as appropriate */ CP(*ki, *ki32, ki_oncpu_old); CP(*ki, *ki32, ki_lastcpu_old); bcopy(ki->ki_tdname, ki32->ki_tdname, TDNAMLEN + 1); bcopy(ki->ki_wmesg, ki32->ki_wmesg, WMESGLEN + 1); bcopy(ki->ki_login, ki32->ki_login, LOGNAMELEN + 1); bcopy(ki->ki_lockname, ki32->ki_lockname, LOCKNAMELEN + 
1); bcopy(ki->ki_comm, ki32->ki_comm, COMMLEN + 1); bcopy(ki->ki_emul, ki32->ki_emul, KI_EMULNAMELEN + 1); bcopy(ki->ki_loginclass, ki32->ki_loginclass, LOGINCLASSLEN + 1); bcopy(ki->ki_moretdname, ki32->ki_moretdname, MAXCOMLEN - TDNAMLEN + 1); CP(*ki, *ki32, ki_tracer); CP(*ki, *ki32, ki_flag2); CP(*ki, *ki32, ki_fibnum); CP(*ki, *ki32, ki_cr_flags); CP(*ki, *ki32, ki_jid); CP(*ki, *ki32, ki_numthreads); CP(*ki, *ki32, ki_tid); CP(*ki, *ki32, ki_pri); freebsd32_rusage_out(&ki->ki_rusage, &ki32->ki_rusage); freebsd32_rusage_out(&ki->ki_rusage_ch, &ki32->ki_rusage_ch); PTRTRIM_CP(*ki, *ki32, ki_pcb); PTRTRIM_CP(*ki, *ki32, ki_kstack); PTRTRIM_CP(*ki, *ki32, ki_udata); CP(*ki, *ki32, ki_sflag); CP(*ki, *ki32, ki_tdflags); } #endif int kern_proc_out(struct proc *p, struct sbuf *sb, int flags) { struct thread *td; struct kinfo_proc ki; #ifdef COMPAT_FREEBSD32 struct kinfo_proc32 ki32; #endif int error; PROC_LOCK_ASSERT(p, MA_OWNED); MPASS(FIRST_THREAD_IN_PROC(p) != NULL); error = 0; fill_kinfo_proc(p, &ki); if ((flags & KERN_PROC_NOTHREADS) != 0) { #ifdef COMPAT_FREEBSD32 if ((flags & KERN_PROC_MASK32) != 0) { freebsd32_kinfo_proc_out(&ki, &ki32); if (sbuf_bcat(sb, &ki32, sizeof(ki32)) != 0) error = ENOMEM; } else #endif if (sbuf_bcat(sb, &ki, sizeof(ki)) != 0) error = ENOMEM; } else { FOREACH_THREAD_IN_PROC(p, td) { fill_kinfo_thread(td, &ki, 1); #ifdef COMPAT_FREEBSD32 if ((flags & KERN_PROC_MASK32) != 0) { freebsd32_kinfo_proc_out(&ki, &ki32); if (sbuf_bcat(sb, &ki32, sizeof(ki32)) != 0) error = ENOMEM; } else #endif if (sbuf_bcat(sb, &ki, sizeof(ki)) != 0) error = ENOMEM; if (error != 0) break; } } PROC_UNLOCK(p); return (error); } static int sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags, int doingzomb) { struct sbuf sb; struct kinfo_proc ki; struct proc *np; int error, error2; pid_t pid; pid = p->p_pid; sbuf_new_for_sysctl(&sb, (char *)&ki, sizeof(ki), req); sbuf_clear_flags(&sb, SBUF_INCLUDENUL); error = kern_proc_out(p, &sb, flags); error2 = sbuf_finish(&sb); sbuf_delete(&sb); if (error != 0) return (error); else if (error2 != 0) return (error2); if (doingzomb) np = zpfind(pid); else { if (pid == 0) return (0); np = pfind(pid); } if (np == NULL) return (ESRCH); if (np != p) { PROC_UNLOCK(np); return (ESRCH); } PROC_UNLOCK(np); return (0); } static int sysctl_kern_proc(SYSCTL_HANDLER_ARGS) { int *name = (int *)arg1; u_int namelen = arg2; struct proc *p; int flags, doingzomb, oid_number; int error = 0; oid_number = oidp->oid_number; if (oid_number != KERN_PROC_ALL && (oid_number & KERN_PROC_INC_THREAD) == 0) flags = KERN_PROC_NOTHREADS; else { flags = 0; oid_number &= ~KERN_PROC_INC_THREAD; } #ifdef COMPAT_FREEBSD32 if (req->flags & SCTL_MASK32) flags |= KERN_PROC_MASK32; #endif if (oid_number == KERN_PROC_PID) { if (namelen != 1) return (EINVAL); error = sysctl_wire_old_buffer(req, 0); if (error) return (error); sx_slock(&proctree_lock); error = pget((pid_t)name[0], PGET_CANSEE, &p); if (error == 0) error = sysctl_out_proc(p, req, flags, 0); sx_sunlock(&proctree_lock); return (error); } switch (oid_number) { case KERN_PROC_ALL: if (namelen != 0) return (EINVAL); break; case KERN_PROC_PROC: if (namelen != 0 && namelen != 1) return (EINVAL); break; default: if (namelen != 1) return (EINVAL); break; } if (!req->oldptr) { /* overestimate by 5 procs */ error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5); if (error) return (error); } error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); sx_slock(&proctree_lock); sx_slock(&allproc_lock); for 
(doingzomb=0 ; doingzomb < 2 ; doingzomb++) { if (!doingzomb) p = LIST_FIRST(&allproc); else p = LIST_FIRST(&zombproc); for (; p != NULL; p = LIST_NEXT(p, p_list)) { /* * Skip embryonic processes. */ PROC_LOCK(p); if (p->p_state == PRS_NEW) { PROC_UNLOCK(p); continue; } KASSERT(p->p_ucred != NULL, ("process credential is NULL for non-NEW proc")); /* * Show a user only appropriate processes. */ if (p_cansee(curthread, p)) { PROC_UNLOCK(p); continue; } /* * TODO - make more efficient (see notes below). * do by session. */ switch (oid_number) { case KERN_PROC_GID: if (p->p_ucred->cr_gid != (gid_t)name[0]) { PROC_UNLOCK(p); continue; } break; case KERN_PROC_PGRP: /* could do this by traversing pgrp */ if (p->p_pgrp == NULL || p->p_pgrp->pg_id != (pid_t)name[0]) { PROC_UNLOCK(p); continue; } break; case KERN_PROC_RGID: if (p->p_ucred->cr_rgid != (gid_t)name[0]) { PROC_UNLOCK(p); continue; } break; case KERN_PROC_SESSION: if (p->p_session == NULL || p->p_session->s_sid != (pid_t)name[0]) { PROC_UNLOCK(p); continue; } break; case KERN_PROC_TTY: if ((p->p_flag & P_CONTROLT) == 0 || p->p_session == NULL) { PROC_UNLOCK(p); continue; } /* XXX proctree_lock */ SESS_LOCK(p->p_session); if (p->p_session->s_ttyp == NULL || tty_udev(p->p_session->s_ttyp) != (dev_t)name[0]) { SESS_UNLOCK(p->p_session); PROC_UNLOCK(p); continue; } SESS_UNLOCK(p->p_session); break; case KERN_PROC_UID: if (p->p_ucred->cr_uid != (uid_t)name[0]) { PROC_UNLOCK(p); continue; } break; case KERN_PROC_RUID: if (p->p_ucred->cr_ruid != (uid_t)name[0]) { PROC_UNLOCK(p); continue; } break; case KERN_PROC_PROC: break; default: break; } error = sysctl_out_proc(p, req, flags, doingzomb); if (error) { sx_sunlock(&allproc_lock); sx_sunlock(&proctree_lock); return (error); } } } sx_sunlock(&allproc_lock); sx_sunlock(&proctree_lock); return (0); } struct pargs * pargs_alloc(int len) { struct pargs *pa; pa = malloc(sizeof(struct pargs) + len, M_PARGS, M_WAITOK); refcount_init(&pa->ar_ref, 1); pa->ar_length = len; return (pa); } static void pargs_free(struct pargs *pa) { free(pa, M_PARGS); } void pargs_hold(struct pargs *pa) { if (pa == NULL) return; refcount_acquire(&pa->ar_ref); } void pargs_drop(struct pargs *pa) { if (pa == NULL) return; if (refcount_release(&pa->ar_ref)) pargs_free(pa); } static int proc_read_string(struct thread *td, struct proc *p, const char *sptr, char *buf, size_t len) { ssize_t n; /* * This may return a short read if the string is shorter than the chunk * and is aligned at the end of the page, and the following page is not * mapped. */ n = proc_readmem(td, p, (vm_offset_t)sptr, buf, len); if (n <= 0) return (ENOMEM); return (0); } #define PROC_AUXV_MAX 256 /* Safety limit on auxv size. 
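 * The get_proc_vector() routines below walk the target's auxv one
 * Elf_Auxinfo at a time until AT_NULL; this cap bounds the walk when
 * the vector is corrupt or unterminated.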
*/ enum proc_vector_type { PROC_ARG, PROC_ENV, PROC_AUX, }; #ifdef COMPAT_FREEBSD32 static int get_proc_vector32(struct thread *td, struct proc *p, char ***proc_vectorp, size_t *vsizep, enum proc_vector_type type) { struct freebsd32_ps_strings pss; Elf32_Auxinfo aux; vm_offset_t vptr, ptr; uint32_t *proc_vector32; char **proc_vector; size_t vsize, size; int i, error; error = 0; if (proc_readmem(td, p, (vm_offset_t)p->p_sysent->sv_psstrings, &pss, sizeof(pss)) != sizeof(pss)) return (ENOMEM); switch (type) { case PROC_ARG: vptr = (vm_offset_t)PTRIN(pss.ps_argvstr); vsize = pss.ps_nargvstr; if (vsize > ARG_MAX) return (ENOEXEC); size = vsize * sizeof(int32_t); break; case PROC_ENV: vptr = (vm_offset_t)PTRIN(pss.ps_envstr); vsize = pss.ps_nenvstr; if (vsize > ARG_MAX) return (ENOEXEC); size = vsize * sizeof(int32_t); break; case PROC_AUX: vptr = (vm_offset_t)PTRIN(pss.ps_envstr) + (pss.ps_nenvstr + 1) * sizeof(int32_t); if (vptr % 4 != 0) return (ENOEXEC); for (ptr = vptr, i = 0; i < PROC_AUXV_MAX; i++) { if (proc_readmem(td, p, ptr, &aux, sizeof(aux)) != sizeof(aux)) return (ENOMEM); if (aux.a_type == AT_NULL) break; ptr += sizeof(aux); } if (aux.a_type != AT_NULL) return (ENOEXEC); vsize = i + 1; size = vsize * sizeof(aux); break; default: KASSERT(0, ("Wrong proc vector type: %d", type)); return (EINVAL); } proc_vector32 = malloc(size, M_TEMP, M_WAITOK); if (proc_readmem(td, p, vptr, proc_vector32, size) != size) { error = ENOMEM; goto done; } if (type == PROC_AUX) { *proc_vectorp = (char **)proc_vector32; *vsizep = vsize; return (0); } proc_vector = malloc(vsize * sizeof(char *), M_TEMP, M_WAITOK); for (i = 0; i < (int)vsize; i++) proc_vector[i] = PTRIN(proc_vector32[i]); *proc_vectorp = proc_vector; *vsizep = vsize; done: free(proc_vector32, M_TEMP); return (error); } #endif static int get_proc_vector(struct thread *td, struct proc *p, char ***proc_vectorp, size_t *vsizep, enum proc_vector_type type) { struct ps_strings pss; Elf_Auxinfo aux; vm_offset_t vptr, ptr; char **proc_vector; size_t vsize, size; int i; #ifdef COMPAT_FREEBSD32 if (SV_PROC_FLAG(p, SV_ILP32) != 0) return (get_proc_vector32(td, p, proc_vectorp, vsizep, type)); #endif if (proc_readmem(td, p, (vm_offset_t)p->p_sysent->sv_psstrings, &pss, sizeof(pss)) != sizeof(pss)) return (ENOMEM); switch (type) { case PROC_ARG: vptr = (vm_offset_t)pss.ps_argvstr; vsize = pss.ps_nargvstr; if (vsize > ARG_MAX) return (ENOEXEC); size = vsize * sizeof(char *); break; case PROC_ENV: vptr = (vm_offset_t)pss.ps_envstr; vsize = pss.ps_nenvstr; if (vsize > ARG_MAX) return (ENOEXEC); size = vsize * sizeof(char *); break; case PROC_AUX: /* * The aux array is just above env array on the stack. Check * that the address is naturally aligned. */ vptr = (vm_offset_t)pss.ps_envstr + (pss.ps_nenvstr + 1) * sizeof(char *); #if __ELF_WORD_SIZE == 64 if (vptr % sizeof(uint64_t) != 0) #else if (vptr % sizeof(uint32_t) != 0) #endif return (ENOEXEC); /* * We count the array size reading the aux vectors from the * stack until AT_NULL vector is returned. So (to keep the code * simple) we read the process stack twice: the first time here * to find the size and the second time when copying the vectors * to the allocated proc_vector. 
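 * (PROC_AUXV_MAX bounds both passes; a vector that is not
 * AT_NULL-terminated within that many entries is rejected with
 * ENOEXEC.)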
*/ for (ptr = vptr, i = 0; i < PROC_AUXV_MAX; i++) { if (proc_readmem(td, p, ptr, &aux, sizeof(aux)) != sizeof(aux)) return (ENOMEM); if (aux.a_type == AT_NULL) break; ptr += sizeof(aux); } /* * If the PROC_AUXV_MAX entries are iterated over, and we have * not reached AT_NULL, it is most likely we are reading wrong * data: either the process doesn't have auxv array or data has * been modified. Return the error in this case. */ if (aux.a_type != AT_NULL) return (ENOEXEC); vsize = i + 1; size = vsize * sizeof(aux); break; default: KASSERT(0, ("Wrong proc vector type: %d", type)); return (EINVAL); /* In case we are built without INVARIANTS. */ } proc_vector = malloc(size, M_TEMP, M_WAITOK); if (proc_readmem(td, p, vptr, proc_vector, size) != size) { free(proc_vector, M_TEMP); return (ENOMEM); } *proc_vectorp = proc_vector; *vsizep = vsize; return (0); } #define GET_PS_STRINGS_CHUNK_SZ 256 /* Chunk size (bytes) for ps_strings operations. */ static int get_ps_strings(struct thread *td, struct proc *p, struct sbuf *sb, enum proc_vector_type type) { size_t done, len, nchr, vsize; int error, i; char **proc_vector, *sptr; char pss_string[GET_PS_STRINGS_CHUNK_SZ]; PROC_ASSERT_HELD(p); /* * We are not going to read more than 2 * (PATH_MAX + ARG_MAX) bytes. */ nchr = 2 * (PATH_MAX + ARG_MAX); error = get_proc_vector(td, p, &proc_vector, &vsize, type); if (error != 0) return (error); for (done = 0, i = 0; i < (int)vsize && done < nchr; i++) { /* * The program may have scribbled into its argv array, e.g. to * remove some arguments. If that has happened, break out * before trying to read from NULL. */ if (proc_vector[i] == NULL) break; for (sptr = proc_vector[i]; ; sptr += GET_PS_STRINGS_CHUNK_SZ) { error = proc_read_string(td, p, sptr, pss_string, sizeof(pss_string)); if (error != 0) goto done; len = strnlen(pss_string, GET_PS_STRINGS_CHUNK_SZ); if (done + len >= nchr) len = nchr - done - 1; sbuf_bcat(sb, pss_string, len); if (len != GET_PS_STRINGS_CHUNK_SZ) break; done += GET_PS_STRINGS_CHUNK_SZ; } sbuf_bcat(sb, "", 1); done += len + 1; } done: free(proc_vector, M_TEMP); return (error); } int proc_getargv(struct thread *td, struct proc *p, struct sbuf *sb) { return (get_ps_strings(curthread, p, sb, PROC_ARG)); } int proc_getenvv(struct thread *td, struct proc *p, struct sbuf *sb) { return (get_ps_strings(curthread, p, sb, PROC_ENV)); } int proc_getauxv(struct thread *td, struct proc *p, struct sbuf *sb) { size_t vsize, size; char **auxv; int error; error = get_proc_vector(td, p, &auxv, &vsize, PROC_AUX); if (error == 0) { #ifdef COMPAT_FREEBSD32 if (SV_PROC_FLAG(p, SV_ILP32) != 0) size = vsize * sizeof(Elf32_Auxinfo); else #endif size = vsize * sizeof(Elf_Auxinfo); if (sbuf_bcat(sb, auxv, size) != 0) error = ENOMEM; free(auxv, M_TEMP); } return (error); } /* * This sysctl allows a process to retrieve the argument list or process * title for another process without groping around in the address space * of the other process. It also allow a process to set its own "process * title to a string of its own choice. 
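 * (The write side is what setproctitle(3) ultimately uses; the new
 * string is cached in p_args, subject to ps_arg_cache_limit.)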
*/ static int sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS) { int *name = (int *)arg1; u_int namelen = arg2; struct pargs *newpa, *pa; struct proc *p; struct sbuf sb; int flags, error = 0, error2; if (namelen != 1) return (EINVAL); flags = PGET_CANSEE; if (req->newptr != NULL) flags |= PGET_ISCURRENT; error = pget((pid_t)name[0], flags, &p); if (error) return (error); pa = p->p_args; if (pa != NULL) { pargs_hold(pa); PROC_UNLOCK(p); error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length); pargs_drop(pa); } else if ((p->p_flag & (P_WEXIT | P_SYSTEM)) == 0) { _PHOLD(p); PROC_UNLOCK(p); sbuf_new_for_sysctl(&sb, NULL, GET_PS_STRINGS_CHUNK_SZ, req); sbuf_clear_flags(&sb, SBUF_INCLUDENUL); error = proc_getargv(curthread, p, &sb); error2 = sbuf_finish(&sb); PRELE(p); sbuf_delete(&sb); if (error == 0 && error2 != 0) error = error2; } else { PROC_UNLOCK(p); } if (error != 0 || req->newptr == NULL) return (error); if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit) return (ENOMEM); newpa = pargs_alloc(req->newlen); error = SYSCTL_IN(req, newpa->ar_args, req->newlen); if (error != 0) { pargs_free(newpa); return (error); } PROC_LOCK(p); pa = p->p_args; p->p_args = newpa; PROC_UNLOCK(p); pargs_drop(pa); return (0); } /* * This sysctl allows a process to retrieve environment of another process. */ static int sysctl_kern_proc_env(SYSCTL_HANDLER_ARGS) { int *name = (int *)arg1; u_int namelen = arg2; struct proc *p; struct sbuf sb; int error, error2; if (namelen != 1) return (EINVAL); error = pget((pid_t)name[0], PGET_WANTREAD, &p); if (error != 0) return (error); if ((p->p_flag & P_SYSTEM) != 0) { PRELE(p); return (0); } sbuf_new_for_sysctl(&sb, NULL, GET_PS_STRINGS_CHUNK_SZ, req); sbuf_clear_flags(&sb, SBUF_INCLUDENUL); error = proc_getenvv(curthread, p, &sb); error2 = sbuf_finish(&sb); PRELE(p); sbuf_delete(&sb); return (error != 0 ? error : error2); } /* * This sysctl allows a process to retrieve ELF auxiliary vector of * another process. */ static int sysctl_kern_proc_auxv(SYSCTL_HANDLER_ARGS) { int *name = (int *)arg1; u_int namelen = arg2; struct proc *p; struct sbuf sb; int error, error2; if (namelen != 1) return (EINVAL); error = pget((pid_t)name[0], PGET_WANTREAD, &p); if (error != 0) return (error); if ((p->p_flag & P_SYSTEM) != 0) { PRELE(p); return (0); } sbuf_new_for_sysctl(&sb, NULL, GET_PS_STRINGS_CHUNK_SZ, req); sbuf_clear_flags(&sb, SBUF_INCLUDENUL); error = proc_getauxv(curthread, p, &sb); error2 = sbuf_finish(&sb); PRELE(p); sbuf_delete(&sb); return (error != 0 ? error : error2); } /* * This sysctl allows a process to retrieve the path of the executable for * itself or another process. 
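 * A pid argument of -1 names the calling process itself, in which
 * case no visibility check is performed.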
*/ static int sysctl_kern_proc_pathname(SYSCTL_HANDLER_ARGS) { pid_t *pidp = (pid_t *)arg1; unsigned int arglen = arg2; struct proc *p; struct vnode *vp; char *retbuf, *freebuf; int error; if (arglen != 1) return (EINVAL); if (*pidp == -1) { /* -1 means this process */ p = req->td->td_proc; } else { error = pget(*pidp, PGET_CANSEE, &p); if (error != 0) return (error); } vp = p->p_textvp; if (vp == NULL) { if (*pidp != -1) PROC_UNLOCK(p); return (0); } vref(vp); if (*pidp != -1) PROC_UNLOCK(p); error = vn_fullpath(req->td, vp, &retbuf, &freebuf); vrele(vp); if (error) return (error); error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1); free(freebuf, M_TEMP); return (error); } static int sysctl_kern_proc_sv_name(SYSCTL_HANDLER_ARGS) { struct proc *p; char *sv_name; int *name; int namelen; int error; namelen = arg2; if (namelen != 1) return (EINVAL); name = (int *)arg1; error = pget((pid_t)name[0], PGET_CANSEE, &p); if (error != 0) return (error); sv_name = p->p_sysent->sv_name; PROC_UNLOCK(p); return (sysctl_handle_string(oidp, sv_name, 0, req)); } #ifdef KINFO_OVMENTRY_SIZE CTASSERT(sizeof(struct kinfo_ovmentry) == KINFO_OVMENTRY_SIZE); #endif #ifdef COMPAT_FREEBSD7 static int sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS) { vm_map_entry_t entry, tmp_entry; unsigned int last_timestamp; char *fullpath, *freepath; struct kinfo_ovmentry *kve; struct vattr va; struct ucred *cred; int error, *name; struct vnode *vp; struct proc *p; vm_map_t map; struct vmspace *vm; name = (int *)arg1; error = pget((pid_t)name[0], PGET_WANTREAD, &p); if (error != 0) return (error); vm = vmspace_acquire_ref(p); if (vm == NULL) { PRELE(p); return (ESRCH); } kve = malloc(sizeof(*kve), M_TEMP, M_WAITOK); map = &vm->vm_map; vm_map_lock_read(map); for (entry = map->header.next; entry != &map->header; entry = entry->next) { vm_object_t obj, tobj, lobj; vm_offset_t addr; if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) continue; bzero(kve, sizeof(*kve)); kve->kve_structsize = sizeof(*kve); kve->kve_private_resident = 0; obj = entry->object.vm_object; if (obj != NULL) { VM_OBJECT_RLOCK(obj); if (obj->shadow_count == 1) kve->kve_private_resident = obj->resident_page_count; } kve->kve_resident = 0; addr = entry->start; while (addr < entry->end) { if (pmap_extract(map->pmap, addr)) kve->kve_resident++; addr += PAGE_SIZE; } for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) { if (tobj != obj) VM_OBJECT_RLOCK(tobj); if (lobj != obj) VM_OBJECT_RUNLOCK(lobj); lobj = tobj; } kve->kve_start = (void*)entry->start; kve->kve_end = (void*)entry->end; kve->kve_offset = (off_t)entry->offset; if (entry->protection & VM_PROT_READ) kve->kve_protection |= KVME_PROT_READ; if (entry->protection & VM_PROT_WRITE) kve->kve_protection |= KVME_PROT_WRITE; if (entry->protection & VM_PROT_EXECUTE) kve->kve_protection |= KVME_PROT_EXEC; if (entry->eflags & MAP_ENTRY_COW) kve->kve_flags |= KVME_FLAG_COW; if (entry->eflags & MAP_ENTRY_NEEDS_COPY) kve->kve_flags |= KVME_FLAG_NEEDS_COPY; if (entry->eflags & MAP_ENTRY_NOCOREDUMP) kve->kve_flags |= KVME_FLAG_NOCOREDUMP; last_timestamp = map->timestamp; vm_map_unlock_read(map); kve->kve_fileid = 0; kve->kve_fsid = 0; freepath = NULL; fullpath = ""; if (lobj) { vp = NULL; switch (lobj->type) { case OBJT_DEFAULT: kve->kve_type = KVME_TYPE_DEFAULT; break; case OBJT_VNODE: kve->kve_type = KVME_TYPE_VNODE; vp = lobj->handle; vref(vp); break; case OBJT_SWAP: if ((lobj->flags & OBJ_TMPFS_NODE) != 0) { kve->kve_type = KVME_TYPE_VNODE; if ((lobj->flags & OBJ_TMPFS) != 0) { vp = lobj->un_pager.swp.swp_tmpfs; vref(vp); 
} } else { kve->kve_type = KVME_TYPE_SWAP; } break; case OBJT_DEVICE: kve->kve_type = KVME_TYPE_DEVICE; break; case OBJT_PHYS: kve->kve_type = KVME_TYPE_PHYS; break; case OBJT_DEAD: kve->kve_type = KVME_TYPE_DEAD; break; case OBJT_SG: kve->kve_type = KVME_TYPE_SG; break; default: kve->kve_type = KVME_TYPE_UNKNOWN; break; } if (lobj != obj) VM_OBJECT_RUNLOCK(lobj); kve->kve_ref_count = obj->ref_count; kve->kve_shadow_count = obj->shadow_count; VM_OBJECT_RUNLOCK(obj); if (vp != NULL) { vn_fullpath(curthread, vp, &fullpath, &freepath); cred = curthread->td_ucred; vn_lock(vp, LK_SHARED | LK_RETRY); if (VOP_GETATTR(vp, &va, cred) == 0) { kve->kve_fileid = va.va_fileid; /* truncate */ kve->kve_fsid = va.va_fsid; } vput(vp); } } else { kve->kve_type = KVME_TYPE_NONE; kve->kve_ref_count = 0; kve->kve_shadow_count = 0; } strlcpy(kve->kve_path, fullpath, sizeof(kve->kve_path)); if (freepath != NULL) free(freepath, M_TEMP); error = SYSCTL_OUT(req, kve, sizeof(*kve)); vm_map_lock_read(map); if (error) break; if (last_timestamp != map->timestamp) { vm_map_lookup_entry(map, addr - 1, &tmp_entry); entry = tmp_entry; } } vm_map_unlock_read(map); vmspace_free(vm); PRELE(p); free(kve, M_TEMP); return (error); } #endif /* COMPAT_FREEBSD7 */ #ifdef KINFO_VMENTRY_SIZE CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE); #endif static void kern_proc_vmmap_resident(vm_map_t map, vm_map_entry_t entry, struct kinfo_vmentry *kve) { vm_object_t obj, tobj; vm_page_t m, m_adv; vm_offset_t addr; vm_paddr_t locked_pa; vm_pindex_t pi, pi_adv, pindex; locked_pa = 0; obj = entry->object.vm_object; addr = entry->start; m_adv = NULL; pi = OFF_TO_IDX(entry->offset); for (; addr < entry->end; addr += IDX_TO_OFF(pi_adv), pi += pi_adv) { if (m_adv != NULL) { m = m_adv; } else { pi_adv = atop(entry->end - addr); pindex = pi; for (tobj = obj;; tobj = tobj->backing_object) { m = vm_page_find_least(tobj, pindex); if (m != NULL) { if (m->pindex == pindex) break; if (pi_adv > m->pindex - pindex) { pi_adv = m->pindex - pindex; m_adv = m; } } if (tobj->backing_object == NULL) goto next; pindex += OFF_TO_IDX(tobj-> backing_object_offset); } } m_adv = NULL; if (m->psind != 0 && addr + pagesizes[1] <= entry->end && (addr & (pagesizes[1] - 1)) == 0 && (pmap_mincore(map->pmap, addr, &locked_pa) & MINCORE_SUPER) != 0) { kve->kve_flags |= KVME_FLAG_SUPER; pi_adv = atop(pagesizes[1]); } else { /* * We do not test the found page on validity. * Either the page is busy and being paged in, * or it was invalidated. The first case * should be counted as resident, the second * is not so clear; we do account both. */ pi_adv = 1; } kve->kve_resident += pi_adv; next:; } PA_UNLOCK_COND(locked_pa); } /* * Must be called with the process locked and will return unlocked. 
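 * A maxlen of -1 means no output limit; otherwise filling halts at a
 * whole-record boundary rather than exceeding maxlen.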
*/ int kern_proc_vmmap_out(struct proc *p, struct sbuf *sb, ssize_t maxlen, int flags) { vm_map_entry_t entry, tmp_entry; struct vattr va; vm_map_t map; vm_object_t obj, tobj, lobj; char *fullpath, *freepath; struct kinfo_vmentry *kve; struct ucred *cred; struct vnode *vp; struct vmspace *vm; vm_offset_t addr; unsigned int last_timestamp; int error; PROC_LOCK_ASSERT(p, MA_OWNED); _PHOLD(p); PROC_UNLOCK(p); vm = vmspace_acquire_ref(p); if (vm == NULL) { PRELE(p); return (ESRCH); } kve = malloc(sizeof(*kve), M_TEMP, M_WAITOK | M_ZERO); error = 0; map = &vm->vm_map; vm_map_lock_read(map); for (entry = map->header.next; entry != &map->header; entry = entry->next) { if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) continue; addr = entry->end; bzero(kve, sizeof(*kve)); obj = entry->object.vm_object; if (obj != NULL) { for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) { VM_OBJECT_RLOCK(tobj); lobj = tobj; } if (obj->backing_object == NULL) kve->kve_private_resident = obj->resident_page_count; if (!vmmap_skip_res_cnt) kern_proc_vmmap_resident(map, entry, kve); for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) { if (tobj != obj && tobj != lobj) VM_OBJECT_RUNLOCK(tobj); } } else { lobj = NULL; } kve->kve_start = entry->start; kve->kve_end = entry->end; kve->kve_offset = entry->offset; if (entry->protection & VM_PROT_READ) kve->kve_protection |= KVME_PROT_READ; if (entry->protection & VM_PROT_WRITE) kve->kve_protection |= KVME_PROT_WRITE; if (entry->protection & VM_PROT_EXECUTE) kve->kve_protection |= KVME_PROT_EXEC; if (entry->eflags & MAP_ENTRY_COW) kve->kve_flags |= KVME_FLAG_COW; if (entry->eflags & MAP_ENTRY_NEEDS_COPY) kve->kve_flags |= KVME_FLAG_NEEDS_COPY; if (entry->eflags & MAP_ENTRY_NOCOREDUMP) kve->kve_flags |= KVME_FLAG_NOCOREDUMP; if (entry->eflags & MAP_ENTRY_GROWS_UP) kve->kve_flags |= KVME_FLAG_GROWS_UP; if (entry->eflags & MAP_ENTRY_GROWS_DOWN) kve->kve_flags |= KVME_FLAG_GROWS_DOWN; last_timestamp = map->timestamp; vm_map_unlock_read(map); freepath = NULL; fullpath = ""; if (lobj != NULL) { vp = NULL; switch (lobj->type) { case OBJT_DEFAULT: kve->kve_type = KVME_TYPE_DEFAULT; break; case OBJT_VNODE: kve->kve_type = KVME_TYPE_VNODE; vp = lobj->handle; vref(vp); break; case OBJT_SWAP: if ((lobj->flags & OBJ_TMPFS_NODE) != 0) { kve->kve_type = KVME_TYPE_VNODE; if ((lobj->flags & OBJ_TMPFS) != 0) { vp = lobj->un_pager.swp.swp_tmpfs; vref(vp); } } else { kve->kve_type = KVME_TYPE_SWAP; } break; case OBJT_DEVICE: kve->kve_type = KVME_TYPE_DEVICE; break; case OBJT_PHYS: kve->kve_type = KVME_TYPE_PHYS; break; case OBJT_DEAD: kve->kve_type = KVME_TYPE_DEAD; break; case OBJT_SG: kve->kve_type = KVME_TYPE_SG; break; case OBJT_MGTDEVICE: kve->kve_type = KVME_TYPE_MGTDEVICE; break; default: kve->kve_type = KVME_TYPE_UNKNOWN; break; } if (lobj != obj) VM_OBJECT_RUNLOCK(lobj); kve->kve_ref_count = obj->ref_count; kve->kve_shadow_count = obj->shadow_count; VM_OBJECT_RUNLOCK(obj); if (vp != NULL) { vn_fullpath(curthread, vp, &fullpath, &freepath); kve->kve_vn_type = vntype_to_kinfo(vp->v_type); cred = curthread->td_ucred; vn_lock(vp, LK_SHARED | LK_RETRY); if (VOP_GETATTR(vp, &va, cred) == 0) { kve->kve_vn_fileid = va.va_fileid; kve->kve_vn_fsid = va.va_fsid; kve->kve_vn_fsid_freebsd11 = kve->kve_vn_fsid; /* truncate */ kve->kve_vn_mode = MAKEIMODE(va.va_type, va.va_mode); kve->kve_vn_size = va.va_size; kve->kve_vn_rdev = va.va_rdev; kve->kve_vn_rdev_freebsd11 = kve->kve_vn_rdev; /* truncate */ kve->kve_status = KF_ATTR_VALID; } vput(vp); } } else { kve->kve_type = KVME_TYPE_NONE; 
kve->kve_ref_count = 0; kve->kve_shadow_count = 0; } strlcpy(kve->kve_path, fullpath, sizeof(kve->kve_path)); if (freepath != NULL) free(freepath, M_TEMP); /* Pack record size down */ if ((flags & KERN_VMMAP_PACK_KINFO) != 0) kve->kve_structsize = offsetof(struct kinfo_vmentry, kve_path) + strlen(kve->kve_path) + 1; else kve->kve_structsize = sizeof(*kve); kve->kve_structsize = roundup(kve->kve_structsize, sizeof(uint64_t)); /* Halt filling and truncate rather than exceeding maxlen */ if (maxlen != -1 && maxlen < kve->kve_structsize) { error = 0; vm_map_lock_read(map); break; } else if (maxlen != -1) maxlen -= kve->kve_structsize; if (sbuf_bcat(sb, kve, kve->kve_structsize) != 0) error = ENOMEM; vm_map_lock_read(map); if (error != 0) break; if (last_timestamp != map->timestamp) { vm_map_lookup_entry(map, addr - 1, &tmp_entry); entry = tmp_entry; } } vm_map_unlock_read(map); vmspace_free(vm); PRELE(p); free(kve, M_TEMP); return (error); } static int sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS) { struct proc *p; struct sbuf sb; int error, error2, *name; name = (int *)arg1; sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_vmentry), req); sbuf_clear_flags(&sb, SBUF_INCLUDENUL); error = pget((pid_t)name[0], PGET_CANDEBUG | PGET_NOTWEXIT, &p); if (error != 0) { sbuf_delete(&sb); return (error); } error = kern_proc_vmmap_out(p, &sb, -1, KERN_VMMAP_PACK_KINFO); error2 = sbuf_finish(&sb); sbuf_delete(&sb); return (error != 0 ? error : error2); } #if defined(STACK) || defined(DDB) static int sysctl_kern_proc_kstack(SYSCTL_HANDLER_ARGS) { struct kinfo_kstack *kkstp; int error, i, *name, numthreads; lwpid_t *lwpidarray; struct thread *td; struct stack *st; struct sbuf sb; struct proc *p; name = (int *)arg1; error = pget((pid_t)name[0], PGET_NOTINEXEC | PGET_WANTREAD, &p); if (error != 0) return (error); kkstp = malloc(sizeof(*kkstp), M_TEMP, M_WAITOK); st = stack_create(); lwpidarray = NULL; PROC_LOCK(p); do { if (lwpidarray != NULL) { free(lwpidarray, M_TEMP); lwpidarray = NULL; } numthreads = p->p_numthreads; PROC_UNLOCK(p); lwpidarray = malloc(sizeof(*lwpidarray) * numthreads, M_TEMP, M_WAITOK | M_ZERO); PROC_LOCK(p); } while (numthreads < p->p_numthreads); /* * XXXRW: During the below loop, execve(2) and countless other sorts * of changes could have taken place. Should we check to see if the * vmspace has been replaced, or the like, in order to prevent * giving a snapshot that spans, say, execve(2), with some threads * before and some after? Among other things, the credentials could * have changed, in which case the right to extract debug info might * no longer be assured. 
*/ i = 0; FOREACH_THREAD_IN_PROC(p, td) { KASSERT(i < numthreads, ("sysctl_kern_proc_kstack: numthreads")); lwpidarray[i] = td->td_tid; i++; } numthreads = i; for (i = 0; i < numthreads; i++) { td = thread_find(p, lwpidarray[i]); if (td == NULL) { continue; } bzero(kkstp, sizeof(*kkstp)); (void)sbuf_new(&sb, kkstp->kkst_trace, sizeof(kkstp->kkst_trace), SBUF_FIXEDLEN); thread_lock(td); kkstp->kkst_tid = td->td_tid; if (TD_IS_SWAPPED(td)) { kkstp->kkst_state = KKST_STATE_SWAPPED; } else if (TD_IS_RUNNING(td)) { if (stack_save_td_running(st, td) == 0) kkstp->kkst_state = KKST_STATE_STACKOK; else kkstp->kkst_state = KKST_STATE_RUNNING; } else { kkstp->kkst_state = KKST_STATE_STACKOK; stack_save_td(st, td); } thread_unlock(td); PROC_UNLOCK(p); stack_sbuf_print(&sb, st); sbuf_finish(&sb); sbuf_delete(&sb); error = SYSCTL_OUT(req, kkstp, sizeof(*kkstp)); PROC_LOCK(p); if (error) break; } _PRELE(p); PROC_UNLOCK(p); if (lwpidarray != NULL) free(lwpidarray, M_TEMP); stack_destroy(st); free(kkstp, M_TEMP); return (error); } #endif /* * This sysctl allows a process to retrieve the full list of groups from * itself or another process. */ static int sysctl_kern_proc_groups(SYSCTL_HANDLER_ARGS) { pid_t *pidp = (pid_t *)arg1; unsigned int arglen = arg2; struct proc *p; struct ucred *cred; int error; if (arglen != 1) return (EINVAL); if (*pidp == -1) { /* -1 means this process */ p = req->td->td_proc; PROC_LOCK(p); } else { error = pget(*pidp, PGET_CANSEE, &p); if (error != 0) return (error); } cred = crhold(p->p_ucred); PROC_UNLOCK(p); error = SYSCTL_OUT(req, cred->cr_groups, cred->cr_ngroups * sizeof(gid_t)); crfree(cred); return (error); } /* * This sysctl allows a process to retrieve and/or set the resource limit for * another process. */ static int sysctl_kern_proc_rlimit(SYSCTL_HANDLER_ARGS) { int *name = (int *)arg1; u_int namelen = arg2; struct rlimit rlim; struct proc *p; u_int which; int flags, error; if (namelen != 2) return (EINVAL); which = (u_int)name[1]; if (which >= RLIM_NLIMITS) return (EINVAL); if (req->newptr != NULL && req->newlen != sizeof(rlim)) return (EINVAL); flags = PGET_HOLD | PGET_NOTWEXIT; if (req->newptr != NULL) flags |= PGET_CANDEBUG; else flags |= PGET_CANSEE; error = pget((pid_t)name[0], flags, &p); if (error != 0) return (error); /* * Retrieve limit. */ if (req->oldptr != NULL) { PROC_LOCK(p); lim_rlimit_proc(p, which, &rlim); PROC_UNLOCK(p); } error = SYSCTL_OUT(req, &rlim, sizeof(rlim)); if (error != 0) goto errout; /* * Set limit. */ if (req->newptr != NULL) { error = SYSCTL_IN(req, &rlim, sizeof(rlim)); if (error == 0) error = kern_proc_setrlimit(curthread, p, which, &rlim); } errout: PRELE(p); return (error); } /* * This sysctl allows a process to retrieve the ps_strings structure location of * another process. */ static int sysctl_kern_proc_ps_strings(SYSCTL_HANDLER_ARGS) { int *name = (int *)arg1; u_int namelen = arg2; struct proc *p; vm_offset_t ps_strings; int error; #ifdef COMPAT_FREEBSD32 uint32_t ps_strings32; #endif if (namelen != 1) return (EINVAL); error = pget((pid_t)name[0], PGET_CANDEBUG, &p); if (error != 0) return (error); #ifdef COMPAT_FREEBSD32 if ((req->flags & SCTL_MASK32) != 0) { /* * We return 0 if the 32 bit emulation request is for a 64 bit * process. */ ps_strings32 = SV_PROC_FLAG(p, SV_ILP32) != 0 ?
PTROUT(p->p_sysent->sv_psstrings) : 0; PROC_UNLOCK(p); error = SYSCTL_OUT(req, &ps_strings32, sizeof(ps_strings32)); return (error); } #endif ps_strings = p->p_sysent->sv_psstrings; PROC_UNLOCK(p); error = SYSCTL_OUT(req, &ps_strings, sizeof(ps_strings)); return (error); } /* * This sysctl allows a process to retrieve umask of another process. */ static int sysctl_kern_proc_umask(SYSCTL_HANDLER_ARGS) { int *name = (int *)arg1; u_int namelen = arg2; struct proc *p; int error; u_short fd_cmask; if (namelen != 1) return (EINVAL); error = pget((pid_t)name[0], PGET_WANTREAD, &p); if (error != 0) return (error); FILEDESC_SLOCK(p->p_fd); fd_cmask = p->p_fd->fd_cmask; FILEDESC_SUNLOCK(p->p_fd); PRELE(p); error = SYSCTL_OUT(req, &fd_cmask, sizeof(fd_cmask)); return (error); } /* * This sysctl allows a process to set and retrieve binary osreldate of * another process. */ static int sysctl_kern_proc_osrel(SYSCTL_HANDLER_ARGS) { int *name = (int *)arg1; u_int namelen = arg2; struct proc *p; int flags, error, osrel; if (namelen != 1) return (EINVAL); if (req->newptr != NULL && req->newlen != sizeof(osrel)) return (EINVAL); flags = PGET_HOLD | PGET_NOTWEXIT; if (req->newptr != NULL) flags |= PGET_CANDEBUG; else flags |= PGET_CANSEE; error = pget((pid_t)name[0], flags, &p); if (error != 0) return (error); error = SYSCTL_OUT(req, &p->p_osrel, sizeof(p->p_osrel)); if (error != 0) goto errout; if (req->newptr != NULL) { error = SYSCTL_IN(req, &osrel, sizeof(osrel)); if (error != 0) goto errout; if (osrel < 0) { error = EINVAL; goto errout; } p->p_osrel = osrel; } errout: PRELE(p); return (error); } static int sysctl_kern_proc_sigtramp(SYSCTL_HANDLER_ARGS) { int *name = (int *)arg1; u_int namelen = arg2; struct proc *p; struct kinfo_sigtramp kst; const struct sysentvec *sv; int error; #ifdef COMPAT_FREEBSD32 struct kinfo_sigtramp32 kst32; #endif if (namelen != 1) return (EINVAL); error = pget((pid_t)name[0], PGET_CANDEBUG, &p); if (error != 0) return (error); sv = p->p_sysent; #ifdef COMPAT_FREEBSD32 if ((req->flags & SCTL_MASK32) != 0) { bzero(&kst32, sizeof(kst32)); if (SV_PROC_FLAG(p, SV_ILP32)) { if (sv->sv_sigcode_base != 0) { kst32.ksigtramp_start = sv->sv_sigcode_base; kst32.ksigtramp_end = sv->sv_sigcode_base + *sv->sv_szsigcode; } else { kst32.ksigtramp_start = sv->sv_psstrings - *sv->sv_szsigcode; kst32.ksigtramp_end = sv->sv_psstrings; } } PROC_UNLOCK(p); error = SYSCTL_OUT(req, &kst32, sizeof(kst32)); return (error); } #endif bzero(&kst, sizeof(kst)); if (sv->sv_sigcode_base != 0) { kst.ksigtramp_start = (char *)sv->sv_sigcode_base; kst.ksigtramp_end = (char *)sv->sv_sigcode_base + *sv->sv_szsigcode; } else { kst.ksigtramp_start = (char *)sv->sv_psstrings - *sv->sv_szsigcode; kst.ksigtramp_end = (char *)sv->sv_psstrings; } PROC_UNLOCK(p); error = SYSCTL_OUT(req, &kst, sizeof(kst)); return (error); } SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table"); SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT| CTLFLAG_MPSAFE, 0, 0, sysctl_kern_proc, "S,proc", "Return entire process table"); static SYSCTL_NODE(_kern_proc, KERN_PROC_GID, gid, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, KERN_PROC_RGID, rgid, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, KERN_PROC_SESSION, sid, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static 
SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, KERN_PROC_PROC, proc, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Return process table, no threads"); static SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_CAPWR | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE, sysctl_kern_proc_args, "Process argument list"); static SYSCTL_NODE(_kern_proc, KERN_PROC_ENV, env, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc_env, "Process environment"); static SYSCTL_NODE(_kern_proc, KERN_PROC_AUXV, auxv, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc_auxv, "Process ELF auxiliary vector"); static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc_pathname, "Process executable path"); static SYSCTL_NODE(_kern_proc, KERN_PROC_SV_NAME, sv_name, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc_sv_name, "Process syscall vector name (ABI type)"); static SYSCTL_NODE(_kern_proc, (KERN_PROC_GID | KERN_PROC_INC_THREAD), gid_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_INC_THREAD), pgrp_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, (KERN_PROC_RGID | KERN_PROC_INC_THREAD), rgid_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, (KERN_PROC_SESSION | KERN_PROC_INC_THREAD), sid_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_INC_THREAD), tty_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_INC_THREAD), uid_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_INC_THREAD), ruid_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_INC_THREAD), pid_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, (KERN_PROC_PROC | KERN_PROC_INC_THREAD), proc_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Return process table, no threads"); #ifdef COMPAT_FREEBSD7 static SYSCTL_NODE(_kern_proc, KERN_PROC_OVMMAP, ovmmap, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc_ovmmap, "Old Process vm map entries"); #endif static SYSCTL_NODE(_kern_proc, KERN_PROC_VMMAP, vmmap, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc_vmmap, "Process vm map entries"); #if defined(STACK) || defined(DDB) static SYSCTL_NODE(_kern_proc, KERN_PROC_KSTACK, kstack, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc_kstack, "Process kernel stacks"); #endif static SYSCTL_NODE(_kern_proc, KERN_PROC_GROUPS, groups, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc_groups, "Process groups"); static SYSCTL_NODE(_kern_proc, KERN_PROC_RLIMIT, rlimit, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE, sysctl_kern_proc_rlimit, "Process resource limits"); static SYSCTL_NODE(_kern_proc, KERN_PROC_PS_STRINGS, ps_strings, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc_ps_strings, 
"Process ps_strings location"); static SYSCTL_NODE(_kern_proc, KERN_PROC_UMASK, umask, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc_umask, "Process umask"); static SYSCTL_NODE(_kern_proc, KERN_PROC_OSREL, osrel, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE, sysctl_kern_proc_osrel, "Process binary osreldate"); static SYSCTL_NODE(_kern_proc, KERN_PROC_SIGTRAMP, sigtramp, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc_sigtramp, "Process signal trampoline location"); int allproc_gen; /* * stop_all_proc() purpose is to stop all process which have usermode, * except current process for obvious reasons. This makes it somewhat * unreliable when invoked from multithreaded process. The service * must not be user-callable anyway. */ void stop_all_proc(void) { struct proc *cp, *p; int r, gen; bool restart, seen_stopped, seen_exiting, stopped_some; cp = curproc; allproc_loop: sx_xlock(&allproc_lock); gen = allproc_gen; seen_exiting = seen_stopped = stopped_some = restart = false; LIST_REMOVE(cp, p_list); LIST_INSERT_HEAD(&allproc, cp, p_list); for (;;) { p = LIST_NEXT(cp, p_list); if (p == NULL) break; LIST_REMOVE(cp, p_list); LIST_INSERT_AFTER(p, cp, p_list); PROC_LOCK(p); if ((p->p_flag & (P_KPROC | P_SYSTEM | P_TOTAL_STOP)) != 0) { PROC_UNLOCK(p); continue; } if ((p->p_flag & P_WEXIT) != 0) { seen_exiting = true; PROC_UNLOCK(p); continue; } if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { /* * Stopped processes are tolerated when there * are no other processes which might continue * them. P_STOPPED_SINGLE but not * P_TOTAL_STOP process still has at least one * thread running. */ seen_stopped = true; PROC_UNLOCK(p); continue; } _PHOLD(p); sx_xunlock(&allproc_lock); r = thread_single(p, SINGLE_ALLPROC); if (r != 0) restart = true; else stopped_some = true; _PRELE(p); PROC_UNLOCK(p); sx_xlock(&allproc_lock); } /* Catch forked children we did not see in iteration. */ if (gen != allproc_gen) restart = true; sx_xunlock(&allproc_lock); if (restart || stopped_some || seen_exiting || seen_stopped) { kern_yield(PRI_USER); goto allproc_loop; } } void resume_all_proc(void) { struct proc *cp, *p; cp = curproc; sx_xlock(&allproc_lock); LIST_REMOVE(cp, p_list); LIST_INSERT_HEAD(&allproc, cp, p_list); for (;;) { p = LIST_NEXT(cp, p_list); if (p == NULL) break; LIST_REMOVE(cp, p_list); LIST_INSERT_AFTER(p, cp, p_list); PROC_LOCK(p); if ((p->p_flag & P_TOTAL_STOP) != 0) { sx_xunlock(&allproc_lock); _PHOLD(p); thread_single_end(p, SINGLE_ALLPROC); _PRELE(p); PROC_UNLOCK(p); sx_xlock(&allproc_lock); } else { PROC_UNLOCK(p); } } sx_xunlock(&allproc_lock); } /* #define TOTAL_STOP_DEBUG 1 */ #ifdef TOTAL_STOP_DEBUG volatile static int ap_resume; #include static int sysctl_debug_stop_all_proc(SYSCTL_HANDLER_ARGS) { int error, val; val = 0; ap_resume = 0; error = sysctl_handle_int(oidp, &val, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (val != 0) { stop_all_proc(); syncer_suspend(); while (ap_resume == 0) ; syncer_resume(); resume_all_proc(); } return (0); } SYSCTL_PROC(_debug, OID_AUTO, stop_all_proc, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, __DEVOLATILE(int *, &ap_resume), 0, sysctl_debug_stop_all_proc, "I", ""); #endif Index: projects/clang500-import/sys/kern/kern_sendfile.c =================================================================== --- projects/clang500-import/sys/kern/kern_sendfile.c (revision 319548) +++ projects/clang500-import/sys/kern/kern_sendfile.c (revision 319549) @@ -1,1039 +1,1039 @@ /*- * Copyright (c) 2013-2015 Gleb Smirnoff * Copyright (c) 1998, David Greenman. 
All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_compat.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Structure describing a single sendfile(2) I/O, which may consist of * several underlying pager I/Os. * * The syscall context allocates the structure and initializes 'nios' * to 1. As sendfile_swapin() runs through pages and starts asynchronous * paging operations, it increments 'nios'. * * Every I/O completion calls sendfile_iodone(), which decrements the 'nios', * and the syscall also calls sendfile_iodone() after allocating all mbufs, * linking them and sending them to the socket. Whoever reaches zero 'nios' is * responsible for * calling pru_ready on the socket, to notify it of the readiness * of the data. */ struct sf_io { volatile u_int nios; u_int error; int npages; struct file *sock_fp; struct mbuf *m; vm_page_t pa[]; }; /* * Structure used to track requests with the SF_SYNC flag. */ struct sendfile_sync { struct mtx mtx; struct cv cv; unsigned count; }; counter_u64_t sfstat[sizeof(struct sfstat) / sizeof(uint64_t)]; static void sfstat_init(const void *unused) { COUNTER_ARRAY_ALLOC(sfstat, sizeof(struct sfstat) / sizeof(uint64_t), M_WAITOK); } SYSINIT(sfstat, SI_SUB_MBUF, SI_ORDER_FIRST, sfstat_init, NULL); static int sfstat_sysctl(SYSCTL_HANDLER_ARGS) { struct sfstat s; COUNTER_ARRAY_COPY(sfstat, &s, sizeof(s) / sizeof(uint64_t)); if (req->newptr) COUNTER_ARRAY_ZERO(sfstat, sizeof(s) / sizeof(uint64_t)); return (SYSCTL_OUT(req, &s, sizeof(s))); } SYSCTL_PROC(_kern_ipc, OID_AUTO, sfstat, CTLTYPE_OPAQUE | CTLFLAG_RW, NULL, 0, sfstat_sysctl, "I", "sendfile statistics"); /* * Detach a mapped page and release resources back to the system. Called * by the mbuf(9) code when the last reference to a page is freed.
*/ void sf_ext_free(void *arg1, void *arg2) { struct sf_buf *sf = arg1; struct sendfile_sync *sfs = arg2; vm_page_t pg = sf_buf_page(sf); sf_buf_free(sf); vm_page_lock(pg); /* * Check for the object going away on us. This can * happen since we don't hold a reference to it. * If so, we're responsible for freeing the page. */ if (vm_page_unwire(pg, PQ_INACTIVE) && pg->object == NULL) vm_page_free(pg); vm_page_unlock(pg); if (sfs != NULL) { mtx_lock(&sfs->mtx); KASSERT(sfs->count > 0, ("Sendfile sync botchup count == 0")); if (--sfs->count == 0) cv_signal(&sfs->cv); mtx_unlock(&sfs->mtx); } } /* * Same as above, but forces the page to be detached from the object * and go into the free pool. */ void sf_ext_free_nocache(void *arg1, void *arg2) { struct sf_buf *sf = arg1; struct sendfile_sync *sfs = arg2; vm_page_t pg = sf_buf_page(sf); sf_buf_free(sf); vm_page_lock(pg); if (vm_page_unwire(pg, PQ_NONE)) { vm_object_t obj; /* Try to free the page, but only if it is cheap to. */ if ((obj = pg->object) == NULL) vm_page_free(pg); else if (!vm_page_xbusied(pg) && VM_OBJECT_TRYWLOCK(obj)) { vm_page_free(pg); VM_OBJECT_WUNLOCK(obj); } else vm_page_deactivate(pg); } vm_page_unlock(pg); if (sfs != NULL) { mtx_lock(&sfs->mtx); KASSERT(sfs->count > 0, ("Sendfile sync botchup count == 0")); if (--sfs->count == 0) cv_signal(&sfs->cv); mtx_unlock(&sfs->mtx); } } /* * Helper function to calculate how much data to put into page i of n. * Only the first and last pages are special. */ static inline off_t xfsize(int i, int n, off_t off, off_t len) { if (i == 0) return (omin(PAGE_SIZE - (off & PAGE_MASK), len)); if (i == n - 1 && ((off + len) & PAGE_MASK) > 0) return ((off + len) & PAGE_MASK); return (PAGE_SIZE); } /* * Helper function to get the offset within the object for the i-th page. */ -static inline vm_offset_t +static inline vm_ooffset_t vmoff(int i, off_t off) { if (i == 0) - return ((vm_offset_t)off); + return ((vm_ooffset_t)off); return (trunc_page(off + i * PAGE_SIZE)); } /* * Helper function used when allocation of a page or sf_buf failed. * Pretend as if we don't have enough space, subtracting the xfsize() of * all pages that failed. */ static inline void fixspace(int old, int new, off_t off, int *space) { KASSERT(old > new, ("%s: old %d new %d", __func__, old, new)); /* Subtract last one. */ *space -= xfsize(old - 1, old, off, *space); old--; if (new == old) /* There was only one page. */ return; /* Subtract first one. */ if (new == 0) { *space -= xfsize(0, old, off, *space); new++; } /* The rest of the pages are full sized. */ *space -= (old - new) * PAGE_SIZE; KASSERT(*space >= 0, ("%s: space went backwards", __func__)); } /* * I/O completion callback. */ static void sendfile_iodone(void *arg, vm_page_t *pg, int count, int error) { struct sf_io *sfio = arg; struct socket *so; for (int i = 0; i < count; i++) if (pg[i] != bogus_page) vm_page_xunbusy(pg[i]); if (error) sfio->error = error; if (!refcount_release(&sfio->nios)) return; so = sfio->sock_fp->f_data; if (sfio->error) { struct mbuf *m; /* * I/O operation failed. The state of data in the socket * is now inconsistent, and all we can do is tear * it down. The protocol abort method will tear down the protocol * state, free all ready mbufs and detach the not-ready ones. * We will free the mbufs corresponding to this I/O manually. * * The socket would be marked with EIO and made available * for read, so that the application receives EIO on the next * syscall and eventually closes the socket.
*/ so->so_proto->pr_usrreqs->pru_abort(so); so->so_error = EIO; m = sfio->m; for (int i = 0; i < sfio->npages; i++) m = m_free(m); } else { CURVNET_SET(so->so_vnet); (void)(so->so_proto->pr_usrreqs->pru_ready)(so, sfio->m, sfio->npages); CURVNET_RESTORE(); } /* XXXGL: curthread */ fdrop(sfio->sock_fp, curthread); free(sfio, M_TEMP); } /* * Iterate through the pages vector and request paging for non-valid pages. */ static int sendfile_swapin(vm_object_t obj, struct sf_io *sfio, off_t off, off_t len, int npages, int rhpages, int flags) { vm_page_t *pa = sfio->pa; int nios; nios = 0; flags = (flags & SF_NODISKIO) ? VM_ALLOC_NOWAIT : 0; /* * First grab all the pages and wire them. Note that we grab * only the required pages. Readahead pages are dealt with later. */ VM_OBJECT_WLOCK(obj); for (int i = 0; i < npages; i++) { pa[i] = vm_page_grab(obj, OFF_TO_IDX(vmoff(i, off)), VM_ALLOC_WIRED | VM_ALLOC_NORMAL | flags); if (pa[i] == NULL) { npages = i; rhpages = 0; break; } } for (int i = 0; i < npages;) { int j, a, count, rv; /* Skip valid pages. */ if (vm_page_is_valid(pa[i], vmoff(i, off) & PAGE_MASK, xfsize(i, npages, off, len))) { vm_page_xunbusy(pa[i]); SFSTAT_INC(sf_pages_valid); i++; continue; } /* * Next page is invalid. Check if it belongs to the pager. It * may not be there, which is a regular situation for the shmem * pager. For the vnode pager this happens only in the case of * a sparse file. * * An important feature of vm_pager_has_page() is the hint * stored in 'a', about how many pages we can pagein after * this page in a single I/O. */ if (!vm_pager_has_page(obj, OFF_TO_IDX(vmoff(i, off)), NULL, &a)) { pmap_zero_page(pa[i]); pa[i]->valid = VM_PAGE_BITS_ALL; pa[i]->dirty = 0; vm_page_xunbusy(pa[i]); i++; continue; } /* * We want to pagein as many pages as possible, limited only * by the 'a' hint and the actual request. */ count = min(a + 1, npages - i); /* * We should not pagein into a valid page, thus we first trim * any valid pages off the end of the request, and substitute * bogus_page for those that are in the middle. */ for (j = i + count - 1; j > i; j--) { if (vm_page_is_valid(pa[j], vmoff(j, off) & PAGE_MASK, xfsize(j, npages, off, len))) { count--; rhpages = 0; } else break; } for (j = i + 1; j < i + count - 1; j++) if (vm_page_is_valid(pa[j], vmoff(j, off) & PAGE_MASK, xfsize(j, npages, off, len))) { vm_page_xunbusy(pa[j]); SFSTAT_INC(sf_pages_valid); SFSTAT_INC(sf_pages_bogus); pa[j] = bogus_page; } refcount_acquire(&sfio->nios); rv = vm_pager_get_pages_async(obj, pa + i, count, NULL, i + count == npages ? &rhpages : NULL, &sendfile_iodone, sfio); KASSERT(rv == VM_PAGER_OK, ("%s: pager fail obj %p page %p", __func__, obj, pa[i])); SFSTAT_INC(sf_iocnt); SFSTAT_ADD(sf_pages_read, count); if (i + count == npages) SFSTAT_ADD(sf_rhpages_read, rhpages); /* * Restore the valid page pointers. They are already * unbusied, but still wired.
*/ for (j = i; j < i + count; j++) if (pa[j] == bogus_page) { pa[j] = vm_page_lookup(obj, OFF_TO_IDX(vmoff(j, off))); KASSERT(pa[j], ("%s: page %p[%d] disappeared", __func__, pa, j)); } i += count; nios++; } VM_OBJECT_WUNLOCK(obj); if (nios == 0 && npages != 0) SFSTAT_INC(sf_noiocnt); return (nios); } static int sendfile_getobj(struct thread *td, struct file *fp, vm_object_t *obj_res, struct vnode **vp_res, struct shmfd **shmfd_res, off_t *obj_size, int *bsize) { struct vattr va; vm_object_t obj; struct vnode *vp; struct shmfd *shmfd; int error; vp = *vp_res = NULL; obj = NULL; shmfd = *shmfd_res = NULL; *bsize = 0; /* * The file descriptor must be a regular file and have a * backing VM object. */ if (fp->f_type == DTYPE_VNODE) { vp = fp->f_vnode; vn_lock(vp, LK_SHARED | LK_RETRY); if (vp->v_type != VREG) { error = EINVAL; goto out; } *bsize = vp->v_mount->mnt_stat.f_iosize; error = VOP_GETATTR(vp, &va, td->td_ucred); if (error != 0) goto out; *obj_size = va.va_size; obj = vp->v_object; if (obj == NULL) { error = EINVAL; goto out; } } else if (fp->f_type == DTYPE_SHM) { error = 0; shmfd = fp->f_data; obj = shmfd->shm_object; *obj_size = shmfd->shm_size; } else { error = EINVAL; goto out; } VM_OBJECT_WLOCK(obj); if ((obj->flags & OBJ_DEAD) != 0) { VM_OBJECT_WUNLOCK(obj); error = EBADF; goto out; } /* * Temporarily increase the backing VM object's reference * count so that a forced reclamation of its vnode does not * immediately destroy it. */ vm_object_reference_locked(obj); VM_OBJECT_WUNLOCK(obj); *obj_res = obj; *vp_res = vp; *shmfd_res = shmfd; out: if (vp != NULL) VOP_UNLOCK(vp, 0); return (error); } static int sendfile_getsock(struct thread *td, int s, struct file **sock_fp, struct socket **so) { cap_rights_t rights; int error; *sock_fp = NULL; *so = NULL; /* * The socket must be a stream socket and connected. */ error = getsock_cap(td, s, cap_rights_init(&rights, CAP_SEND), sock_fp, NULL, NULL); if (error != 0) return (error); *so = (*sock_fp)->f_data; if ((*so)->so_type != SOCK_STREAM) return (EINVAL); if (((*so)->so_state & SS_ISCONNECTED) == 0) return (ENOTCONN); return (0); } int vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio, struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags, struct thread *td) { struct file *sock_fp; struct vnode *vp; struct vm_object *obj; struct socket *so; struct mbuf *m, *mh, *mhtail; struct sf_buf *sf; struct shmfd *shmfd; struct sendfile_sync *sfs; struct vattr va; off_t off, sbytes, rem, obj_size; int error, softerr, bsize, hdrlen; obj = NULL; so = NULL; m = mh = NULL; sfs = NULL; hdrlen = sbytes = 0; softerr = 0; error = sendfile_getobj(td, fp, &obj, &vp, &shmfd, &obj_size, &bsize); if (error != 0) return (error); error = sendfile_getsock(td, sockfd, &sock_fp, &so); if (error != 0) goto out; #ifdef MAC error = mac_socket_check_send(td->td_ucred, so); if (error != 0) goto out; #endif SFSTAT_INC(sf_syscalls); SFSTAT_ADD(sf_rhpages_requested, SF_READAHEAD(flags)); if (flags & SF_SYNC) { sfs = malloc(sizeof *sfs, M_TEMP, M_WAITOK | M_ZERO); mtx_init(&sfs->mtx, "sendfile", NULL, MTX_DEF); cv_init(&sfs->cv, "sendfile"); } rem = nbytes ? omin(nbytes, obj_size - offset) : obj_size - offset; /* * Protect against multiple writers to the socket. * * XXXRW: Historically this has assumed non-interruptibility, so now * we implement that, but possibly shouldn't. */ (void)sblock(&so->so_snd, SBL_WAIT | SBL_NOINTR); /* * Loop through the pages of the file, starting with the requested * offset. 
Get a file page (do I/O if necessary), map the file page * into an sf_buf, attach an mbuf header to the sf_buf, and queue * it on the socket. * This is done in two loops. The inner loop turns as many pages * as it can, up to available socket buffer space, without blocking * into mbufs to have it bulk delivered into the socket send buffer. * The outer loop checks the state and available space of the socket * and takes care of the overall progress. */ for (off = offset; rem > 0; ) { struct sf_io *sfio; vm_page_t *pa; struct mbuf *mtail; int nios, space, npages, rhpages; mtail = NULL; /* * Check the socket state for ongoing connection, * no errors and space in socket buffer. * If space is low allow for the remainder of the * file to be processed if it fits the socket buffer. * Otherwise block in waiting for sufficient space * to proceed, or if the socket is nonblocking, return * to userland with EAGAIN while reporting how far * we've come. * We wait until the socket buffer has significant free * space to do bulk sends. This makes good use of file * system read ahead and allows packet segmentation * offloading hardware to take over lots of work. If * we were not careful here we would send off only one * sfbuf at a time. */ SOCKBUF_LOCK(&so->so_snd); if (so->so_snd.sb_lowat < so->so_snd.sb_hiwat / 2) so->so_snd.sb_lowat = so->so_snd.sb_hiwat / 2; retry_space: if (so->so_snd.sb_state & SBS_CANTSENDMORE) { error = EPIPE; SOCKBUF_UNLOCK(&so->so_snd); goto done; } else if (so->so_error) { error = so->so_error; so->so_error = 0; SOCKBUF_UNLOCK(&so->so_snd); goto done; } space = sbspace(&so->so_snd); if (space < rem && (space <= 0 || space < so->so_snd.sb_lowat)) { if (so->so_state & SS_NBIO) { SOCKBUF_UNLOCK(&so->so_snd); error = EAGAIN; goto done; } /* * sbwait drops the lock while sleeping. * When we loop back to retry_space the * state may have changed and we retest * for it. */ error = sbwait(&so->so_snd); /* * An error from sbwait usually indicates that we've * been interrupted by a signal. If we've sent anything * then return bytes sent, otherwise return the error. */ if (error != 0) { SOCKBUF_UNLOCK(&so->so_snd); goto done; } goto retry_space; } SOCKBUF_UNLOCK(&so->so_snd); /* * At the beginning of the first loop check if any headers * are specified and copy them into mbufs. Reduce space in * the socket buffer by the size of the header mbuf chain. * Clear hdr_uio here and hdrlen at the end of the first loop. */ if (hdr_uio != NULL && hdr_uio->uio_resid > 0) { hdr_uio->uio_td = td; hdr_uio->uio_rw = UIO_WRITE; mh = m_uiotombuf(hdr_uio, M_WAITOK, space, 0, 0); hdrlen = m_length(mh, &mhtail); space -= hdrlen; /* * If header consumed all the socket buffer space, * don't waste CPU cycles and jump to the end. */ if (space == 0) { sfio = NULL; nios = 0; goto prepend_header; } hdr_uio = NULL; } if (vp != NULL) { error = vn_lock(vp, LK_SHARED); if (error != 0) goto done; error = VOP_GETATTR(vp, &va, td->td_ucred); if (error != 0 || off >= va.va_size) { VOP_UNLOCK(vp, 0); goto done; } if (va.va_size != obj_size) { obj_size = va.va_size; rem = nbytes ? omin(nbytes + offset, obj_size) : obj_size; rem -= off; } } if (space > rem) space = rem; npages = howmany(space + (off & PAGE_MASK), PAGE_SIZE); /* * Calculate maximum allowed number of pages for readahead * at this iteration. If SF_USER_READAHEAD was set, we don't * do any heuristics and use exactly the value supplied by * application. Otherwise, we allow readahead up to "rem". 
* If application wants more, let it be, but there is no * reason to go above MAXPHYS. Also check against "obj_size", * since vm_pager_has_page() can hint beyond EOF. */ if (flags & SF_USER_READAHEAD) { rhpages = SF_READAHEAD(flags); } else { rhpages = howmany(rem + (off & PAGE_MASK), PAGE_SIZE) - npages; rhpages += SF_READAHEAD(flags); } rhpages = min(howmany(MAXPHYS, PAGE_SIZE), rhpages); rhpages = min(howmany(obj_size - trunc_page(off), PAGE_SIZE) - npages, rhpages); sfio = malloc(sizeof(struct sf_io) + npages * sizeof(vm_page_t), M_TEMP, M_WAITOK); refcount_init(&sfio->nios, 1); sfio->error = 0; nios = sendfile_swapin(obj, sfio, off, space, npages, rhpages, flags); /* * Loop and construct maximum sized mbuf chain to be bulk * dumped into socket buffer. */ pa = sfio->pa; for (int i = 0; i < npages; i++) { struct mbuf *m0; /* * If a page wasn't grabbed successfully, then * trim the array. Can happen only with SF_NODISKIO. */ if (pa[i] == NULL) { SFSTAT_INC(sf_busy); fixspace(npages, i, off, &space); npages = i; softerr = EBUSY; break; } /* * Get a sendfile buf. When allocating the * first buffer for mbuf chain, we usually * wait as long as necessary, but this wait * can be interrupted. For consequent * buffers, do not sleep, since several * threads might exhaust the buffers and then * deadlock. */ sf = sf_buf_alloc(pa[i], m != NULL ? SFB_NOWAIT : SFB_CATCH); if (sf == NULL) { SFSTAT_INC(sf_allocfail); for (int j = i; j < npages; j++) { vm_page_lock(pa[j]); vm_page_unwire(pa[j], PQ_INACTIVE); vm_page_unlock(pa[j]); } if (m == NULL) softerr = ENOBUFS; fixspace(npages, i, off, &space); npages = i; break; } m0 = m_get(M_WAITOK, MT_DATA); m0->m_ext.ext_buf = (char *)sf_buf_kva(sf); m0->m_ext.ext_size = PAGE_SIZE; m0->m_ext.ext_arg1 = sf; m0->m_ext.ext_arg2 = sfs; /* * SF_NOCACHE sets the page as being freed upon send. * However, we ignore it for the last page in 'space', * if the page is truncated, and we got more data to * send (rem > space), or if we have readahead * configured (rhpages > 0). */ if ((flags & SF_NOCACHE) == 0 || (i == npages - 1 && ((off + space) & PAGE_MASK) && (rem > space || rhpages > 0))) m0->m_ext.ext_type = EXT_SFBUF; else m0->m_ext.ext_type = EXT_SFBUF_NOCACHE; m0->m_ext.ext_flags = EXT_FLAG_EMBREF; m0->m_ext.ext_count = 1; m0->m_flags |= (M_EXT | M_RDONLY); if (nios) m0->m_flags |= M_NOTREADY; m0->m_data = (char *)sf_buf_kva(sf) + (vmoff(i, off) & PAGE_MASK); m0->m_len = xfsize(i, npages, off, space); if (i == 0) sfio->m = m0; /* Append to mbuf chain. */ if (mtail != NULL) mtail->m_next = m0; else m = m0; mtail = m0; if (sfs != NULL) { mtx_lock(&sfs->mtx); sfs->count++; mtx_unlock(&sfs->mtx); } } if (vp != NULL) VOP_UNLOCK(vp, 0); /* Keep track of bytes processed. */ off += space; rem -= space; /* Prepend header, if any. */ if (hdrlen) { prepend_header: mhtail->m_next = m; m = mh; mh = NULL; } if (m == NULL) { KASSERT(softerr, ("%s: m NULL, no error", __func__)); error = softerr; free(sfio, M_TEMP); goto done; } /* Add the buffer chain to the socket buffer. */ KASSERT(m_length(m, NULL) == space + hdrlen, ("%s: mlen %u space %d hdrlen %d", __func__, m_length(m, NULL), space, hdrlen)); CURVNET_SET(so->so_vnet); if (nios == 0) { /* * If sendfile_swapin() didn't initiate any I/Os, * which happens if all data is cached in VM, then * we can send data right now without the * PRUS_NOTREADY flag. 
*/ free(sfio, M_TEMP); error = (*so->so_proto->pr_usrreqs->pru_send) (so, 0, m, NULL, NULL, td); } else { sfio->sock_fp = sock_fp; sfio->npages = npages; fhold(sock_fp); error = (*so->so_proto->pr_usrreqs->pru_send) (so, PRUS_NOTREADY, m, NULL, NULL, td); sendfile_iodone(sfio, NULL, 0, 0); } CURVNET_RESTORE(); m = NULL; /* pru_send always consumes */ if (error) goto done; sbytes += space + hdrlen; if (hdrlen) hdrlen = 0; if (softerr) { error = softerr; goto done; } } /* * Send trailers. Wimp out and use writev(2). */ if (trl_uio != NULL) { sbunlock(&so->so_snd); error = kern_writev(td, sockfd, trl_uio); if (error == 0) sbytes += td->td_retval[0]; goto out; } done: sbunlock(&so->so_snd); out: /* * If there was no error we have to clear td->td_retval[0] * because it may have been set by writev. */ if (error == 0) { td->td_retval[0] = 0; } if (sent != NULL) { (*sent) = sbytes; } if (obj != NULL) vm_object_deallocate(obj); if (so) fdrop(sock_fp, td); if (m) m_freem(m); if (mh) m_freem(mh); if (sfs != NULL) { mtx_lock(&sfs->mtx); if (sfs->count != 0) cv_wait(&sfs->cv, &sfs->mtx); KASSERT(sfs->count == 0, ("sendfile sync still busy")); cv_destroy(&sfs->cv); mtx_destroy(&sfs->mtx); free(sfs, M_TEMP); } if (error == ERESTART) error = EINTR; return (error); } static int sendfile(struct thread *td, struct sendfile_args *uap, int compat) { struct sf_hdtr hdtr; struct uio *hdr_uio, *trl_uio; struct file *fp; cap_rights_t rights; off_t sbytes; int error; /* * File offset must be positive. If it goes beyond EOF * we send only the header/trailer and no payload data. */ if (uap->offset < 0) return (EINVAL); hdr_uio = trl_uio = NULL; if (uap->hdtr != NULL) { error = copyin(uap->hdtr, &hdtr, sizeof(hdtr)); if (error != 0) goto out; if (hdtr.headers != NULL) { error = copyinuio(hdtr.headers, hdtr.hdr_cnt, &hdr_uio); if (error != 0) goto out; #ifdef COMPAT_FREEBSD4 /* * In FreeBSD < 5.0 the nbytes to send also included * the header. If compat is specified subtract the * header size from nbytes. */ if (compat) { if (uap->nbytes > hdr_uio->uio_resid) uap->nbytes -= hdr_uio->uio_resid; else uap->nbytes = 0; } #endif } if (hdtr.trailers != NULL) { error = copyinuio(hdtr.trailers, hdtr.trl_cnt, &trl_uio); if (error != 0) goto out; } } AUDIT_ARG_FD(uap->fd); /* * sendfile(2) can start at any offset within a file so we require * CAP_READ+CAP_SEEK = CAP_PREAD. */ if ((error = fget_read(td, uap->fd, cap_rights_init(&rights, CAP_PREAD), &fp)) != 0) { goto out; } error = fo_sendfile(fp, uap->s, hdr_uio, trl_uio, uap->offset, uap->nbytes, &sbytes, uap->flags, td); fdrop(fp, td); if (uap->sbytes != NULL) copyout(&sbytes, uap->sbytes, sizeof(off_t)); out: free(hdr_uio, M_IOV); free(trl_uio, M_IOV); return (error); } /* * sendfile(2) * * int sendfile(int fd, int s, off_t offset, size_t nbytes, * struct sf_hdtr *hdtr, off_t *sbytes, int flags) * * Send a file specified by 'fd' and starting at 'offset' to a socket * specified by 's'. Send only 'nbytes' of the file or until EOF if nbytes == * 0. Optionally add a header and/or trailer to the socket output. If * specified, write the total number of bytes sent into *sbytes. 
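 *
 * As a usage illustration (a hedged sketch, not part of the interface
 * contract above): a userland loop that pushes an entire file down a
 * connected stream socket, tolerating partial progress on EINTR and
 * EAGAIN, could look like this; the helper name is hypothetical:
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <errno.h>
 *
 *	static int
 *	send_whole_file(int fd, int s, off_t size)
 *	{
 *		off_t off = 0, sbytes = 0;
 *
 *		while (off < size) {
 *			if (sendfile(fd, s, off, (size_t)(size - off),
 *			    NULL, &sbytes, 0) == -1 &&
 *			    errno != EAGAIN && errno != EINTR)
 *				return (-1);
 *			off += sbytes;	(sbytes reports bytes sent so far)
 *			sbytes = 0;
 *		}
 *		return (0);
 *	}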
*/ int sys_sendfile(struct thread *td, struct sendfile_args *uap) { return (sendfile(td, uap, 0)); } #ifdef COMPAT_FREEBSD4 int freebsd4_sendfile(struct thread *td, struct freebsd4_sendfile_args *uap) { struct sendfile_args args; args.fd = uap->fd; args.s = uap->s; args.offset = uap->offset; args.nbytes = uap->nbytes; args.hdtr = uap->hdtr; args.sbytes = uap->sbytes; args.flags = uap->flags; return (sendfile(td, &args, 1)); } #endif /* COMPAT_FREEBSD4 */ Index: projects/clang500-import/sys/kern/uipc_accf.c =================================================================== --- projects/clang500-import/sys/kern/uipc_accf.c (revision 319548) +++ projects/clang500-import/sys/kern/uipc_accf.c (revision 319549) @@ -1,297 +1,292 @@ /*- * Copyright (c) 2000 Paycounter, Inc. * Copyright (c) 2005 Robert N. M. Watson * Author: Alfred Perlstein , * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #define ACCEPT_FILTER_MOD #include "opt_param.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include static struct mtx accept_filter_mtx; MTX_SYSINIT(accept_filter, &accept_filter_mtx, "accept_filter_mtx", MTX_DEF); #define ACCEPT_FILTER_LOCK() mtx_lock(&accept_filter_mtx) #define ACCEPT_FILTER_UNLOCK() mtx_unlock(&accept_filter_mtx) static SLIST_HEAD(, accept_filter) accept_filtlsthd = SLIST_HEAD_INITIALIZER(accept_filtlsthd); MALLOC_DEFINE(M_ACCF, "accf", "accept filter data"); static int unloadable = 0; SYSCTL_NODE(_net, OID_AUTO, accf, CTLFLAG_RW, 0, "Accept filters"); SYSCTL_INT(_net_accf, OID_AUTO, unloadable, CTLFLAG_RW, &unloadable, 0, "Allow unload of accept filters (not recommended)"); /* * Must be passed a malloc'd structure so we don't explode if the kld is * unloaded, we leak the struct on deallocation to deal with this, but if a * filter is loaded with the same name as a leaked one we re-use the entry. 
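 *
 * As context for readers (an aside, not part of the original comment):
 * filters on this list are engaged from userland with setsockopt(2) on a
 * listening socket, as described in accept_filter(9). A minimal sketch,
 * assuming the accf_data(9) module providing the "dataready" filter is
 * loaded:
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <string.h>
 *
 *	static int
 *	enable_dataready(int s)
 *	{
 *		struct accept_filter_arg afa;
 *
 *		memset(&afa, 0, sizeof(afa));
 *		strcpy(afa.af_name, "dataready");
 *		return (setsockopt(s, SOL_SOCKET, SO_ACCEPTFILTER,
 *		    &afa, sizeof(afa)));
 *	}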
*/ int accept_filt_add(struct accept_filter *filt) { struct accept_filter *p; ACCEPT_FILTER_LOCK(); SLIST_FOREACH(p, &accept_filtlsthd, accf_next) if (strcmp(p->accf_name, filt->accf_name) == 0) { if (p->accf_callback != NULL) { ACCEPT_FILTER_UNLOCK(); return (EEXIST); } else { p->accf_callback = filt->accf_callback; ACCEPT_FILTER_UNLOCK(); free(filt, M_ACCF); return (0); } } if (p == NULL) SLIST_INSERT_HEAD(&accept_filtlsthd, filt, accf_next); ACCEPT_FILTER_UNLOCK(); return (0); } int accept_filt_del(char *name) { struct accept_filter *p; p = accept_filt_get(name); if (p == NULL) return (ENOENT); p->accf_callback = NULL; return (0); } struct accept_filter * accept_filt_get(char *name) { struct accept_filter *p; ACCEPT_FILTER_LOCK(); SLIST_FOREACH(p, &accept_filtlsthd, accf_next) if (strcmp(p->accf_name, name) == 0) break; ACCEPT_FILTER_UNLOCK(); return (p); } int accept_filt_generic_mod_event(module_t mod, int event, void *data) { struct accept_filter *p; struct accept_filter *accfp = (struct accept_filter *) data; int error; switch (event) { case MOD_LOAD: - p = malloc(sizeof(*p), M_ACCF, - M_WAITOK); + p = malloc(sizeof(*p), M_ACCF, M_WAITOK); bcopy(accfp, p, sizeof(*p)); error = accept_filt_add(p); break; case MOD_UNLOAD: /* * Do not support unloading yet. we don't keep track of * refcounts and unloading an accept filter callback and then * having it called is a bad thing. A simple fix would be to * track the refcount in the struct accept_filter. */ if (unloadable != 0) { error = accept_filt_del(accfp->accf_name); } else error = EOPNOTSUPP; break; case MOD_SHUTDOWN: error = 0; break; default: error = EOPNOTSUPP; break; } return (error); } int -do_getopt_accept_filter(struct socket *so, struct sockopt *sopt) +accept_filt_getopt(struct socket *so, struct sockopt *sopt) { struct accept_filter_arg *afap; int error; error = 0; - afap = malloc(sizeof(*afap), M_TEMP, - M_WAITOK | M_ZERO); + afap = malloc(sizeof(*afap), M_TEMP, M_WAITOK | M_ZERO); SOCK_LOCK(so); if ((so->so_options & SO_ACCEPTCONN) == 0) { error = EINVAL; goto out; } if ((so->so_options & SO_ACCEPTFILTER) == 0) { error = EINVAL; goto out; } strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name); if (so->so_accf->so_accept_filter_str != NULL) strcpy(afap->af_arg, so->so_accf->so_accept_filter_str); out: SOCK_UNLOCK(so); if (error == 0) error = sooptcopyout(sopt, afap, sizeof(*afap)); free(afap, M_TEMP); return (error); } int -do_setopt_accept_filter(struct socket *so, struct sockopt *sopt) +accept_filt_setopt(struct socket *so, struct sockopt *sopt) { struct accept_filter_arg *afap; struct accept_filter *afp; struct so_accf *newaf; int error = 0; /* * Handle the simple delete case first. */ if (sopt == NULL || sopt->sopt_val == NULL) { SOCK_LOCK(so); if ((so->so_options & SO_ACCEPTCONN) == 0) { SOCK_UNLOCK(so); return (EINVAL); } if (so->so_accf != NULL) { struct so_accf *af = so->so_accf; if (af->so_accept_filter != NULL && af->so_accept_filter->accf_destroy != NULL) { af->so_accept_filter->accf_destroy(so); } if (af->so_accept_filter_str != NULL) free(af->so_accept_filter_str, M_ACCF); free(af, M_ACCF); so->so_accf = NULL; } so->so_options &= ~SO_ACCEPTFILTER; SOCK_UNLOCK(so); return (0); } /* * Pre-allocate any memory we may need later to avoid blocking at * untimely moments. This does not optimize for invalid arguments. 
*/ - afap = malloc(sizeof(*afap), M_TEMP, - M_WAITOK); + afap = malloc(sizeof(*afap), M_TEMP, M_WAITOK); error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap); afap->af_name[sizeof(afap->af_name)-1] = '\0'; afap->af_arg[sizeof(afap->af_arg)-1] = '\0'; if (error) { free(afap, M_TEMP); return (error); } afp = accept_filt_get(afap->af_name); if (afp == NULL) { free(afap, M_TEMP); return (ENOENT); } /* * Allocate the new accept filter instance storage. We may * have to free it again later if we fail to attach it. If * attached properly, 'newaf' is NULLed to avoid a free() * while in use. */ - newaf = malloc(sizeof(*newaf), M_ACCF, M_WAITOK | - M_ZERO); + newaf = malloc(sizeof(*newaf), M_ACCF, M_WAITOK | M_ZERO); if (afp->accf_create != NULL && afap->af_name[0] != '\0') { size_t len = strlen(afap->af_name) + 1; - newaf->so_accept_filter_str = malloc(len, M_ACCF, - M_WAITOK); + newaf->so_accept_filter_str = malloc(len, M_ACCF, M_WAITOK); strcpy(newaf->so_accept_filter_str, afap->af_name); } /* * Require a listen socket; don't try to replace an existing filter * without first removing it. */ SOCK_LOCK(so); if (((so->so_options & SO_ACCEPTCONN) == 0) || (so->so_accf != NULL)) { error = EINVAL; goto out; } /* * Invoke the accf_create() method of the filter if required. The * socket mutex is held over this call, so create methods for filters * can't block. */ if (afp->accf_create != NULL) { newaf->so_accept_filter_arg = afp->accf_create(so, afap->af_arg); if (newaf->so_accept_filter_arg == NULL) { error = EINVAL; goto out; } } newaf->so_accept_filter = afp; so->so_accf = newaf; so->so_options |= SO_ACCEPTFILTER; newaf = NULL; out: SOCK_UNLOCK(so); if (newaf != NULL) { if (newaf->so_accept_filter_str != NULL) free(newaf->so_accept_filter_str, M_ACCF); free(newaf, M_ACCF); } if (afap != NULL) free(afap, M_TEMP); return (error); } Index: projects/clang500-import/sys/kern/uipc_socket.c =================================================================== --- projects/clang500-import/sys/kern/uipc_socket.c (revision 319548) +++ projects/clang500-import/sys/kern/uipc_socket.c (revision 319549) @@ -1,3782 +1,3782 @@ /*- * Copyright (c) 1982, 1986, 1988, 1990, 1993 * The Regents of the University of California. * Copyright (c) 2004 The FreeBSD Foundation * Copyright (c) 2004-2008 Robert N. M. Watson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94 */ /* * Comments on the socket life cycle: * * soalloc() sets of socket layer state for a socket, called only by * socreate() and sonewconn(). Socket layer private. * * sodealloc() tears down socket layer state for a socket, called only by * sofree() and sonewconn(). Socket layer private. * * pru_attach() associates protocol layer state with an allocated socket; * called only once, may fail, aborting socket allocation. This is called * from socreate() and sonewconn(). Socket layer private. * * pru_detach() disassociates protocol layer state from an attached socket, * and will be called exactly once for sockets in which pru_attach() has * been successfully called. If pru_attach() returned an error, * pru_detach() will not be called. Socket layer private. * * pru_abort() and pru_close() notify the protocol layer that the last * consumer of a socket is starting to tear down the socket, and that the * protocol should terminate the connection. Historically, pru_abort() also * detached protocol state from the socket state, but this is no longer the * case. * * socreate() creates a socket and attaches protocol state. This is a public * interface that may be used by socket layer consumers to create new * sockets. * * sonewconn() creates a socket and attaches protocol state. This is a * public interface that may be used by protocols to create new sockets when * a new connection is received and will be available for accept() on a * listen socket. * * soclose() destroys a socket after possibly waiting for it to disconnect. * This is a public interface that socket consumers should use to close and * release a socket when done with it. * * soabort() destroys a socket without waiting for it to disconnect (used * only for incoming connections that are already partially or fully * connected). This is used internally by the socket layer when clearing * listen socket queues (due to overflow or close on the listen socket), but * is also a public interface protocols may use to abort connections in * their incomplete listen queues should they no longer be required. Sockets * placed in completed connection listen queues should not be aborted for * reasons described in the comment above the soclose() implementation. This * is not a general purpose close routine, and except in the specific * circumstances described here, should not be used. * * sofree() will free a socket and its protocol state if all references on * the socket have been released, and is the public interface to attempt to * free a socket when a reference is removed. This is a socket layer private * interface. * * NOTE: In addition to socreate() and soclose(), which provide a single * socket reference to the consumer to be managed as required, there are two * calls to explicitly manage socket references, soref(), and sorele(). 
* Currently, these are generally required only when transitioning a socket * from a listen queue to a file descriptor, in order to prevent garbage * collection of the socket at an untimely moment. For a number of reasons, * these interfaces are not preferred, and should be avoided. * * NOTE: With regard to VNETs the general rule is that callers do not set * curvnet. Exceptions to this rule include soabort(), sodisconnect(), * sofree() (and with that sorele(), sotryfree()), as well as sonewconn() * and sorflush(), which are usually called from a pre-set VNET context. * sopoll() currently does not need a VNET context to be set. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include "opt_compat.h" #include #include #include #include #include #include #include #include #include #include #include /* for struct knote */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef COMPAT_FREEBSD32 #include #include #include #endif static int soreceive_rcvoob(struct socket *so, struct uio *uio, int flags); static void filt_sordetach(struct knote *kn); static int filt_soread(struct knote *kn, long hint); static void filt_sowdetach(struct knote *kn); static int filt_sowrite(struct knote *kn, long hint); static int inline hhook_run_socket(struct socket *so, void *hctx, int32_t h_id); static int filt_soempty(struct knote *kn, long hint); fo_kqfilter_t soo_kqfilter; static struct filterops soread_filtops = { .f_isfd = 1, .f_detach = filt_sordetach, .f_event = filt_soread, }; static struct filterops sowrite_filtops = { .f_isfd = 1, .f_detach = filt_sowdetach, .f_event = filt_sowrite, }; static struct filterops soempty_filtops = { .f_isfd = 1, .f_detach = filt_sowdetach, .f_event = filt_soempty, }; so_gen_t so_gencnt; /* generation count for sockets */ MALLOC_DEFINE(M_SONAME, "soname", "socket name"); MALLOC_DEFINE(M_PCB, "pcb", "protocol control block"); #define VNET_SO_ASSERT(so) \ VNET_ASSERT(curvnet != NULL, \ ("%s:%d curvnet is NULL, so=%p", __func__, __LINE__, (so))); VNET_DEFINE(struct hhook_head *, socket_hhh[HHOOK_SOCKET_LAST + 1]); #define V_socket_hhh VNET(socket_hhh) /* * Limit on the number of connections in the listen queue waiting * for accept(2). * NB: The original sysctl somaxconn is still available but hidden * to prevent confusion about the actual purpose of this number. */ static u_int somaxconn = SOMAXCONN; static int sysctl_somaxconn(SYSCTL_HANDLER_ARGS) { int error; int val; val = somaxconn; error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr ) return (error); /* * The purpose of the UINT_MAX / 3 limit, is so that the formula * 3 * so_qlimit / 2 * below, will not overflow. */ if (val < 1 || val > UINT_MAX / 3) return (EINVAL); somaxconn = val; return (0); } SYSCTL_PROC(_kern_ipc, OID_AUTO, soacceptqueue, CTLTYPE_UINT | CTLFLAG_RW, 0, sizeof(int), sysctl_somaxconn, "I", "Maximum listen socket pending connection accept queue size"); SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_SKIP, 0, sizeof(int), sysctl_somaxconn, "I", "Maximum listen socket pending connection accept queue size (compat)"); static int numopensockets; SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD, &numopensockets, 0, "Number of open sockets"); /* * accept_mtx locks down per-socket fields relating to accept queues. 
See * socketvar.h for an annotation of the protected fields of struct socket. */ struct mtx accept_mtx; MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF); /* * so_global_mtx protects so_gencnt, numopensockets, and the per-socket * so_gencnt field. */ static struct mtx so_global_mtx; MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_global", MTX_DEF); /* * General IPC sysctl name space, used by sockets and a variety of other IPC * types. */ SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC"); /* * Initialize the socket subsystem and set up the socket * memory allocator. */ static uma_zone_t socket_zone; int maxsockets; static void socket_zone_change(void *tag) { maxsockets = uma_zone_set_max(socket_zone, maxsockets); } static void socket_hhook_register(int subtype) { if (hhook_head_register(HHOOK_TYPE_SOCKET, subtype, &V_socket_hhh[subtype], HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0) printf("%s: WARNING: unable to register hook\n", __func__); } static void socket_hhook_deregister(int subtype) { if (hhook_head_deregister(V_socket_hhh[subtype]) != 0) printf("%s: WARNING: unable to deregister hook\n", __func__); } static void socket_init(void *tag) { socket_zone = uma_zcreate("socket", sizeof(struct socket), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); maxsockets = uma_zone_set_max(socket_zone, maxsockets); uma_zone_set_warning(socket_zone, "kern.ipc.maxsockets limit reached"); EVENTHANDLER_REGISTER(maxsockets_change, socket_zone_change, NULL, EVENTHANDLER_PRI_FIRST); } SYSINIT(socket, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY, socket_init, NULL); static void socket_vnet_init(const void *unused __unused) { int i; /* We expect a contiguous range */ for (i = 0; i <= HHOOK_SOCKET_LAST; i++) socket_hhook_register(i); } VNET_SYSINIT(socket_vnet_init, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY, socket_vnet_init, NULL); static void socket_vnet_uninit(const void *unused __unused) { int i; for (i = 0; i <= HHOOK_SOCKET_LAST; i++) socket_hhook_deregister(i); } VNET_SYSUNINIT(socket_vnet_uninit, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY, socket_vnet_uninit, NULL); /* * Initialize maxsockets. This SYSINIT must be run after * tunable_mbinit(). */ static void init_maxsockets(void *ignored) { TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets); maxsockets = imax(maxsockets, maxfiles); } SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL); /* * Sysctl to get and set the maximum global sockets limit. Notify protocols * of the change so that they can update their dependent limits as required. */ static int sysctl_maxsockets(SYSCTL_HANDLER_ARGS) { int error, newmaxsockets; newmaxsockets = maxsockets; error = sysctl_handle_int(oidp, &newmaxsockets, 0, req); if (error == 0 && req->newptr) { if (newmaxsockets > maxsockets && newmaxsockets <= maxfiles) { maxsockets = newmaxsockets; EVENTHANDLER_INVOKE(maxsockets_change); } else error = EINVAL; } return (error); } SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW, &maxsockets, 0, sysctl_maxsockets, "IU", "Maximum number of sockets available"); /* * Socket operation routines. These routines are called by the routines in * sys_socket.c or from a system process, and implement the semantics of * socket operations by switching out to the protocol specific routines. */ /* * Get a socket structure from our zone, and initialize it. Note that it * would probably be better to allocate socket and PCB at the same time, but * I'm not convinced that all the protocols can be easily modified to do * this.
* * soalloc() returns a socket with a ref count of 0. */ static struct socket * soalloc(struct vnet *vnet) { struct socket *so; so = uma_zalloc(socket_zone, M_NOWAIT | M_ZERO); if (so == NULL) return (NULL); #ifdef MAC if (mac_socket_init(so, M_NOWAIT) != 0) { uma_zfree(socket_zone, so); return (NULL); } #endif if (khelp_init_osd(HELPER_CLASS_SOCKET, &so->osd)) { uma_zfree(socket_zone, so); return (NULL); } SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd"); SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv"); sx_init(&so->so_snd.sb_sx, "so_snd_sx"); sx_init(&so->so_rcv.sb_sx, "so_rcv_sx"); TAILQ_INIT(&so->so_snd.sb_aiojobq); TAILQ_INIT(&so->so_rcv.sb_aiojobq); TASK_INIT(&so->so_snd.sb_aiotask, 0, soaio_snd, so); TASK_INIT(&so->so_rcv.sb_aiotask, 0, soaio_rcv, so); #ifdef VIMAGE VNET_ASSERT(vnet != NULL, ("%s:%d vnet is NULL, so=%p", __func__, __LINE__, so)); so->so_vnet = vnet; #endif /* We shouldn't need the so_global_mtx */ if (hhook_run_socket(so, NULL, HHOOK_SOCKET_CREATE)) { /* Do we need more comprehensive error returns? */ uma_zfree(socket_zone, so); return (NULL); } mtx_lock(&so_global_mtx); so->so_gencnt = ++so_gencnt; ++numopensockets; #ifdef VIMAGE vnet->vnet_sockcnt++; #endif mtx_unlock(&so_global_mtx); return (so); } /* * Free the storage associated with a socket at the socket layer, tear down * locks, labels, etc. All protocol state is assumed already to have been * torn down (and possibly never set up) by the caller. */ static void sodealloc(struct socket *so) { KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count)); KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL")); mtx_lock(&so_global_mtx); so->so_gencnt = ++so_gencnt; --numopensockets; /* Could be below, but faster here. */ #ifdef VIMAGE VNET_ASSERT(so->so_vnet != NULL, ("%s:%d so_vnet is NULL, so=%p", __func__, __LINE__, so)); so->so_vnet->vnet_sockcnt--; #endif mtx_unlock(&so_global_mtx); if (so->so_rcv.sb_hiwat) (void)chgsbsize(so->so_cred->cr_uidinfo, &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY); if (so->so_snd.sb_hiwat) (void)chgsbsize(so->so_cred->cr_uidinfo, &so->so_snd.sb_hiwat, 0, RLIM_INFINITY); /* remove accept filter if one is present. */ if (so->so_accf != NULL) - do_setopt_accept_filter(so, NULL); + accept_filt_setopt(so, NULL); #ifdef MAC mac_socket_destroy(so); #endif hhook_run_socket(so, NULL, HHOOK_SOCKET_CLOSE); crfree(so->so_cred); khelp_destroy_osd(&so->osd); sx_destroy(&so->so_snd.sb_sx); sx_destroy(&so->so_rcv.sb_sx); SOCKBUF_LOCK_DESTROY(&so->so_snd); SOCKBUF_LOCK_DESTROY(&so->so_rcv); uma_zfree(socket_zone, so); } /* * socreate returns a socket with a ref count of 1. The socket should be * closed with soclose(). */ int socreate(int dom, struct socket **aso, int type, int proto, struct ucred *cred, struct thread *td) { struct protosw *prp; struct socket *so; int error; if (proto) prp = pffindproto(dom, proto, type); else prp = pffindtype(dom, type); if (prp == NULL) { /* No support for domain. */ if (pffinddomain(dom) == NULL) return (EAFNOSUPPORT); /* No support for socket type. 
*/ if (proto == 0 && type != 0) return (EPROTOTYPE); return (EPROTONOSUPPORT); } if (prp->pr_usrreqs->pru_attach == NULL || prp->pr_usrreqs->pru_attach == pru_attach_notsupp) return (EPROTONOSUPPORT); if (prison_check_af(cred, prp->pr_domain->dom_family) != 0) return (EPROTONOSUPPORT); if (prp->pr_type != type) return (EPROTOTYPE); so = soalloc(CRED_TO_VNET(cred)); if (so == NULL) return (ENOBUFS); TAILQ_INIT(&so->so_incomp); TAILQ_INIT(&so->so_comp); so->so_type = type; so->so_cred = crhold(cred); if ((prp->pr_domain->dom_family == PF_INET) || (prp->pr_domain->dom_family == PF_INET6) || (prp->pr_domain->dom_family == PF_ROUTE)) so->so_fibnum = td->td_proc->p_fibnum; else so->so_fibnum = 0; so->so_proto = prp; #ifdef MAC mac_socket_create(cred, so); #endif knlist_init_mtx(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv)); knlist_init_mtx(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd)); so->so_count = 1; /* * Auto-sizing of socket buffers is managed by the protocols and * the appropriate flags must be set in the pru_attach function. */ CURVNET_SET(so->so_vnet); error = (*prp->pr_usrreqs->pru_attach)(so, proto, td); CURVNET_RESTORE(); if (error) { KASSERT(so->so_count == 1, ("socreate: so_count %d", so->so_count)); so->so_count = 0; sodealloc(so); return (error); } *aso = so; return (0); } #ifdef REGRESSION static int regression_sonewconn_earlytest = 1; SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW, &regression_sonewconn_earlytest, 0, "Perform early sonewconn limit test"); #endif /* * When an attempt at a new connection is noted on a socket which accepts * connections, sonewconn is called. If the connection is possible (subject * to space constraints, etc.) then we allocate a new structure, properly * linked into the data structure of the original socket, and return this. * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED. * * Note: the ref count on the socket is 0 on return.
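To make the socreate()/soclose() contract concrete, here is a minimal sketch of a hypothetical in-kernel consumer (the function name and the UDP choice are illustrative only, not part of this file):

static int
example_kernel_socket(struct thread *td)
{
	struct socket *so;
	int error;

	/* socreate() hands back a socket holding a single reference. */
	error = socreate(PF_INET, &so, SOCK_DGRAM, IPPROTO_UDP,
	    td->td_ucred, td);
	if (error != 0)
		return (error);

	/* ... sobind()/sosend()/soreceive() against 'so' as needed ... */

	/* soclose() releases that reference and tears the socket down. */
	return (soclose(so));
}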
*/ struct socket * sonewconn(struct socket *head, int connstatus) { static struct timeval lastover; static struct timeval overinterval = { 60, 0 }; static int overcount; struct socket *so; int over; ACCEPT_LOCK(); over = (head->so_qlen > 3 * head->so_qlimit / 2); ACCEPT_UNLOCK(); #ifdef REGRESSION if (regression_sonewconn_earlytest && over) { #else if (over) { #endif overcount++; if (ratecheck(&lastover, &overinterval)) { log(LOG_DEBUG, "%s: pcb %p: Listen queue overflow: " "%i already in queue awaiting acceptance " "(%d occurrences)\n", __func__, head->so_pcb, head->so_qlen, overcount); overcount = 0; } return (NULL); } VNET_ASSERT(head->so_vnet != NULL, ("%s:%d so_vnet is NULL, head=%p", __func__, __LINE__, head)); so = soalloc(head->so_vnet); if (so == NULL) { log(LOG_DEBUG, "%s: pcb %p: New socket allocation failure: " "limit reached or out of memory\n", __func__, head->so_pcb); return (NULL); } if ((head->so_options & SO_ACCEPTFILTER) != 0) connstatus = 0; so->so_head = head; so->so_type = head->so_type; so->so_options = head->so_options &~ SO_ACCEPTCONN; so->so_linger = head->so_linger; so->so_state = head->so_state | SS_NOFDREF; so->so_fibnum = head->so_fibnum; so->so_proto = head->so_proto; so->so_cred = crhold(head->so_cred); #ifdef MAC mac_socket_newconn(head, so); #endif knlist_init_mtx(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv)); knlist_init_mtx(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd)); VNET_SO_ASSERT(head); if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) { sodealloc(so); log(LOG_DEBUG, "%s: pcb %p: soreserve() failed\n", __func__, head->so_pcb); return (NULL); } if ((*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) { sodealloc(so); log(LOG_DEBUG, "%s: pcb %p: pru_attach() failed\n", __func__, head->so_pcb); return (NULL); } so->so_rcv.sb_lowat = head->so_rcv.sb_lowat; so->so_snd.sb_lowat = head->so_snd.sb_lowat; so->so_rcv.sb_timeo = head->so_rcv.sb_timeo; so->so_snd.sb_timeo = head->so_snd.sb_timeo; so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE; so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE; so->so_state |= connstatus; ACCEPT_LOCK(); /* * The accept socket may be tearing down but we just * won a race on the ACCEPT_LOCK. * However, if sctp_peeloff() is called on a 1-to-many * style socket, the SO_ACCEPTCONN doesn't need to be set. */ if (!(head->so_options & SO_ACCEPTCONN) && ((head->so_proto->pr_protocol != IPPROTO_SCTP) || (head->so_type != SOCK_SEQPACKET))) { SOCK_LOCK(so); so->so_head = NULL; sofree(so); /* NB: returns ACCEPT_UNLOCK'ed. */ return (NULL); } if (connstatus) { TAILQ_INSERT_TAIL(&head->so_comp, so, so_list); so->so_qstate |= SQ_COMP; head->so_qlen++; } else { /* * Keep removing sockets from the head until there's room for * us to insert on the tail. In pre-locking revisions, this * was a simple if(), but as we could be racing with other * threads and soabort() requires dropping locks, we must * loop waiting for the condition to be true. 
*/ while (head->so_incqlen > head->so_qlimit) { struct socket *sp; sp = TAILQ_FIRST(&head->so_incomp); TAILQ_REMOVE(&head->so_incomp, sp, so_list); head->so_incqlen--; sp->so_qstate &= ~SQ_INCOMP; sp->so_head = NULL; ACCEPT_UNLOCK(); soabort(sp); ACCEPT_LOCK(); } TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list); so->so_qstate |= SQ_INCOMP; head->so_incqlen++; } ACCEPT_UNLOCK(); if (connstatus) { sorwakeup(head); wakeup_one(&head->so_timeo); } return (so); } int sobind(struct socket *so, struct sockaddr *nam, struct thread *td) { int error; CURVNET_SET(so->so_vnet); error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td); CURVNET_RESTORE(); return (error); } int sobindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td) { int error; CURVNET_SET(so->so_vnet); error = (*so->so_proto->pr_usrreqs->pru_bindat)(fd, so, nam, td); CURVNET_RESTORE(); return (error); } /* * solisten() transitions a socket from a non-listening state to a listening * state, but can also be used to update the listen queue depth on an * existing listen socket. The protocol will call back into the sockets * layer using solisten_proto_check() and solisten_proto() to check and set * socket-layer listen state. Call backs are used so that the protocol can * acquire both protocol and socket layer locks in whatever order is required * by the protocol. * * Protocol implementors are advised to hold the socket lock across the * socket-layer test and set to avoid races at the socket layer. */ int solisten(struct socket *so, int backlog, struct thread *td) { int error; CURVNET_SET(so->so_vnet); error = (*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td); CURVNET_RESTORE(); return (error); } int solisten_proto_check(struct socket *so) { SOCK_LOCK_ASSERT(so); if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) return (EINVAL); return (0); } void solisten_proto(struct socket *so, int backlog) { SOCK_LOCK_ASSERT(so); if (backlog < 0 || backlog > somaxconn) backlog = somaxconn; so->so_qlimit = backlog; so->so_options |= SO_ACCEPTCONN; } /* * Evaluate the reference count and named references on a socket; if no * references remain, free it. This should be called whenever a reference is * released, such as in sorele(), but also when named reference flags are * cleared in socket or protocol code. * * sofree() will free the socket if: * * - There are no outstanding file descriptor references or related consumers * (so_count == 0). * * - The socket has been closed by user space, if ever open (SS_NOFDREF). * * - The protocol does not have an outstanding strong reference on the socket * (SS_PROTOREF). * * - The socket is not in a completed connection queue, so a process has been * notified that it is present. If it is removed, the user process may * block in accept() despite select() saying the socket was ready. 
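soclose(), below, is the canonical caller of this machinery: it takes the accept and socket locks, marks the socket SS_NOFDREF, and drops the file-descriptor reference with sorele(), which calls into sofree() once the count reaches zero. A compressed sketch of that release pattern (locking order as in the code below; details elided):

ACCEPT_LOCK();
SOCK_LOCK(so);
so->so_state |= SS_NOFDREF;	/* last user-space reference is gone */
sorele(so);			/* drops so_count; invokes sofree() at zero
				 * and returns with both locks released */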
*/ void sofree(struct socket *so) { struct protosw *pr = so->so_proto; struct socket *head; ACCEPT_LOCK_ASSERT(); SOCK_LOCK_ASSERT(so); if ((so->so_state & SS_NOFDREF) == 0 || so->so_count != 0 || (so->so_state & SS_PROTOREF) || (so->so_qstate & SQ_COMP)) { SOCK_UNLOCK(so); ACCEPT_UNLOCK(); return; } head = so->so_head; if (head != NULL) { KASSERT((so->so_qstate & SQ_COMP) != 0 || (so->so_qstate & SQ_INCOMP) != 0, ("sofree: so_head != NULL, but neither SQ_COMP nor " "SQ_INCOMP")); KASSERT((so->so_qstate & SQ_COMP) == 0 || (so->so_qstate & SQ_INCOMP) == 0, ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP")); TAILQ_REMOVE(&head->so_incomp, so, so_list); head->so_incqlen--; so->so_qstate &= ~SQ_INCOMP; so->so_head = NULL; } KASSERT((so->so_qstate & SQ_COMP) == 0 && (so->so_qstate & SQ_INCOMP) == 0, ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)", so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP)); if (so->so_options & SO_ACCEPTCONN) { KASSERT((TAILQ_EMPTY(&so->so_comp)), ("sofree: so_comp populated")); KASSERT((TAILQ_EMPTY(&so->so_incomp)), ("sofree: so_incomp populated")); } SOCK_UNLOCK(so); ACCEPT_UNLOCK(); VNET_SO_ASSERT(so); if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL) (*pr->pr_domain->dom_dispose)(so); if (pr->pr_usrreqs->pru_detach != NULL) (*pr->pr_usrreqs->pru_detach)(so); /* * From this point on, we assume that no other references to this * socket exist anywhere else in the stack. Therefore, no locks need * to be acquired or held. * * We used to do a lot of socket buffer and socket locking here, as * well as invoke sorflush() and perform wakeups. The direct calls to * dom_dispose() and sbrelease_internal() are an inlining of what was * necessary from sorflush(). * * Notice that the socket buffer and kqueue state are torn down * before calling pru_detach. This means that protocols should not * assume they can perform socket wakeups, etc, in their detach code. */ sbdestroy(&so->so_snd, so); sbdestroy(&so->so_rcv, so); seldrain(&so->so_snd.sb_sel); seldrain(&so->so_rcv.sb_sel); knlist_destroy(&so->so_rcv.sb_sel.si_note); knlist_destroy(&so->so_snd.sb_sel.si_note); sodealloc(so); } /* * Close a socket on last file table reference removal. Initiate disconnect * if connected. Free socket when disconnect complete. * * This function will sorele() the socket. Note that soclose() may be called * prior to the ref count reaching zero. The actual socket structure will * not be freed until the ref count reaches zero. */ int soclose(struct socket *so) { int error = 0; KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter")); CURVNET_SET(so->so_vnet); funsetown(&so->so_sigio); if (so->so_state & SS_ISCONNECTED) { if ((so->so_state & SS_ISDISCONNECTING) == 0) { error = sodisconnect(so); if (error) { if (error == ENOTCONN) error = 0; goto drop; } } if (so->so_options & SO_LINGER) { if ((so->so_state & SS_ISDISCONNECTING) && (so->so_state & SS_NBIO)) goto drop; while (so->so_state & SS_ISCONNECTED) { error = tsleep(&so->so_timeo, PSOCK | PCATCH, "soclos", so->so_linger * hz); if (error) break; } } } drop: if (so->so_proto->pr_usrreqs->pru_close != NULL) (*so->so_proto->pr_usrreqs->pru_close)(so); ACCEPT_LOCK(); if (so->so_options & SO_ACCEPTCONN) { struct socket *sp; /* * Prevent new additions to the accept queues due * to ACCEPT_LOCK races while we are draining them.
*/ so->so_options &= ~SO_ACCEPTCONN; while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) { TAILQ_REMOVE(&so->so_incomp, sp, so_list); so->so_incqlen--; sp->so_qstate &= ~SQ_INCOMP; sp->so_head = NULL; ACCEPT_UNLOCK(); soabort(sp); ACCEPT_LOCK(); } while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) { TAILQ_REMOVE(&so->so_comp, sp, so_list); so->so_qlen--; sp->so_qstate &= ~SQ_COMP; sp->so_head = NULL; ACCEPT_UNLOCK(); soabort(sp); ACCEPT_LOCK(); } KASSERT((TAILQ_EMPTY(&so->so_comp)), ("%s: so_comp populated", __func__)); KASSERT((TAILQ_EMPTY(&so->so_incomp)), ("%s: so_incomp populated", __func__)); } SOCK_LOCK(so); KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF")); so->so_state |= SS_NOFDREF; sorele(so); /* NB: Returns with ACCEPT_UNLOCK(). */ CURVNET_RESTORE(); return (error); } /* * soabort() is used to abruptly tear down a connection, such as when a * resource limit is reached (listen queue depth exceeded), or if a listen * socket is closed while there are sockets waiting to be accepted. * * This interface is tricky, because it is called on an unreferenced socket, * and must be called only by a thread that has actually removed the socket * from the listen queue it was on, or races with other threads are risked. * * This interface will call into the protocol code, so must not be called * with any socket locks held. Protocols do call it while holding their own * recursible protocol mutexes, but this is something that should be subject * to review in the future. */ void soabort(struct socket *so) { /* * In as much as is possible, assert that no references to this * socket are held. This is not quite the same as asserting that the * current thread is responsible for arranging for no references, but * is as close as we can get for now. */ KASSERT(so->so_count == 0, ("soabort: so_count")); KASSERT((so->so_state & SS_PROTOREF) == 0, ("soabort: SS_PROTOREF")); KASSERT(so->so_state & SS_NOFDREF, ("soabort: !SS_NOFDREF")); KASSERT((so->so_state & SQ_COMP) == 0, ("soabort: SQ_COMP")); KASSERT((so->so_state & SQ_INCOMP) == 0, ("soabort: SQ_INCOMP")); VNET_SO_ASSERT(so); if (so->so_proto->pr_usrreqs->pru_abort != NULL) (*so->so_proto->pr_usrreqs->pru_abort)(so); ACCEPT_LOCK(); SOCK_LOCK(so); sofree(so); } int soaccept(struct socket *so, struct sockaddr **nam) { int error; SOCK_LOCK(so); KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF")); so->so_state &= ~SS_NOFDREF; SOCK_UNLOCK(so); CURVNET_SET(so->so_vnet); error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam); CURVNET_RESTORE(); return (error); } int soconnect(struct socket *so, struct sockaddr *nam, struct thread *td) { return (soconnectat(AT_FDCWD, so, nam, td)); } int soconnectat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td) { int error; if (so->so_options & SO_ACCEPTCONN) return (EOPNOTSUPP); CURVNET_SET(so->so_vnet); /* * If protocol is connection-based, can only connect once. * Otherwise, if connected, try to disconnect first. This allows * user to disconnect by connecting to, e.g., a null address. */ if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) && ((so->so_proto->pr_flags & PR_CONNREQUIRED) || (error = sodisconnect(so)))) { error = EISCONN; } else { /* * Prevent accumulated error from previous connection from * biting us. 
*/ so->so_error = 0; if (fd == AT_FDCWD) { error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td); } else { error = (*so->so_proto->pr_usrreqs->pru_connectat)(fd, so, nam, td); } } CURVNET_RESTORE(); return (error); } int soconnect2(struct socket *so1, struct socket *so2) { int error; CURVNET_SET(so1->so_vnet); error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2); CURVNET_RESTORE(); return (error); } int sodisconnect(struct socket *so) { int error; if ((so->so_state & SS_ISCONNECTED) == 0) return (ENOTCONN); if (so->so_state & SS_ISDISCONNECTING) return (EALREADY); VNET_SO_ASSERT(so); error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so); return (error); } #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT) int sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *control, int flags, struct thread *td) { long space; ssize_t resid; int clen = 0, error, dontroute; KASSERT(so->so_type == SOCK_DGRAM, ("sosend_dgram: !SOCK_DGRAM")); KASSERT(so->so_proto->pr_flags & PR_ATOMIC, ("sosend_dgram: !PR_ATOMIC")); if (uio != NULL) resid = uio->uio_resid; else resid = top->m_pkthdr.len; /* * In theory resid should be unsigned. However, space must be * signed, as it might be less than 0 if we over-committed, and we * must use a signed comparison of space and resid. On the other * hand, a negative resid causes us to loop sending 0-length * segments to the protocol. */ if (resid < 0) { error = EINVAL; goto out; } dontroute = (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0; if (td != NULL) td->td_ru.ru_msgsnd++; if (control != NULL) clen = control->m_len; SOCKBUF_LOCK(&so->so_snd); if (so->so_snd.sb_state & SBS_CANTSENDMORE) { SOCKBUF_UNLOCK(&so->so_snd); error = EPIPE; goto out; } if (so->so_error) { error = so->so_error; so->so_error = 0; SOCKBUF_UNLOCK(&so->so_snd); goto out; } if ((so->so_state & SS_ISCONNECTED) == 0) { /* * `sendto' and `sendmsg' is allowed on a connection-based * socket if it supports implied connect. Return ENOTCONN if * not connected and no address is supplied. */ if ((so->so_proto->pr_flags & PR_CONNREQUIRED) && (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) { if ((so->so_state & SS_ISCONFIRMING) == 0 && !(resid == 0 && clen != 0)) { SOCKBUF_UNLOCK(&so->so_snd); error = ENOTCONN; goto out; } } else if (addr == NULL) { if (so->so_proto->pr_flags & PR_CONNREQUIRED) error = ENOTCONN; else error = EDESTADDRREQ; SOCKBUF_UNLOCK(&so->so_snd); goto out; } } /* * Do we need MSG_OOB support in SOCK_DGRAM? Signs here may be a * problem and need fixing. */ space = sbspace(&so->so_snd); if (flags & MSG_OOB) space += 1024; space -= clen; SOCKBUF_UNLOCK(&so->so_snd); if (resid > space) { error = EMSGSIZE; goto out; } if (uio == NULL) { resid = 0; if (flags & MSG_EOR) top->m_flags |= M_EOR; } else { /* * Copy the data from userland into a mbuf chain. * If no data is to be copied in, a single empty mbuf * is returned. */ top = m_uiotombuf(uio, M_WAITOK, space, max_hdr, (M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0))); if (top == NULL) { error = EFAULT; /* only possible error */ goto out; } space -= resid - uio->uio_resid; resid = uio->uio_resid; } KASSERT(resid == 0, ("sosend_dgram: resid != 0")); /* * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock * than with. */ if (dontroute) { SOCK_LOCK(so); so->so_options |= SO_DONTROUTE; SOCK_UNLOCK(so); } /* * XXX all the SBS_CANTSENDMORE checks previously done could be out * of date. 
We could have received a reset packet in an interrupt or * maybe we slept while doing page faults in uiomove() etc. We could * probably recheck again inside the locking protection here, but * there are probably other places that this also happens. We must * rethink this. */ VNET_SO_ASSERT(so); error = (*so->so_proto->pr_usrreqs->pru_send)(so, (flags & MSG_OOB) ? PRUS_OOB : /* * If the user set MSG_EOF, the protocol understands this flag and * nothing left to send then use PRU_SEND_EOF instead of PRU_SEND. */ ((flags & MSG_EOF) && (so->so_proto->pr_flags & PR_IMPLOPCL) && (resid <= 0)) ? PRUS_EOF : /* If there is more to send set PRUS_MORETOCOME */ (flags & MSG_MORETOCOME) || (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0, top, addr, control, td); if (dontroute) { SOCK_LOCK(so); so->so_options &= ~SO_DONTROUTE; SOCK_UNLOCK(so); } clen = 0; control = NULL; top = NULL; out: if (top != NULL) m_freem(top); if (control != NULL) m_freem(control); return (error); } /* * Send on a socket. If send must go all at once and message is larger than * send buffering, then hard error. Lock against other senders. If must go * all at once and not enough room now, then inform user that this would * block and do nothing. Otherwise, if nonblocking, send as much as * possible. The data to be sent is described by "uio" if nonzero, otherwise * by the mbuf chain "top" (which must be null if uio is not). Data provided * in mbuf chain must be small enough to send all at once. * * Returns nonzero on error, timeout or signal; callers must check for short * counts if EINTR/ERESTART are returned. Data and control buffers are freed * on return. */ int sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *control, int flags, struct thread *td) { long space; ssize_t resid; int clen = 0, error, dontroute; int atomic = sosendallatonce(so) || top; if (uio != NULL) resid = uio->uio_resid; else resid = top->m_pkthdr.len; /* * In theory resid should be unsigned. However, space must be * signed, as it might be less than 0 if we over-committed, and we * must use a signed comparison of space and resid. On the other * hand, a negative resid causes us to loop sending 0-length * segments to the protocol. * * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM * type sockets since that's an error. */ if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) { error = EINVAL; goto out; } dontroute = (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 && (so->so_proto->pr_flags & PR_ATOMIC); if (td != NULL) td->td_ru.ru_msgsnd++; if (control != NULL) clen = control->m_len; error = sblock(&so->so_snd, SBLOCKWAIT(flags)); if (error) goto out; restart: do { SOCKBUF_LOCK(&so->so_snd); if (so->so_snd.sb_state & SBS_CANTSENDMORE) { SOCKBUF_UNLOCK(&so->so_snd); error = EPIPE; goto release; } if (so->so_error) { error = so->so_error; so->so_error = 0; SOCKBUF_UNLOCK(&so->so_snd); goto release; } if ((so->so_state & SS_ISCONNECTED) == 0) { /* * `sendto' and `sendmsg' is allowed on a connection- * based socket if it supports implied connect. * Return ENOTCONN if not connected and no address is * supplied. 
*/ if ((so->so_proto->pr_flags & PR_CONNREQUIRED) && (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) { if ((so->so_state & SS_ISCONFIRMING) == 0 && !(resid == 0 && clen != 0)) { SOCKBUF_UNLOCK(&so->so_snd); error = ENOTCONN; goto release; } } else if (addr == NULL) { SOCKBUF_UNLOCK(&so->so_snd); if (so->so_proto->pr_flags & PR_CONNREQUIRED) error = ENOTCONN; else error = EDESTADDRREQ; goto release; } } space = sbspace(&so->so_snd); if (flags & MSG_OOB) space += 1024; if ((atomic && resid > so->so_snd.sb_hiwat) || clen > so->so_snd.sb_hiwat) { SOCKBUF_UNLOCK(&so->so_snd); error = EMSGSIZE; goto release; } if (space < resid + clen && (atomic || space < so->so_snd.sb_lowat || space < clen)) { if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) { SOCKBUF_UNLOCK(&so->so_snd); error = EWOULDBLOCK; goto release; } error = sbwait(&so->so_snd); SOCKBUF_UNLOCK(&so->so_snd); if (error) goto release; goto restart; } SOCKBUF_UNLOCK(&so->so_snd); space -= clen; do { if (uio == NULL) { resid = 0; if (flags & MSG_EOR) top->m_flags |= M_EOR; } else { /* * Copy the data from userland into a mbuf * chain. If resid is 0, which can happen * only if we have control to send, then * a single empty mbuf is returned. This * is a workaround to prevent protocol send * methods to panic. */ top = m_uiotombuf(uio, M_WAITOK, space, (atomic ? max_hdr : 0), (atomic ? M_PKTHDR : 0) | ((flags & MSG_EOR) ? M_EOR : 0)); if (top == NULL) { error = EFAULT; /* only possible error */ goto release; } space -= resid - uio->uio_resid; resid = uio->uio_resid; } if (dontroute) { SOCK_LOCK(so); so->so_options |= SO_DONTROUTE; SOCK_UNLOCK(so); } /* * XXX all the SBS_CANTSENDMORE checks previously * done could be out of date. We could have received * a reset packet in an interrupt or maybe we slept * while doing page faults in uiomove() etc. We * could probably recheck again inside the locking * protection here, but there are probably other * places that this also happens. We must rethink * this. */ VNET_SO_ASSERT(so); error = (*so->so_proto->pr_usrreqs->pru_send)(so, (flags & MSG_OOB) ? PRUS_OOB : /* * If the user set MSG_EOF, the protocol understands * this flag and nothing left to send then use * PRU_SEND_EOF instead of PRU_SEND. */ ((flags & MSG_EOF) && (so->so_proto->pr_flags & PR_IMPLOPCL) && (resid <= 0)) ? PRUS_EOF : /* If there is more to send set PRUS_MORETOCOME. */ (flags & MSG_MORETOCOME) || (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0, top, addr, control, td); if (dontroute) { SOCK_LOCK(so); so->so_options &= ~SO_DONTROUTE; SOCK_UNLOCK(so); } clen = 0; control = NULL; top = NULL; if (error) goto release; } while (resid && space > 0); } while (resid); release: sbunlock(&so->so_snd); out: if (top != NULL) m_freem(top); if (control != NULL) m_freem(control); return (error); } int sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *control, int flags, struct thread *td) { int error; CURVNET_SET(so->so_vnet); error = so->so_proto->pr_usrreqs->pru_sosend(so, addr, uio, top, control, flags, td); CURVNET_RESTORE(); return (error); } /* * The part of soreceive() that implements reading non-inline out-of-band * data from a socket. For more complete comments, see soreceive(), from * which this code originated. * * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is * unable to return an mbuf chain to the caller. 
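sosend() above is the entry point in-kernel consumers call directly. A hypothetical sketch of sending a kernel-space buffer on a connected socket (the variable names are illustrative, not from this file):

struct uio auio;
struct iovec aiov;
int error;

aiov.iov_base = buf;		/* kernel buffer of 'len' bytes */
aiov.iov_len = len;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_offset = 0;
auio.uio_resid = len;
auio.uio_segflg = UIO_SYSSPACE;
auio.uio_rw = UIO_WRITE;
auio.uio_td = td;

/* addr == NULL on a connected socket; flags as for send(2). */
error = sosend(so, NULL, &auio, NULL, NULL, 0, td);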
*/ static int soreceive_rcvoob(struct socket *so, struct uio *uio, int flags) { struct protosw *pr = so->so_proto; struct mbuf *m; int error; KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0")); VNET_SO_ASSERT(so); m = m_get(M_WAITOK, MT_DATA); error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK); if (error) goto bad; do { error = uiomove(mtod(m, void *), (int) min(uio->uio_resid, m->m_len), uio); m = m_free(m); } while (uio->uio_resid && error == 0 && m); bad: if (m != NULL) m_freem(m); return (error); } /* * Following replacement or removal of the first mbuf on the first mbuf chain * of a socket buffer, push necessary state changes back into the socket * buffer so that other consumers see the values consistently. 'nextrecord' * is the caller's locally stored value of the original value of * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes. * NOTE: 'nextrecord' may be NULL. */ static __inline void sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord) { SOCKBUF_LOCK_ASSERT(sb); /* * First, update for the new value of nextrecord. If necessary, make * it the first record. */ if (sb->sb_mb != NULL) sb->sb_mb->m_nextpkt = nextrecord; else sb->sb_mb = nextrecord; /* * Now update any dependent socket buffer fields to reflect the new * state. This is an expanded inline of SB_EMPTY_FIXUP(), with the * addition of a second clause that takes care of the case where * sb_mb has been updated, but remains the last record. */ if (sb->sb_mb == NULL) { sb->sb_mbtail = NULL; sb->sb_lastrecord = NULL; } else if (sb->sb_mb->m_nextpkt == NULL) sb->sb_lastrecord = sb->sb_mb; } /* * Implement receive operations on a socket. We depend on the way that * records are added to the sockbuf by sbappend. In particular, each record * (mbufs linked through m_next) must begin with an address if the protocol * so specifies, followed by an optional mbuf or mbufs containing ancillary * data, and then zero or more mbufs of data. In order to allow parallelism * between network receive and copying to user space, as well as avoid * sleeping with a mutex held, we release the socket buffer mutex during the * user space copy. Although the sockbuf remains locked by sblock(), new * data may still be appended, and thus we must maintain consistency of the * sockbuf during that time. * * The caller may receive the data as a single mbuf chain by supplying an * mbuf **mp0 for use in returning the chain. The uio is then used only for * the count in uio_resid. */ int soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) { struct mbuf *m, **mp; int flags, error, offset; ssize_t len; struct protosw *pr = so->so_proto; struct mbuf *nextrecord; int moff, type = 0; ssize_t orig_resid = uio->uio_resid; mp = mp0; if (psa != NULL) *psa = NULL; if (controlp != NULL) *controlp = NULL; if (flagsp != NULL) flags = *flagsp &~ MSG_EOR; else flags = 0; if (flags & MSG_OOB) return (soreceive_rcvoob(so, uio, flags)); if (mp != NULL) *mp = NULL; if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING) && uio->uio_resid) { VNET_SO_ASSERT(so); (*pr->pr_usrreqs->pru_rcvd)(so, 0); } error = sblock(&so->so_rcv, SBLOCKWAIT(flags)); if (error) return (error); restart: SOCKBUF_LOCK(&so->so_rcv); m = so->so_rcv.sb_mb; /* * If we have less data than requested, block awaiting more (subject * to any timeout) if: * 1. the current count is less than the low water mark, or * 2.
MSG_DONTWAIT is not set */ if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && sbavail(&so->so_rcv) < uio->uio_resid) && sbavail(&so->so_rcv) < so->so_rcv.sb_lowat && m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) { KASSERT(m != NULL || !sbavail(&so->so_rcv), ("receive: m == %p sbavail == %u", m, sbavail(&so->so_rcv))); if (so->so_error) { if (m != NULL) goto dontblock; error = so->so_error; if ((flags & MSG_PEEK) == 0) so->so_error = 0; SOCKBUF_UNLOCK(&so->so_rcv); goto release; } SOCKBUF_LOCK_ASSERT(&so->so_rcv); if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { if (m == NULL) { SOCKBUF_UNLOCK(&so->so_rcv); goto release; } else goto dontblock; } for (; m != NULL; m = m->m_next) if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) { m = so->so_rcv.sb_mb; goto dontblock; } if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && (so->so_proto->pr_flags & PR_CONNREQUIRED)) { SOCKBUF_UNLOCK(&so->so_rcv); error = ENOTCONN; goto release; } if (uio->uio_resid == 0) { SOCKBUF_UNLOCK(&so->so_rcv); goto release; } if ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO))) { SOCKBUF_UNLOCK(&so->so_rcv); error = EWOULDBLOCK; goto release; } SBLASTRECORDCHK(&so->so_rcv); SBLASTMBUFCHK(&so->so_rcv); error = sbwait(&so->so_rcv); SOCKBUF_UNLOCK(&so->so_rcv); if (error) goto release; goto restart; } dontblock: /* * From this point onward, we maintain 'nextrecord' as a cache of the * pointer to the next record in the socket buffer. We must keep the * various socket buffer pointers and local stack versions of the * pointers in sync, pushing out modifications before dropping the * socket buffer mutex, and re-reading them when picking it up. * * Otherwise, we will race with the network stack appending new data * or records onto the socket buffer by using inconsistent/stale * versions of the field, possibly resulting in socket buffer * corruption. * * By holding the high-level sblock(), we prevent simultaneous * readers from pulling off the front of the socket buffer. */ SOCKBUF_LOCK_ASSERT(&so->so_rcv); if (uio->uio_td) uio->uio_td->td_ru.ru_msgrcv++; KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb")); SBLASTRECORDCHK(&so->so_rcv); SBLASTMBUFCHK(&so->so_rcv); nextrecord = m->m_nextpkt; if (pr->pr_flags & PR_ADDR) { KASSERT(m->m_type == MT_SONAME, ("m->m_type == %d", m->m_type)); orig_resid = 0; if (psa != NULL) *psa = sodupsockaddr(mtod(m, struct sockaddr *), M_NOWAIT); if (flags & MSG_PEEK) { m = m->m_next; } else { sbfree(&so->so_rcv, m); so->so_rcv.sb_mb = m_free(m); m = so->so_rcv.sb_mb; sockbuf_pushsync(&so->so_rcv, nextrecord); } } /* * Process one or more MT_CONTROL mbufs present before any data mbufs * in the first mbuf chain on the socket buffer. If MSG_PEEK, we * just copy the data; if !MSG_PEEK, we call into the protocol to * perform externalization (or freeing if controlp == NULL). 
*/ if (m != NULL && m->m_type == MT_CONTROL) { struct mbuf *cm = NULL, *cmn; struct mbuf **cme = &cm; do { if (flags & MSG_PEEK) { if (controlp != NULL) { *controlp = m_copym(m, 0, m->m_len, M_NOWAIT); controlp = &(*controlp)->m_next; } m = m->m_next; } else { sbfree(&so->so_rcv, m); so->so_rcv.sb_mb = m->m_next; m->m_next = NULL; *cme = m; cme = &(*cme)->m_next; m = so->so_rcv.sb_mb; } } while (m != NULL && m->m_type == MT_CONTROL); if ((flags & MSG_PEEK) == 0) sockbuf_pushsync(&so->so_rcv, nextrecord); while (cm != NULL) { cmn = cm->m_next; cm->m_next = NULL; if (pr->pr_domain->dom_externalize != NULL) { SOCKBUF_UNLOCK(&so->so_rcv); VNET_SO_ASSERT(so); error = (*pr->pr_domain->dom_externalize) (cm, controlp, flags); SOCKBUF_LOCK(&so->so_rcv); } else if (controlp != NULL) *controlp = cm; else m_freem(cm); if (controlp != NULL) { orig_resid = 0; while (*controlp != NULL) controlp = &(*controlp)->m_next; } cm = cmn; } if (m != NULL) nextrecord = so->so_rcv.sb_mb->m_nextpkt; else nextrecord = so->so_rcv.sb_mb; orig_resid = 0; } if (m != NULL) { if ((flags & MSG_PEEK) == 0) { KASSERT(m->m_nextpkt == nextrecord, ("soreceive: post-control, nextrecord !sync")); if (nextrecord == NULL) { KASSERT(so->so_rcv.sb_mb == m, ("soreceive: post-control, sb_mb!=m")); KASSERT(so->so_rcv.sb_lastrecord == m, ("soreceive: post-control, lastrecord!=m")); } } type = m->m_type; if (type == MT_OOBDATA) flags |= MSG_OOB; } else { if ((flags & MSG_PEEK) == 0) { KASSERT(so->so_rcv.sb_mb == nextrecord, ("soreceive: sb_mb != nextrecord")); if (so->so_rcv.sb_mb == NULL) { KASSERT(so->so_rcv.sb_lastrecord == NULL, ("soreceive: sb_lastrecord != NULL")); } } } SOCKBUF_LOCK_ASSERT(&so->so_rcv); SBLASTRECORDCHK(&so->so_rcv); SBLASTMBUFCHK(&so->so_rcv); /* * Now continue to read any data mbufs off of the head of the socket * buffer until the read request is satisfied. Note that 'type' is * used to store the type of any mbuf reads that have happened so far * such that soreceive() can stop reading if the type changes, which * causes soreceive() to return only one of regular data and inline * out-of-band data in a single socket receive operation. */ moff = 0; offset = 0; while (m != NULL && !(m->m_flags & M_NOTAVAIL) && uio->uio_resid > 0 && error == 0) { /* * If the type of mbuf has changed since the last mbuf * examined ('type'), end the receive operation. */ SOCKBUF_LOCK_ASSERT(&so->so_rcv); if (m->m_type == MT_OOBDATA || m->m_type == MT_CONTROL) { if (type != m->m_type) break; } else if (type == MT_OOBDATA) break; else KASSERT(m->m_type == MT_DATA, ("m->m_type == %d", m->m_type)); so->so_rcv.sb_state &= ~SBS_RCVATMARK; len = uio->uio_resid; if (so->so_oobmark && len > so->so_oobmark - offset) len = so->so_oobmark - offset; if (len > m->m_len - moff) len = m->m_len - moff; /* * If mp is set, just pass back the mbufs. Otherwise copy * them out via the uio, then free. Sockbuf must be * consistent here (points to current mbuf, it points to next * record) when we drop priority; we must note any additions * to the sockbuf when we block interrupts again. */ if (mp == NULL) { SOCKBUF_LOCK_ASSERT(&so->so_rcv); SBLASTRECORDCHK(&so->so_rcv); SBLASTMBUFCHK(&so->so_rcv); SOCKBUF_UNLOCK(&so->so_rcv); error = uiomove(mtod(m, char *) + moff, (int)len, uio); SOCKBUF_LOCK(&so->so_rcv); if (error) { /* * The MT_SONAME mbuf has already been removed * from the record, so it is necessary to * remove the data mbufs, if any, to preserve * the invariant in the case of PR_ADDR that * requires MT_SONAME mbufs at the head of * each record.
if (pr->pr_flags & PR_ATOMIC && ((flags & MSG_PEEK) == 0)) (void)sbdroprecord_locked(&so->so_rcv); SOCKBUF_UNLOCK(&so->so_rcv); goto release; } } else uio->uio_resid -= len; SOCKBUF_LOCK_ASSERT(&so->so_rcv); if (len == m->m_len - moff) { if (m->m_flags & M_EOR) flags |= MSG_EOR; if (flags & MSG_PEEK) { m = m->m_next; moff = 0; } else { nextrecord = m->m_nextpkt; sbfree(&so->so_rcv, m); if (mp != NULL) { m->m_nextpkt = NULL; *mp = m; mp = &m->m_next; so->so_rcv.sb_mb = m = m->m_next; *mp = NULL; } else { so->so_rcv.sb_mb = m_free(m); m = so->so_rcv.sb_mb; } sockbuf_pushsync(&so->so_rcv, nextrecord); SBLASTRECORDCHK(&so->so_rcv); SBLASTMBUFCHK(&so->so_rcv); } } else { if (flags & MSG_PEEK) moff += len; else { if (mp != NULL) { if (flags & MSG_DONTWAIT) { *mp = m_copym(m, 0, len, M_NOWAIT); if (*mp == NULL) { /* * m_copym() couldn't * allocate an mbuf. * Adjust uio_resid back * (it was adjusted * down by len bytes, * which we didn't end * up "copying" over). */ uio->uio_resid += len; break; } } else { SOCKBUF_UNLOCK(&so->so_rcv); *mp = m_copym(m, 0, len, M_WAITOK); SOCKBUF_LOCK(&so->so_rcv); } } sbcut_locked(&so->so_rcv, len); } } SOCKBUF_LOCK_ASSERT(&so->so_rcv); if (so->so_oobmark) { if ((flags & MSG_PEEK) == 0) { so->so_oobmark -= len; if (so->so_oobmark == 0) { so->so_rcv.sb_state |= SBS_RCVATMARK; break; } } else { offset += len; if (offset == so->so_oobmark) break; } } if (flags & MSG_EOR) break; /* * If the MSG_WAITALL flag is set (for a non-atomic socket), we * must not quit until "uio->uio_resid == 0" or an error * termination. If a signal/timeout occurs, return with a * short count but without error. Keep sockbuf locked * against other readers. */ while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 && !sosendallatonce(so) && nextrecord == NULL) { SOCKBUF_LOCK_ASSERT(&so->so_rcv); if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE) break; /* * Notify the protocol that some data has been * drained before blocking. */ if (pr->pr_flags & PR_WANTRCVD) { SOCKBUF_UNLOCK(&so->so_rcv); VNET_SO_ASSERT(so); (*pr->pr_usrreqs->pru_rcvd)(so, flags); SOCKBUF_LOCK(&so->so_rcv); } SBLASTRECORDCHK(&so->so_rcv); SBLASTMBUFCHK(&so->so_rcv); /* * We could receive some data while we were notifying * the protocol. Skip blocking in this case. */ if (so->so_rcv.sb_mb == NULL) { error = sbwait(&so->so_rcv); if (error) { SOCKBUF_UNLOCK(&so->so_rcv); goto release; } } m = so->so_rcv.sb_mb; if (m != NULL) nextrecord = m->m_nextpkt; } } SOCKBUF_LOCK_ASSERT(&so->so_rcv); if (m != NULL && pr->pr_flags & PR_ATOMIC) { flags |= MSG_TRUNC; if ((flags & MSG_PEEK) == 0) (void) sbdroprecord_locked(&so->so_rcv); } if ((flags & MSG_PEEK) == 0) { if (m == NULL) { /* * First part is an inline SB_EMPTY_FIXUP(). Second * part makes sure sb_lastrecord is up-to-date if * there is still data in the socket buffer. */ so->so_rcv.sb_mb = nextrecord; if (so->so_rcv.sb_mb == NULL) { so->so_rcv.sb_mbtail = NULL; so->so_rcv.sb_lastrecord = NULL; } else if (nextrecord->m_nextpkt == NULL) so->so_rcv.sb_lastrecord = nextrecord; } SBLASTRECORDCHK(&so->so_rcv); SBLASTMBUFCHK(&so->so_rcv); /* * If soreceive() is being done from the socket callback, * then we don't need to generate an ACK to the peer to * update the window, since the ACK will be generated on * return to TCP.
*/ if (!(flags & MSG_SOCALLBCK) && (pr->pr_flags & PR_WANTRCVD)) { SOCKBUF_UNLOCK(&so->so_rcv); VNET_SO_ASSERT(so); (*pr->pr_usrreqs->pru_rcvd)(so, flags); SOCKBUF_LOCK(&so->so_rcv); } } SOCKBUF_LOCK_ASSERT(&so->so_rcv); if (orig_resid == uio->uio_resid && orig_resid && (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) { SOCKBUF_UNLOCK(&so->so_rcv); goto restart; } SOCKBUF_UNLOCK(&so->so_rcv); if (flagsp != NULL) *flagsp |= flags; release: sbunlock(&so->so_rcv); return (error); } /* * Optimized version of soreceive() for stream (TCP) sockets. * XXXAO: (MSG_WAITALL | MSG_PEEK) isn't properly handled. */ int soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) { int len = 0, error = 0, flags, oresid; struct sockbuf *sb; struct mbuf *m, *n = NULL; /* We only do stream sockets. */ if (so->so_type != SOCK_STREAM) return (EINVAL); if (psa != NULL) *psa = NULL; if (controlp != NULL) return (EINVAL); if (flagsp != NULL) flags = *flagsp &~ MSG_EOR; else flags = 0; if (flags & MSG_OOB) return (soreceive_rcvoob(so, uio, flags)); if (mp0 != NULL) *mp0 = NULL; sb = &so->so_rcv; /* Prevent other readers from entering the socket. */ error = sblock(sb, SBLOCKWAIT(flags)); if (error) goto out; SOCKBUF_LOCK(sb); /* Easy one, no space to copyout anything. */ if (uio->uio_resid == 0) { error = EINVAL; goto out; } oresid = uio->uio_resid; /* We will never ever get anything unless we are or were connected. */ if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) { error = ENOTCONN; goto out; } restart: SOCKBUF_LOCK_ASSERT(&so->so_rcv); /* Abort if socket has reported problems. */ if (so->so_error) { if (sbavail(sb) > 0) goto deliver; if (oresid > uio->uio_resid) goto out; error = so->so_error; if (!(flags & MSG_PEEK)) so->so_error = 0; goto out; } /* Door is closed. Deliver what is left, if any. */ if (sb->sb_state & SBS_CANTRCVMORE) { if (sbavail(sb) > 0) goto deliver; else goto out; } /* Socket buffer is empty and we shall not block. */ if (sbavail(sb) == 0 && ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) { error = EAGAIN; goto out; } /* Socket buffer got some data that we shall deliver now. */ if (sbavail(sb) > 0 && !(flags & MSG_WAITALL) && ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)) || sbavail(sb) >= sb->sb_lowat || sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_hiwat) ) { goto deliver; } /* On MSG_WAITALL we must wait until all data or error arrives. */ if ((flags & MSG_WAITALL) && (sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_hiwat)) goto deliver; /* * Wait and block until (more) data comes in. * NB: Drops the sockbuf lock during wait. */ error = sbwait(sb); if (error) goto out; goto restart; deliver: SOCKBUF_LOCK_ASSERT(&so->so_rcv); KASSERT(sbavail(sb) > 0, ("%s: sockbuf empty", __func__)); KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__)); /* Statistics. */ if (uio->uio_td) uio->uio_td->td_ru.ru_msgrcv++; /* Fill uio until full or current end of socket buffer is reached. */ len = min(uio->uio_resid, sbavail(sb)); if (mp0 != NULL) { /* Dequeue as many mbufs as possible. 
*/ if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) { if (*mp0 == NULL) *mp0 = sb->sb_mb; else m_cat(*mp0, sb->sb_mb); for (m = sb->sb_mb; m != NULL && m->m_len <= len; m = m->m_next) { KASSERT(!(m->m_flags & M_NOTAVAIL), ("%s: m %p not available", __func__, m)); len -= m->m_len; uio->uio_resid -= m->m_len; sbfree(sb, m); n = m; } n->m_next = NULL; sb->sb_mb = m; sb->sb_lastrecord = sb->sb_mb; if (sb->sb_mb == NULL) SB_EMPTY_FIXUP(sb); } /* Copy the remainder. */ if (len > 0) { KASSERT(sb->sb_mb != NULL, ("%s: len > 0 && sb->sb_mb empty", __func__)); m = m_copym(sb->sb_mb, 0, len, M_NOWAIT); if (m == NULL) len = 0; /* Don't flush data from sockbuf. */ else uio->uio_resid -= len; if (*mp0 != NULL) m_cat(*mp0, m); else *mp0 = m; if (*mp0 == NULL) { error = ENOBUFS; goto out; } } } else { /* NB: Must unlock socket buffer as uiomove may sleep. */ SOCKBUF_UNLOCK(sb); error = m_mbuftouio(uio, sb->sb_mb, len); SOCKBUF_LOCK(sb); if (error) goto out; } SBLASTRECORDCHK(sb); SBLASTMBUFCHK(sb); /* * Remove the delivered data from the socket buffer unless we * were only peeking. */ if (!(flags & MSG_PEEK)) { if (len > 0) sbdrop_locked(sb, len); /* Notify protocol that we drained some data. */ if ((so->so_proto->pr_flags & PR_WANTRCVD) && (((flags & MSG_WAITALL) && uio->uio_resid > 0) || !(flags & MSG_SOCALLBCK))) { SOCKBUF_UNLOCK(sb); VNET_SO_ASSERT(so); (*so->so_proto->pr_usrreqs->pru_rcvd)(so, flags); SOCKBUF_LOCK(sb); } } /* * For MSG_WAITALL we may have to loop again and wait for * more data to come in. */ if ((flags & MSG_WAITALL) && uio->uio_resid > 0) goto restart; out: SOCKBUF_LOCK_ASSERT(sb); SBLASTRECORDCHK(sb); SBLASTMBUFCHK(sb); SOCKBUF_UNLOCK(sb); sbunlock(sb); return (error); } /* * Optimized version of soreceive() for simple datagram cases from userspace. * Unlike in the stream case, we're able to drop a datagram if copyout() * fails, and because we handle datagrams atomically, we don't need to use a * sleep lock to prevent I/O interlacing. */ int soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) { struct mbuf *m, *m2; int flags, error; ssize_t len; struct protosw *pr = so->so_proto; struct mbuf *nextrecord; if (psa != NULL) *psa = NULL; if (controlp != NULL) *controlp = NULL; if (flagsp != NULL) flags = *flagsp &~ MSG_EOR; else flags = 0; /* * For any complicated cases, fall back to the full * soreceive_generic(). */ if (mp0 != NULL || (flags & MSG_PEEK) || (flags & MSG_OOB)) return (soreceive_generic(so, psa, uio, mp0, controlp, flagsp)); /* * Enforce restrictions on use. */ KASSERT((pr->pr_flags & PR_WANTRCVD) == 0, ("soreceive_dgram: wantrcvd")); KASSERT(pr->pr_flags & PR_ATOMIC, ("soreceive_dgram: !atomic")); KASSERT((so->so_rcv.sb_state & SBS_RCVATMARK) == 0, ("soreceive_dgram: SBS_RCVATMARK")); KASSERT((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0, ("soreceive_dgram: P_CONNREQUIRED")); /* * Loop blocking while waiting for a datagram. 
*/ SOCKBUF_LOCK(&so->so_rcv); while ((m = so->so_rcv.sb_mb) == NULL) { KASSERT(sbavail(&so->so_rcv) == 0, ("soreceive_dgram: sb_mb NULL but sbavail %u", sbavail(&so->so_rcv))); if (so->so_error) { error = so->so_error; so->so_error = 0; SOCKBUF_UNLOCK(&so->so_rcv); return (error); } if (so->so_rcv.sb_state & SBS_CANTRCVMORE || uio->uio_resid == 0) { SOCKBUF_UNLOCK(&so->so_rcv); return (0); } if ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO))) { SOCKBUF_UNLOCK(&so->so_rcv); return (EWOULDBLOCK); } SBLASTRECORDCHK(&so->so_rcv); SBLASTMBUFCHK(&so->so_rcv); error = sbwait(&so->so_rcv); if (error) { SOCKBUF_UNLOCK(&so->so_rcv); return (error); } } SOCKBUF_LOCK_ASSERT(&so->so_rcv); if (uio->uio_td) uio->uio_td->td_ru.ru_msgrcv++; SBLASTRECORDCHK(&so->so_rcv); SBLASTMBUFCHK(&so->so_rcv); nextrecord = m->m_nextpkt; if (nextrecord == NULL) { KASSERT(so->so_rcv.sb_lastrecord == m, ("soreceive_dgram: lastrecord != m")); } KASSERT(so->so_rcv.sb_mb->m_nextpkt == nextrecord, ("soreceive_dgram: m_nextpkt != nextrecord")); /* * Pull 'm' and its chain off the front of the packet queue. */ so->so_rcv.sb_mb = NULL; sockbuf_pushsync(&so->so_rcv, nextrecord); /* * Walk 'm's chain and free that many bytes from the socket buffer. */ for (m2 = m; m2 != NULL; m2 = m2->m_next) sbfree(&so->so_rcv, m2); /* * Do a few last checks before we let go of the lock. */ SBLASTRECORDCHK(&so->so_rcv); SBLASTMBUFCHK(&so->so_rcv); SOCKBUF_UNLOCK(&so->so_rcv); if (pr->pr_flags & PR_ADDR) { KASSERT(m->m_type == MT_SONAME, ("m->m_type == %d", m->m_type)); if (psa != NULL) *psa = sodupsockaddr(mtod(m, struct sockaddr *), M_NOWAIT); m = m_free(m); } if (m == NULL) { /* XXXRW: Can this happen? */ return (0); } /* * Packet to copyout() is now in 'm' and it is disconnected from the * queue. * * Process one or more MT_CONTROL mbufs present before any data mbufs * in the first mbuf chain on the socket buffer. We call into the * protocol to perform externalization (or freeing if controlp == * NULL). In some cases there can be only MT_CONTROL mbufs without * MT_DATA mbufs. */
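A hypothetical kernel-side caller of the soreceive() wrapper below, mirroring recvfrom(2) on a datagram socket (uio setup as in the earlier sosend() sketch; the returned source address is the caller's to free):

struct sockaddr *from = NULL;
int flags = 0;
int error;

error = soreceive(so, &from, &auio, NULL, NULL, &flags);
if (error == 0 && (flags & MSG_TRUNC) != 0) {
	/* The datagram was larger than the buffer; the excess was dropped. */
}
if (from != NULL)
	free(from, M_SONAME);	/* allocated via sodupsockaddr() */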
if (m->m_type == MT_CONTROL) { struct mbuf *cm = NULL, *cmn; struct mbuf **cme = &cm; do { m2 = m->m_next; m->m_next = NULL; *cme = m; cme = &(*cme)->m_next; m = m2; } while (m != NULL && m->m_type == MT_CONTROL); while (cm != NULL) { cmn = cm->m_next; cm->m_next = NULL; if (pr->pr_domain->dom_externalize != NULL) { error = (*pr->pr_domain->dom_externalize) (cm, controlp, flags); } else if (controlp != NULL) *controlp = cm; else m_freem(cm); if (controlp != NULL) { while (*controlp != NULL) controlp = &(*controlp)->m_next; } cm = cmn; } } KASSERT(m == NULL || m->m_type == MT_DATA, ("soreceive_dgram: !data")); while (m != NULL && uio->uio_resid > 0) { len = uio->uio_resid; if (len > m->m_len) len = m->m_len; error = uiomove(mtod(m, char *), (int)len, uio); if (error) { m_freem(m); return (error); } if (len == m->m_len) m = m_free(m); else { m->m_data += len; m->m_len -= len; } } if (m != NULL) { flags |= MSG_TRUNC; m_freem(m); } if (flagsp != NULL) *flagsp |= flags; return (0); } int soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) { int error; CURVNET_SET(so->so_vnet); error = (so->so_proto->pr_usrreqs->pru_soreceive(so, psa, uio, mp0, controlp, flagsp)); CURVNET_RESTORE(); return (error); } int soshutdown(struct socket *so, int how) { struct protosw *pr = so->so_proto; int error, soerror_enotconn; if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR)) return (EINVAL); soerror_enotconn = 0; if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) { /* * POSIX mandates that we return ENOTCONN when shutdown(2) is * invoked on a datagram socket; however, historically we would * actually tear the socket down. This is known to be leveraged * by some applications to let one process unblock another that * is waiting in recvXXX(2) on a socket the two share. Try to * meet both backward-compatibility and POSIX requirements by * forcing ENOTCONN but still asking the protocol to perform * pru_shutdown(). */ if (so->so_type != SOCK_DGRAM) return (ENOTCONN); soerror_enotconn = 1; } CURVNET_SET(so->so_vnet); if (pr->pr_usrreqs->pru_flush != NULL) (*pr->pr_usrreqs->pru_flush)(so, how); if (how != SHUT_WR) sorflush(so); if (how != SHUT_RD) { error = (*pr->pr_usrreqs->pru_shutdown)(so); wakeup(&so->so_timeo); CURVNET_RESTORE(); return ((error == 0 && soerror_enotconn) ? ENOTCONN : error); } wakeup(&so->so_timeo); CURVNET_RESTORE(); return (soerror_enotconn ? ENOTCONN : 0); } void sorflush(struct socket *so) { struct sockbuf *sb = &so->so_rcv; struct protosw *pr = so->so_proto; struct socket aso; VNET_SO_ASSERT(so); /* * In order to avoid calling dom_dispose with the socket buffer mutex * held, and in order to generally avoid holding the lock for a long * time, we make a copy of the socket buffer and clear the original * (except locks, state). The new socket buffer copy won't have * initialized locks so we can only call routines that won't use or * assert those locks. * * Dislodge threads currently blocked in receive and wait to acquire * a lock against other simultaneous readers before clearing the * socket buffer. Don't let our acquire be interrupted by a signal * despite any existing socket disposition on interruptible waiting. * * Invalidate/clear most of the sockbuf structure, but leave selinfo * and mutex data unchanged. */ socantrcvmore(so); (void) sblock(sb, SBL_WAIT | SBL_NOINTR);
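Seen from userland, the shutdown(2) compatibility behavior implemented in soshutdown() above looks like this (hypothetical, for a datagram socket s that was never connected):

/*
 * POSIX requires ENOTCONN here, but pru_shutdown()/sorflush() still run
 * first, so a thread blocked in recvfrom(2) on 's' is woken regardless.
 */
if (shutdown(s, SHUT_RD) == -1 && errno == ENOTCONN)
	;	/* expected for an unconnected datagram socket */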
*/ SOCKBUF_LOCK(sb); bzero(&aso, sizeof(aso)); aso.so_pcb = so->so_pcb; bcopy(&sb->sb_startzero, &aso.so_rcv.sb_startzero, sizeof(*sb) - offsetof(struct sockbuf, sb_startzero)); bzero(&sb->sb_startzero, sizeof(*sb) - offsetof(struct sockbuf, sb_startzero)); SOCKBUF_UNLOCK(sb); sbunlock(sb); /* * Dispose of special rights and flush the copied socket. Don't call * any unsafe routines (that rely on locks being initialized) on aso. */ if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL) (*pr->pr_domain->dom_dispose)(&aso); sbrelease_internal(&aso.so_rcv, so); } /* * Wrapper for Socket established helper hook. * Parameters: socket, context of the hook point, hook id. */ static int inline hhook_run_socket(struct socket *so, void *hctx, int32_t h_id) { struct socket_hhook_data hhook_data = { .so = so, .hctx = hctx, .m = NULL, .status = 0 }; CURVNET_SET(so->so_vnet); HHOOKS_RUN_IF(V_socket_hhh[h_id], &hhook_data, &so->osd); CURVNET_RESTORE(); /* Ugly but needed, since hhooks return void for now */ return (hhook_data.status); } /* * Perhaps this routine, and sooptcopyout(), below, ought to come in an * additional variant to handle the case where the option value needs to be * some kind of integer, but not a specific size. In addition to their use * here, these functions are also called by the protocol-level pr_ctloutput() * routines. */ int sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen) { size_t valsize; /* * If the user gives us more than we wanted, we ignore it, but if we * don't get the minimum length the caller wants, we return EINVAL. * On success, sopt->sopt_valsize is set to however much we actually * retrieved. */ if ((valsize = sopt->sopt_valsize) < minlen) return EINVAL; if (valsize > len) sopt->sopt_valsize = valsize = len; if (sopt->sopt_td != NULL) return (copyin(sopt->sopt_val, buf, valsize)); bcopy(sopt->sopt_val, buf, valsize); return (0); } /* * Kernel version of setsockopt(2). 
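A sketch of the sooptcopyin() calling convention documented above, as a hypothetical protocol option handler would use it (mirroring the integer-valued SO_* cases in sosetopt() below):

int optval;
int error;

/* Require at least sizeof(int) from the caller; extra bytes are ignored. */
error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
if (error != 0)
	return (error);		/* EINVAL if the user supplied fewer bytes */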
* * XXX: optlen is size_t, not socklen_t */ int so_setsockopt(struct socket *so, int level, int optname, void *optval, size_t optlen) { struct sockopt sopt; sopt.sopt_level = level; sopt.sopt_name = optname; sopt.sopt_dir = SOPT_SET; sopt.sopt_val = optval; sopt.sopt_valsize = optlen; sopt.sopt_td = NULL; return (sosetopt(so, &sopt)); } int sosetopt(struct socket *so, struct sockopt *sopt) { int error, optval; struct linger l; struct timeval tv; sbintime_t val; uint32_t val32; #ifdef MAC struct mac extmac; #endif CURVNET_SET(so->so_vnet); error = 0; if (sopt->sopt_level != SOL_SOCKET) { if (so->so_proto->pr_ctloutput != NULL) { error = (*so->so_proto->pr_ctloutput)(so, sopt); CURVNET_RESTORE(); return (error); } error = ENOPROTOOPT; } else { switch (sopt->sopt_name) { case SO_ACCEPTFILTER: - error = do_setopt_accept_filter(so, sopt); + error = accept_filt_setopt(so, sopt); if (error) goto bad; break; case SO_LINGER: error = sooptcopyin(sopt, &l, sizeof l, sizeof l); if (error) goto bad; SOCK_LOCK(so); so->so_linger = l.l_linger; if (l.l_onoff) so->so_options |= SO_LINGER; else so->so_options &= ~SO_LINGER; SOCK_UNLOCK(so); break; case SO_DEBUG: case SO_KEEPALIVE: case SO_DONTROUTE: case SO_USELOOPBACK: case SO_BROADCAST: case SO_REUSEADDR: case SO_REUSEPORT: case SO_OOBINLINE: case SO_TIMESTAMP: case SO_BINTIME: case SO_NOSIGPIPE: case SO_NO_DDP: case SO_NO_OFFLOAD: error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval); if (error) goto bad; SOCK_LOCK(so); if (optval) so->so_options |= sopt->sopt_name; else so->so_options &= ~sopt->sopt_name; SOCK_UNLOCK(so); break; case SO_SETFIB: error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval); if (error) goto bad; if (optval < 0 || optval >= rt_numfibs) { error = EINVAL; goto bad; } if (((so->so_proto->pr_domain->dom_family == PF_INET) || (so->so_proto->pr_domain->dom_family == PF_INET6) || (so->so_proto->pr_domain->dom_family == PF_ROUTE))) so->so_fibnum = optval; else so->so_fibnum = 0; break; case SO_USER_COOKIE: error = sooptcopyin(sopt, &val32, sizeof val32, sizeof val32); if (error) goto bad; so->so_user_cookie = val32; break; case SO_SNDBUF: case SO_RCVBUF: case SO_SNDLOWAT: case SO_RCVLOWAT: error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval); if (error) goto bad; /* * Values < 1 make no sense for any of these options, * so disallow them. */ if (optval < 1) { error = EINVAL; goto bad; } switch (sopt->sopt_name) { case SO_SNDBUF: case SO_RCVBUF: if (sbreserve(sopt->sopt_name == SO_SNDBUF ? &so->so_snd : &so->so_rcv, (u_long)optval, so, curthread) == 0) { error = ENOBUFS; goto bad; } (sopt->sopt_name == SO_SNDBUF ? &so->so_snd : &so->so_rcv)->sb_flags &= ~SB_AUTOSIZE; break; /* * Make sure the low-water is never greater than the * high-water. */ case SO_SNDLOWAT: SOCKBUF_LOCK(&so->so_snd); so->so_snd.sb_lowat = (optval > so->so_snd.sb_hiwat) ? so->so_snd.sb_hiwat : optval; SOCKBUF_UNLOCK(&so->so_snd); break; case SO_RCVLOWAT: SOCKBUF_LOCK(&so->so_rcv); so->so_rcv.sb_lowat = (optval > so->so_rcv.sb_hiwat) ? 
so->so_rcv.sb_hiwat : optval; SOCKBUF_UNLOCK(&so->so_rcv); break; } break; case SO_SNDTIMEO: case SO_RCVTIMEO: #ifdef COMPAT_FREEBSD32 if (SV_CURPROC_FLAG(SV_ILP32)) { struct timeval32 tv32; error = sooptcopyin(sopt, &tv32, sizeof tv32, sizeof tv32); CP(tv32, tv, tv_sec); CP(tv32, tv, tv_usec); } else #endif error = sooptcopyin(sopt, &tv, sizeof tv, sizeof tv); if (error) goto bad; if (tv.tv_sec < 0 || tv.tv_usec < 0 || tv.tv_usec >= 1000000) { error = EDOM; goto bad; } if (tv.tv_sec > INT32_MAX) val = SBT_MAX; else val = tvtosbt(tv); switch (sopt->sopt_name) { case SO_SNDTIMEO: so->so_snd.sb_timeo = val; break; case SO_RCVTIMEO: so->so_rcv.sb_timeo = val; break; } break; case SO_LABEL: #ifdef MAC error = sooptcopyin(sopt, &extmac, sizeof extmac, sizeof extmac); if (error) goto bad; error = mac_setsockopt_label(sopt->sopt_td->td_ucred, so, &extmac); #else error = EOPNOTSUPP; #endif break; case SO_TS_CLOCK: error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval); if (error) goto bad; if (optval < 0 || optval > SO_TS_CLOCK_MAX) { error = EINVAL; goto bad; } so->so_ts_clock = optval; break; case SO_MAX_PACING_RATE: error = sooptcopyin(sopt, &val32, sizeof(val32), sizeof(val32)); if (error) goto bad; so->so_max_pacing_rate = val32; break; default: if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0) error = hhook_run_socket(so, sopt, HHOOK_SOCKET_OPT); else error = ENOPROTOOPT; break; } if (error == 0 && so->so_proto->pr_ctloutput != NULL) (void)(*so->so_proto->pr_ctloutput)(so, sopt); } bad: CURVNET_RESTORE(); return (error); } /* * Helper routine for getsockopt. */ int sooptcopyout(struct sockopt *sopt, const void *buf, size_t len) { int error; size_t valsize; error = 0; /* * Documented get behavior is that we always return a value, possibly * truncated to fit in the user's buffer. Traditional behavior is * that we always tell the user precisely how much we copied, rather * than something useful like the total amount we had available for * her. Note that this interface is not idempotent; the entire * answer must be generated ahead of time. 
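* For example, if the full answer is 8 bytes but the user supplied a
* 4-byte buffer, only the first 4 bytes are copied out and
* sopt_valsize is set to 4; the truncation itself is not reported as
* an error.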
*/ valsize = min(len, sopt->sopt_valsize); sopt->sopt_valsize = valsize; if (sopt->sopt_val != NULL) { if (sopt->sopt_td != NULL) error = copyout(buf, sopt->sopt_val, valsize); else bcopy(buf, sopt->sopt_val, valsize); } return (error); } int sogetopt(struct socket *so, struct sockopt *sopt) { int error, optval; struct linger l; struct timeval tv; #ifdef MAC struct mac extmac; #endif CURVNET_SET(so->so_vnet); error = 0; if (sopt->sopt_level != SOL_SOCKET) { if (so->so_proto->pr_ctloutput != NULL) error = (*so->so_proto->pr_ctloutput)(so, sopt); else error = ENOPROTOOPT; CURVNET_RESTORE(); return (error); } else { switch (sopt->sopt_name) { case SO_ACCEPTFILTER: - error = do_getopt_accept_filter(so, sopt); + error = accept_filt_getopt(so, sopt); break; case SO_LINGER: SOCK_LOCK(so); l.l_onoff = so->so_options & SO_LINGER; l.l_linger = so->so_linger; SOCK_UNLOCK(so); error = sooptcopyout(sopt, &l, sizeof l); break; case SO_USELOOPBACK: case SO_DONTROUTE: case SO_DEBUG: case SO_KEEPALIVE: case SO_REUSEADDR: case SO_REUSEPORT: case SO_BROADCAST: case SO_OOBINLINE: case SO_ACCEPTCONN: case SO_TIMESTAMP: case SO_BINTIME: case SO_NOSIGPIPE: optval = so->so_options & sopt->sopt_name; integer: error = sooptcopyout(sopt, &optval, sizeof optval); break; case SO_TYPE: optval = so->so_type; goto integer; case SO_PROTOCOL: optval = so->so_proto->pr_protocol; goto integer; case SO_ERROR: SOCK_LOCK(so); optval = so->so_error; so->so_error = 0; SOCK_UNLOCK(so); goto integer; case SO_SNDBUF: optval = so->so_snd.sb_hiwat; goto integer; case SO_RCVBUF: optval = so->so_rcv.sb_hiwat; goto integer; case SO_SNDLOWAT: optval = so->so_snd.sb_lowat; goto integer; case SO_RCVLOWAT: optval = so->so_rcv.sb_lowat; goto integer; case SO_SNDTIMEO: case SO_RCVTIMEO: tv = sbttotv(sopt->sopt_name == SO_SNDTIMEO ? so->so_snd.sb_timeo : so->so_rcv.sb_timeo); #ifdef COMPAT_FREEBSD32 if (SV_CURPROC_FLAG(SV_ILP32)) { struct timeval32 tv32; CP(tv, tv32, tv_sec); CP(tv, tv32, tv_usec); error = sooptcopyout(sopt, &tv32, sizeof tv32); } else #endif error = sooptcopyout(sopt, &tv, sizeof tv); break; case SO_LABEL: #ifdef MAC error = sooptcopyin(sopt, &extmac, sizeof(extmac), sizeof(extmac)); if (error) goto bad; error = mac_getsockopt_label(sopt->sopt_td->td_ucred, so, &extmac); if (error) goto bad; error = sooptcopyout(sopt, &extmac, sizeof extmac); #else error = EOPNOTSUPP; #endif break; case SO_PEERLABEL: #ifdef MAC error = sooptcopyin(sopt, &extmac, sizeof(extmac), sizeof(extmac)); if (error) goto bad; error = mac_getsockopt_peerlabel( sopt->sopt_td->td_ucred, so, &extmac); if (error) goto bad; error = sooptcopyout(sopt, &extmac, sizeof extmac); #else error = EOPNOTSUPP; #endif break; case SO_LISTENQLIMIT: optval = so->so_qlimit; goto integer; case SO_LISTENQLEN: optval = so->so_qlen; goto integer; case SO_LISTENINCQLEN: optval = so->so_incqlen; goto integer; case SO_TS_CLOCK: optval = so->so_ts_clock; goto integer; case SO_MAX_PACING_RATE: optval = so->so_max_pacing_rate; goto integer; default: if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0) error = hhook_run_socket(so, sopt, HHOOK_SOCKET_OPT); else error = ENOPROTOOPT; break; } } #ifdef MAC bad: #endif CURVNET_RESTORE(); return (error); } int soopt_getm(struct sockopt *sopt, struct mbuf **mp) { struct mbuf *m, *m_prev; int sopt_size = sopt->sopt_valsize; MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA); if (m == NULL) return ENOBUFS; if (sopt_size > MLEN) { MCLGET(m, sopt->sopt_td ? 
M_WAITOK : M_NOWAIT); if ((m->m_flags & M_EXT) == 0) { m_free(m); return ENOBUFS; } m->m_len = min(MCLBYTES, sopt_size); } else { m->m_len = min(MLEN, sopt_size); } sopt_size -= m->m_len; *mp = m; m_prev = m; while (sopt_size) { MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA); if (m == NULL) { m_freem(*mp); return ENOBUFS; } if (sopt_size > MLEN) { MCLGET(m, sopt->sopt_td != NULL ? M_WAITOK : M_NOWAIT); if ((m->m_flags & M_EXT) == 0) { m_freem(m); m_freem(*mp); return ENOBUFS; } m->m_len = min(MCLBYTES, sopt_size); } else { m->m_len = min(MLEN, sopt_size); } sopt_size -= m->m_len; m_prev->m_next = m; m_prev = m; } return (0); } int soopt_mcopyin(struct sockopt *sopt, struct mbuf *m) { struct mbuf *m0 = m; if (sopt->sopt_val == NULL) return (0); while (m != NULL && sopt->sopt_valsize >= m->m_len) { if (sopt->sopt_td != NULL) { int error; error = copyin(sopt->sopt_val, mtod(m, char *), m->m_len); if (error != 0) { m_freem(m0); return(error); } } else bcopy(sopt->sopt_val, mtod(m, char *), m->m_len); sopt->sopt_valsize -= m->m_len; sopt->sopt_val = (char *)sopt->sopt_val + m->m_len; m = m->m_next; } if (m != NULL) /* enough space should have been allocated at ip6_sooptmcopyin() */ panic("ip6_sooptmcopyin"); return (0); } int soopt_mcopyout(struct sockopt *sopt, struct mbuf *m) { struct mbuf *m0 = m; size_t valsize = 0; if (sopt->sopt_val == NULL) return (0); while (m != NULL && sopt->sopt_valsize >= m->m_len) { if (sopt->sopt_td != NULL) { int error; error = copyout(mtod(m, char *), sopt->sopt_val, m->m_len); if (error != 0) { m_freem(m0); return(error); } } else bcopy(mtod(m, char *), sopt->sopt_val, m->m_len); sopt->sopt_valsize -= m->m_len; sopt->sopt_val = (char *)sopt->sopt_val + m->m_len; valsize += m->m_len; m = m->m_next; } if (m != NULL) { /* a large enough soopt buffer should be given from user-land */ m_freem(m0); return(EINVAL); } sopt->sopt_valsize = valsize; return (0); } /* * sohasoutofband(): protocol notifies socket layer of the arrival of new * out-of-band data, which will then notify socket consumers. */ void sohasoutofband(struct socket *so) { if (so->so_sigio != NULL) pgsigio(&so->so_sigio, SIGURG, 0); selwakeuppri(&so->so_rcv.sb_sel, PSOCK); } int sopoll(struct socket *so, int events, struct ucred *active_cred, struct thread *td) { /* * We do not need to set or assert curvnet as long as everyone uses * sopoll_generic().
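* (A protocol that installed its own pru_sopoll would then become
* responsible for establishing any vnet context it needs.)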
*/ return (so->so_proto->pr_usrreqs->pru_sopoll(so, events, active_cred, td)); } int sopoll_generic(struct socket *so, int events, struct ucred *active_cred, struct thread *td) { int revents = 0; SOCKBUF_LOCK(&so->so_snd); SOCKBUF_LOCK(&so->so_rcv); if (events & (POLLIN | POLLRDNORM)) if (soreadabledata(so)) revents |= events & (POLLIN | POLLRDNORM); if (events & (POLLOUT | POLLWRNORM)) if (sowriteable(so)) revents |= events & (POLLOUT | POLLWRNORM); if (events & (POLLPRI | POLLRDBAND)) if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK)) revents |= events & (POLLPRI | POLLRDBAND); if ((events & POLLINIGNEOF) == 0) { if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { revents |= events & (POLLIN | POLLRDNORM); if (so->so_snd.sb_state & SBS_CANTSENDMORE) revents |= POLLHUP; } } if (revents == 0) { if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) { selrecord(td, &so->so_rcv.sb_sel); so->so_rcv.sb_flags |= SB_SEL; } if (events & (POLLOUT | POLLWRNORM)) { selrecord(td, &so->so_snd.sb_sel); so->so_snd.sb_flags |= SB_SEL; } } SOCKBUF_UNLOCK(&so->so_rcv); SOCKBUF_UNLOCK(&so->so_snd); return (revents); } int soo_kqfilter(struct file *fp, struct knote *kn) { struct socket *so = kn->kn_fp->f_data; struct sockbuf *sb; switch (kn->kn_filter) { case EVFILT_READ: kn->kn_fop = &soread_filtops; sb = &so->so_rcv; break; case EVFILT_WRITE: kn->kn_fop = &sowrite_filtops; sb = &so->so_snd; break; case EVFILT_EMPTY: kn->kn_fop = &soempty_filtops; sb = &so->so_snd; break; default: return (EINVAL); } SOCKBUF_LOCK(sb); knlist_add(&sb->sb_sel.si_note, kn, 1); sb->sb_flags |= SB_KNOTE; SOCKBUF_UNLOCK(sb); return (0); } /* * Some routines that return EOPNOTSUPP for entry points that are not * supported by a protocol. Fill in as needed. */ int pru_accept_notsupp(struct socket *so, struct sockaddr **nam) { return EOPNOTSUPP; } int pru_aio_queue_notsupp(struct socket *so, struct kaiocb *job) { return EOPNOTSUPP; } int pru_attach_notsupp(struct socket *so, int proto, struct thread *td) { return EOPNOTSUPP; } int pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td) { return EOPNOTSUPP; } int pru_bindat_notsupp(int fd, struct socket *so, struct sockaddr *nam, struct thread *td) { return EOPNOTSUPP; } int pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td) { return EOPNOTSUPP; } int pru_connectat_notsupp(int fd, struct socket *so, struct sockaddr *nam, struct thread *td) { return EOPNOTSUPP; } int pru_connect2_notsupp(struct socket *so1, struct socket *so2) { return EOPNOTSUPP; } int pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, struct thread *td) { return EOPNOTSUPP; } int pru_disconnect_notsupp(struct socket *so) { return EOPNOTSUPP; } int pru_listen_notsupp(struct socket *so, int backlog, struct thread *td) { return EOPNOTSUPP; } int pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam) { return EOPNOTSUPP; } int pru_rcvd_notsupp(struct socket *so, int flags) { return EOPNOTSUPP; } int pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags) { return EOPNOTSUPP; } int pru_send_notsupp(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct thread *td) { return EOPNOTSUPP; } int pru_ready_notsupp(struct socket *so, struct mbuf *m, int count) { return (EOPNOTSUPP); } /* * This isn't really a ``null'' operation, but it's the default one and * doesn't do anything destructive. 
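* It reports the send buffer high-water mark as st_blksize, so a
* stat(2)-family call on a socket still yields a plausible preferred
* I/O size.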
*/ int pru_sense_null(struct socket *so, struct stat *sb) { sb->st_blksize = so->so_snd.sb_hiwat; return 0; } int pru_shutdown_notsupp(struct socket *so) { return EOPNOTSUPP; } int pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam) { return EOPNOTSUPP; } int pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *control, int flags, struct thread *td) { return EOPNOTSUPP; } int pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) { return EOPNOTSUPP; } int pru_sopoll_notsupp(struct socket *so, int events, struct ucred *cred, struct thread *td) { return EOPNOTSUPP; } static void filt_sordetach(struct knote *kn) { struct socket *so = kn->kn_fp->f_data; SOCKBUF_LOCK(&so->so_rcv); knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1); if (knlist_empty(&so->so_rcv.sb_sel.si_note)) so->so_rcv.sb_flags &= ~SB_KNOTE; SOCKBUF_UNLOCK(&so->so_rcv); } /*ARGSUSED*/ static int filt_soread(struct knote *kn, long hint) { struct socket *so; so = kn->kn_fp->f_data; if (so->so_options & SO_ACCEPTCONN) { kn->kn_data = so->so_qlen; return (!TAILQ_EMPTY(&so->so_comp)); } SOCKBUF_LOCK_ASSERT(&so->so_rcv); kn->kn_data = sbavail(&so->so_rcv) - so->so_rcv.sb_ctl; if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { kn->kn_flags |= EV_EOF; kn->kn_fflags = so->so_error; return (1); } else if (so->so_error) /* temporary udp error */ return (1); if (kn->kn_sfflags & NOTE_LOWAT) { if (kn->kn_data >= kn->kn_sdata) return (1); } else if (sbavail(&so->so_rcv) >= so->so_rcv.sb_lowat) return (1); /* This hook returning non-zero indicates an event, not error */ return (hhook_run_socket(so, NULL, HHOOK_FILT_SOREAD)); } static void filt_sowdetach(struct knote *kn) { struct socket *so = kn->kn_fp->f_data; SOCKBUF_LOCK(&so->so_snd); knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1); if (knlist_empty(&so->so_snd.sb_sel.si_note)) so->so_snd.sb_flags &= ~SB_KNOTE; SOCKBUF_UNLOCK(&so->so_snd); } /*ARGSUSED*/ static int filt_sowrite(struct knote *kn, long hint) { struct socket *so; so = kn->kn_fp->f_data; SOCKBUF_LOCK_ASSERT(&so->so_snd); kn->kn_data = sbspace(&so->so_snd); hhook_run_socket(so, kn, HHOOK_FILT_SOWRITE); if (so->so_snd.sb_state & SBS_CANTSENDMORE) { kn->kn_flags |= EV_EOF; kn->kn_fflags = so->so_error; return (1); } else if (so->so_error) /* temporary udp error */ return (1); else if (((so->so_state & SS_ISCONNECTED) == 0) && (so->so_proto->pr_flags & PR_CONNREQUIRED)) return (0); else if (kn->kn_sfflags & NOTE_LOWAT) return (kn->kn_data >= kn->kn_sdata); else return (kn->kn_data >= so->so_snd.sb_lowat); } static int filt_soempty(struct knote *kn, long hint) { struct socket *so; so = kn->kn_fp->f_data; SOCKBUF_LOCK_ASSERT(&so->so_snd); kn->kn_data = sbused(&so->so_snd); if (kn->kn_data == 0) return (1); else return (0); } int socheckuid(struct socket *so, uid_t uid) { if (so == NULL) return (EPERM); if (so->so_cred->cr_uid != uid) return (EPERM); return (0); } /* * These functions are used by protocols to notify the socket layer (and its * consumers) of state changes in the sockets driven by protocol-side events. */ /* * Procedures to manipulate state flags of socket and do appropriate wakeups. * * Normal sequence from the active (originating) side is that * soisconnecting() is called during processing of connect() call, resulting * in an eventual call to soisconnected() if/when the connection is * established. 
When the connection is torn down soisdisconnecting() is * called during processing of disconnect() call, and soisdisconnected() is * called when the connection to the peer is totally severed. The semantics * of these routines are such that connectionless protocols can call * soisconnected() and soisdisconnected() only, bypassing the in-progress * calls when setting up a ``connection'' takes no time. * * From the passive side, a socket is created with two queues of sockets: * so_incomp for connections in progress and so_comp for connections already * made and awaiting user acceptance. As a protocol is preparing incoming * connections, it creates a socket structure queued on so_incomp by calling * sonewconn(). When the connection is established, soisconnected() is * called, and transfers the socket structure to so_comp, making it available * to accept(). * * If a socket is closed with sockets on either so_incomp or so_comp, these * sockets are dropped. * * If higher-level protocols are implemented in the kernel, the wakeups done * here will sometimes cause software-interrupt process scheduling. */ void soisconnecting(struct socket *so) { SOCK_LOCK(so); so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING); so->so_state |= SS_ISCONNECTING; SOCK_UNLOCK(so); } void soisconnected(struct socket *so) { struct socket *head; int ret; restart: ACCEPT_LOCK(); SOCK_LOCK(so); so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING); so->so_state |= SS_ISCONNECTED; head = so->so_head; if (head != NULL && (so->so_qstate & SQ_INCOMP)) { if ((so->so_options & SO_ACCEPTFILTER) == 0) { SOCK_UNLOCK(so); TAILQ_REMOVE(&head->so_incomp, so, so_list); head->so_incqlen--; so->so_qstate &= ~SQ_INCOMP; TAILQ_INSERT_TAIL(&head->so_comp, so, so_list); head->so_qlen++; so->so_qstate |= SQ_COMP; ACCEPT_UNLOCK(); sorwakeup(head); wakeup_one(&head->so_timeo); } else { ACCEPT_UNLOCK(); soupcall_set(so, SO_RCV, head->so_accf->so_accept_filter->accf_callback, head->so_accf->so_accept_filter_arg); so->so_options &= ~SO_ACCEPTFILTER; ret = head->so_accf->so_accept_filter->accf_callback(so, head->so_accf->so_accept_filter_arg, M_NOWAIT); if (ret == SU_ISCONNECTED) soupcall_clear(so, SO_RCV); SOCK_UNLOCK(so); if (ret == SU_ISCONNECTED) goto restart; } return; } SOCK_UNLOCK(so); ACCEPT_UNLOCK(); wakeup(&so->so_timeo); sorwakeup(so); sowwakeup(so); } void soisdisconnecting(struct socket *so) { /* * Note: This code assumes that SOCK_LOCK(so) and * SOCKBUF_LOCK(&so->so_rcv) are the same. */ SOCKBUF_LOCK(&so->so_rcv); so->so_state &= ~SS_ISCONNECTING; so->so_state |= SS_ISDISCONNECTING; socantrcvmore_locked(so); SOCKBUF_LOCK(&so->so_snd); socantsendmore_locked(so); wakeup(&so->so_timeo); } void soisdisconnected(struct socket *so) { /* * Note: This code assumes that SOCK_LOCK(so) and * SOCKBUF_LOCK(&so->so_rcv) are the same. */ SOCKBUF_LOCK(&so->so_rcv); so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING); so->so_state |= SS_ISDISCONNECTED; socantrcvmore_locked(so); SOCKBUF_LOCK(&so->so_snd); sbdrop_locked(&so->so_snd, sbused(&so->so_snd)); socantsendmore_locked(so); wakeup(&so->so_timeo); } /* * Make a copy of a sockaddr in a malloced buffer of type M_SONAME. */ struct sockaddr * sodupsockaddr(const struct sockaddr *sa, int mflags) { struct sockaddr *sa2; sa2 = malloc(sa->sa_len, M_SONAME, mflags); if (sa2) bcopy(sa, sa2, sa->sa_len); return sa2; } /* * Register per-socket buffer upcalls. 
*/ void soupcall_set(struct socket *so, int which, int (*func)(struct socket *, void *, int), void *arg) { struct sockbuf *sb; switch (which) { case SO_RCV: sb = &so->so_rcv; break; case SO_SND: sb = &so->so_snd; break; default: panic("soupcall_set: bad which"); } SOCKBUF_LOCK_ASSERT(sb); #if 0 /* XXX: accf_http actually wants to do this on purpose. */ KASSERT(sb->sb_upcall == NULL, ("soupcall_set: overwriting upcall")); #endif sb->sb_upcall = func; sb->sb_upcallarg = arg; sb->sb_flags |= SB_UPCALL; } void soupcall_clear(struct socket *so, int which) { struct sockbuf *sb; switch (which) { case SO_RCV: sb = &so->so_rcv; break; case SO_SND: sb = &so->so_snd; break; default: panic("soupcall_clear: bad which"); } SOCKBUF_LOCK_ASSERT(sb); KASSERT(sb->sb_upcall != NULL, ("soupcall_clear: no upcall to clear")); sb->sb_upcall = NULL; sb->sb_upcallarg = NULL; sb->sb_flags &= ~SB_UPCALL; } /* * Create an external-format (``xsocket'') structure using the information in * the kernel-format socket structure pointed to by so. This is done to * reduce the spew of irrelevant information over this interface, to isolate * user code from changes in the kernel structure, and potentially to provide * information-hiding if we decide that some of this information should be * hidden from users. */ void sotoxsocket(struct socket *so, struct xsocket *xso) { xso->xso_len = sizeof *xso; xso->xso_so = so; xso->so_type = so->so_type; xso->so_options = so->so_options; xso->so_linger = so->so_linger; xso->so_state = so->so_state; xso->so_pcb = so->so_pcb; xso->xso_protocol = so->so_proto->pr_protocol; xso->xso_family = so->so_proto->pr_domain->dom_family; xso->so_qlen = so->so_qlen; xso->so_incqlen = so->so_incqlen; xso->so_qlimit = so->so_qlimit; xso->so_timeo = so->so_timeo; xso->so_error = so->so_error; xso->so_pgid = so->so_sigio ? 
so->so_sigio->sio_pgid : 0; xso->so_oobmark = so->so_oobmark; sbtoxsockbuf(&so->so_snd, &xso->so_snd); sbtoxsockbuf(&so->so_rcv, &xso->so_rcv); xso->so_uid = so->so_cred->cr_uid; } /* * Socket accessor functions to provide external consumers with * a safe interface to socket state * */ void so_listeners_apply_all(struct socket *so, void (*func)(struct socket *, void *), void *arg) { TAILQ_FOREACH(so, &so->so_comp, so_list) func(so, arg); } struct sockbuf * so_sockbuf_rcv(struct socket *so) { return (&so->so_rcv); } struct sockbuf * so_sockbuf_snd(struct socket *so) { return (&so->so_snd); } int so_state_get(const struct socket *so) { return (so->so_state); } void so_state_set(struct socket *so, int val) { so->so_state = val; } int so_options_get(const struct socket *so) { return (so->so_options); } void so_options_set(struct socket *so, int val) { so->so_options = val; } int so_error_get(const struct socket *so) { return (so->so_error); } void so_error_set(struct socket *so, int val) { so->so_error = val; } int so_linger_get(const struct socket *so) { return (so->so_linger); } void so_linger_set(struct socket *so, int val) { so->so_linger = val; } struct protosw * so_protosw_get(const struct socket *so) { return (so->so_proto); } void so_protosw_set(struct socket *so, struct protosw *val) { so->so_proto = val; } void so_sorwakeup(struct socket *so) { sorwakeup(so); } void so_sowwakeup(struct socket *so) { sowwakeup(so); } void so_sorwakeup_locked(struct socket *so) { sorwakeup_locked(so); } void so_sowwakeup_locked(struct socket *so) { sowwakeup_locked(so); } void so_lock(struct socket *so) { SOCK_LOCK(so); } void so_unlock(struct socket *so) { SOCK_UNLOCK(so); } Index: projects/clang500-import/sys/kern/uipc_usrreq.c =================================================================== --- projects/clang500-import/sys/kern/uipc_usrreq.c (revision 319548) +++ projects/clang500-import/sys/kern/uipc_usrreq.c (revision 319549) @@ -1,2599 +1,2586 @@ /*- * Copyright (c) 1982, 1986, 1989, 1991, 1993 * The Regents of the University of California. * Copyright (c) 2004-2009 Robert N. M. Watson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * From: @(#)uipc_usrreq.c 8.3 (Berkeley) 1/4/94 */ /* * UNIX Domain (Local) Sockets * * This is an implementation of UNIX (local) domain sockets. Each socket has * an associated struct unpcb (UNIX protocol control block). Stream sockets * may be connected to 0 or 1 other socket. Datagram sockets may be * connected to 0, 1, or many other sockets. Sockets may be created and * connected in pairs (socketpair(2)), or bound/connected to using the file * system name space. For most purposes, only the receive socket buffer is * used, as sending on one socket delivers directly to the receive socket * buffer of a second socket. * * The implementation is substantially complicated by the fact that * "ancillary data", such as file descriptors or credentials, may be passed * across UNIX domain sockets. The potential for passing UNIX domain sockets * over other UNIX domain sockets requires the implementation of a simple * garbage collector to find and tear down cycles of disconnected sockets. * * TODO: * RDM * rethink name space problems * need a proper out-of-band */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include #include #include #include #include /* XXX must be before */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif #include #include MALLOC_DECLARE(M_FILECAPS); /* * Locking key: * (l) Locked using list lock * (g) Locked using linkage lock */ static uma_zone_t unp_zone; static unp_gen_t unp_gencnt; /* (l) */ static u_int unp_count; /* (l) Count of local sockets. */ static ino_t unp_ino; /* Prototype for fake inode numbers. */ static int unp_rights; /* (g) File descriptors in flight. */ static struct unp_head unp_shead; /* (l) List of stream sockets. */ static struct unp_head unp_dhead; /* (l) List of datagram sockets. */ static struct unp_head unp_sphead; /* (l) List of seqpacket sockets. */ struct unp_defer { SLIST_ENTRY(unp_defer) ud_link; struct file *ud_fp; }; static SLIST_HEAD(, unp_defer) unp_defers; static int unp_defers_count; static const struct sockaddr sun_noname = { sizeof(sun_noname), AF_LOCAL }; /* * Garbage collection of cyclic file descriptor/socket references occurs * asynchronously in a taskqueue context in order to avoid recursion and * reentrance in the UNIX domain socket, file descriptor, and socket layer * code. See unp_gc() for a full description. */ static struct timeout_task unp_gc_task; /* * The close of unix domain sockets attached as SCM_RIGHTS is * postponed to the taskqueue, to avoid arbitrary recursion depth. * The attached sockets might have another sockets attached. */ static struct task unp_defer_task; /* * Both send and receive buffers are allocated PIPSIZ bytes of buffering for * stream sockets, although the total for sender and receiver is actually * only PIPSIZ. 
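* (With the default PIPSIZ of 8192 defined below, a stream socket can
* therefore have at most 8k in flight in each direction, however that
* space happens to be split between the two buffers.)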
* * Datagram sockets really use the sendspace as the maximum datagram size, * and don't really want to reserve the sendspace. Their recvspace should be * large enough for at least one max-size datagram plus address. */ #ifndef PIPSIZ #define PIPSIZ 8192 #endif static u_long unpst_sendspace = PIPSIZ; static u_long unpst_recvspace = PIPSIZ; static u_long unpdg_sendspace = 2*1024; /* really max datagram size */ static u_long unpdg_recvspace = 4*1024; static u_long unpsp_sendspace = PIPSIZ; /* really max datagram size */ static u_long unpsp_recvspace = PIPSIZ; static SYSCTL_NODE(_net, PF_LOCAL, local, CTLFLAG_RW, 0, "Local domain"); static SYSCTL_NODE(_net_local, SOCK_STREAM, stream, CTLFLAG_RW, 0, "SOCK_STREAM"); static SYSCTL_NODE(_net_local, SOCK_DGRAM, dgram, CTLFLAG_RW, 0, "SOCK_DGRAM"); static SYSCTL_NODE(_net_local, SOCK_SEQPACKET, seqpacket, CTLFLAG_RW, 0, "SOCK_SEQPACKET"); SYSCTL_ULONG(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW, &unpst_sendspace, 0, "Default stream send space."); SYSCTL_ULONG(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW, &unpst_recvspace, 0, "Default stream receive space."); SYSCTL_ULONG(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW, &unpdg_sendspace, 0, "Default datagram send space."); SYSCTL_ULONG(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW, &unpdg_recvspace, 0, "Default datagram receive space."); SYSCTL_ULONG(_net_local_seqpacket, OID_AUTO, maxseqpacket, CTLFLAG_RW, &unpsp_sendspace, 0, "Default seqpacket send space."); SYSCTL_ULONG(_net_local_seqpacket, OID_AUTO, recvspace, CTLFLAG_RW, &unpsp_recvspace, 0, "Default seqpacket receive space."); SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0, "File descriptors in flight."); SYSCTL_INT(_net_local, OID_AUTO, deferred, CTLFLAG_RD, &unp_defers_count, 0, "File descriptors deferred to taskqueue for close."); /* * Locking and synchronization: * * Three types of locks exist in the local domain socket implementation: a * global list mutex, a global linkage rwlock, and per-unpcb mutexes. Of the * global locks, the list lock protects the socket count, global generation * number, and stream/datagram global lists. The linkage lock protects the * interconnection of unpcbs, the v_socket and unp_vnode pointers, and can be * held exclusively over the acquisition of multiple unpcb locks to prevent * deadlock. * * UNIX domain sockets each have an unpcb hung off of their so_pcb pointer, * allocated in pru_attach() and freed in pru_detach(). The validity of that * pointer is an invariant, so no lock is required to dereference the so_pcb * pointer if a valid socket reference is held by the caller. In practice, * this is always true during operations performed on a socket. Each unpcb * has a back-pointer to its socket, unp_socket, which will be stable under * the same circumstances. * * This pointer may only be safely dereferenced as long as a valid reference * to the unpcb is held. Typically, this reference will be from the socket, * or from another unpcb when the referring unpcb's lock is held (in order * that the reference not be invalidated during use). For example, to follow * unp->unp_conn->unp_socket, you need to hold the lock on unp, not unp_conn, * as unp_socket remains valid as long as the reference to unp_conn is valid. * * Fields of unpcbs are locked using a per-unpcb lock, unp_mtx. Individual * atomic reads without the lock may be performed "lockless", but more * complex reads and read-modify-writes require the mutex to be held.
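* For example, a hypothetical reader wanting unp->unp_conn->unp_socket
* takes the lock on unp, not on unp_conn:
*
*	UNP_PCB_LOCK(unp);
*	if (unp->unp_conn != NULL)
*		so2 = unp->unp_conn->unp_socket;
*	UNP_PCB_UNLOCK(unp);
*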
No * lock order is defined between unpcb locks -- multiple unpcb locks may be * acquired at the same time only when holding the linkage rwlock * exclusively, which prevents deadlocks. * * Blocking with UNIX domain sockets is a tricky issue: unlike most network * protocols, bind() is a non-atomic operation, and connect() requires * potential sleeping in the protocol, due to potentially waiting on local or * distributed file systems. We try to separate "lookup" operations, which * may sleep, and the IPC operations themselves, which typically can occur * with relative atomicity as locks can be held over the entire operation. * * Another tricky issue is simultaneous multi-threaded or multi-process * access to a single UNIX domain socket. These are handled by the flags * UNP_CONNECTING and UNP_BINDING, which prevent concurrent connecting or * binding, both of which involve dropping UNIX domain socket locks in order * to perform namei() and other file system operations. */ static struct rwlock unp_link_rwlock; static struct mtx unp_list_lock; static struct mtx unp_defers_lock; #define UNP_LINK_LOCK_INIT() rw_init(&unp_link_rwlock, \ "unp_link_rwlock") #define UNP_LINK_LOCK_ASSERT() rw_assert(&unp_link_rwlock, \ RA_LOCKED) #define UNP_LINK_UNLOCK_ASSERT() rw_assert(&unp_link_rwlock, \ RA_UNLOCKED) #define UNP_LINK_RLOCK() rw_rlock(&unp_link_rwlock) #define UNP_LINK_RUNLOCK() rw_runlock(&unp_link_rwlock) #define UNP_LINK_WLOCK() rw_wlock(&unp_link_rwlock) #define UNP_LINK_WUNLOCK() rw_wunlock(&unp_link_rwlock) #define UNP_LINK_WLOCK_ASSERT() rw_assert(&unp_link_rwlock, \ RA_WLOCKED) #define UNP_LIST_LOCK_INIT() mtx_init(&unp_list_lock, \ "unp_list_lock", NULL, MTX_DEF) #define UNP_LIST_LOCK() mtx_lock(&unp_list_lock) #define UNP_LIST_UNLOCK() mtx_unlock(&unp_list_lock) #define UNP_DEFERRED_LOCK_INIT() mtx_init(&unp_defers_lock, \ "unp_defer", NULL, MTX_DEF) #define UNP_DEFERRED_LOCK() mtx_lock(&unp_defers_lock) #define UNP_DEFERRED_UNLOCK() mtx_unlock(&unp_defers_lock) #define UNP_PCB_LOCK_INIT(unp) mtx_init(&(unp)->unp_mtx, \ "unp_mtx", "unp_mtx", \ MTX_DUPOK|MTX_DEF|MTX_RECURSE) #define UNP_PCB_LOCK_DESTROY(unp) mtx_destroy(&(unp)->unp_mtx) #define UNP_PCB_LOCK(unp) mtx_lock(&(unp)->unp_mtx) #define UNP_PCB_UNLOCK(unp) mtx_unlock(&(unp)->unp_mtx) #define UNP_PCB_LOCK_ASSERT(unp) mtx_assert(&(unp)->unp_mtx, MA_OWNED) static int uipc_connect2(struct socket *, struct socket *); static int uipc_ctloutput(struct socket *, struct sockopt *); static int unp_connect(struct socket *, struct sockaddr *, struct thread *); static int unp_connectat(int, struct socket *, struct sockaddr *, struct thread *); static int unp_connect2(struct socket *so, struct socket *so2, int); static void unp_disconnect(struct unpcb *unp, struct unpcb *unp2); static void unp_dispose(struct socket *so); static void unp_dispose_mbuf(struct mbuf *); static void unp_shutdown(struct unpcb *); static void unp_drop(struct unpcb *); static void unp_gc(__unused void *, int); static void unp_scan(struct mbuf *, void (*)(struct filedescent **, int)); static void unp_discard(struct file *); static void unp_freerights(struct filedescent **, int); static void unp_init(void); static int unp_internalize(struct mbuf **, struct thread *); static void unp_internalize_fp(struct file *); static int unp_externalize(struct mbuf *, struct mbuf **, int); static int unp_externalize_fp(struct file *); static struct mbuf *unp_addsockcred(struct thread *, struct mbuf *); static void unp_process_defers(void * __unused, int); /* * Definitions of protocols 
supported in the LOCAL domain. */ static struct domain localdomain; static struct pr_usrreqs uipc_usrreqs_dgram, uipc_usrreqs_stream; static struct pr_usrreqs uipc_usrreqs_seqpacket; static struct protosw localsw[] = { { .pr_type = SOCK_STREAM, .pr_domain = &localdomain, .pr_flags = PR_CONNREQUIRED|PR_WANTRCVD|PR_RIGHTS, .pr_ctloutput = &uipc_ctloutput, .pr_usrreqs = &uipc_usrreqs_stream }, { .pr_type = SOCK_DGRAM, .pr_domain = &localdomain, .pr_flags = PR_ATOMIC|PR_ADDR|PR_RIGHTS, .pr_ctloutput = &uipc_ctloutput, .pr_usrreqs = &uipc_usrreqs_dgram }, { .pr_type = SOCK_SEQPACKET, .pr_domain = &localdomain, /* * XXXRW: For now, PR_ADDR because soreceive will bump into them * due to our use of sbappendaddr. A new sbappend variant is needed * that supports both atomic record writes and control data. */ .pr_flags = PR_ADDR|PR_ATOMIC|PR_CONNREQUIRED|PR_WANTRCVD| PR_RIGHTS, .pr_ctloutput = &uipc_ctloutput, .pr_usrreqs = &uipc_usrreqs_seqpacket, }, }; static struct domain localdomain = { .dom_family = AF_LOCAL, .dom_name = "local", .dom_init = unp_init, .dom_externalize = unp_externalize, .dom_dispose = unp_dispose, .dom_protosw = localsw, .dom_protoswNPROTOSW = &localsw[nitems(localsw)] }; DOMAIN_SET(local); static void uipc_abort(struct socket *so) { struct unpcb *unp, *unp2; unp = sotounpcb(so); KASSERT(unp != NULL, ("uipc_abort: unp == NULL")); UNP_LINK_WLOCK(); UNP_PCB_LOCK(unp); unp2 = unp->unp_conn; if (unp2 != NULL) { UNP_PCB_LOCK(unp2); unp_drop(unp2); UNP_PCB_UNLOCK(unp2); } UNP_PCB_UNLOCK(unp); UNP_LINK_WUNLOCK(); } static int uipc_accept(struct socket *so, struct sockaddr **nam) { struct unpcb *unp, *unp2; const struct sockaddr *sa; /* * Pass back name of connected socket, if it was bound and we are * still connected (our peer may have closed already!).
*/ unp = sotounpcb(so); KASSERT(unp != NULL, ("uipc_accept: unp == NULL")); *nam = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK); UNP_LINK_RLOCK(); unp2 = unp->unp_conn; if (unp2 != NULL && unp2->unp_addr != NULL) { UNP_PCB_LOCK(unp2); sa = (struct sockaddr *) unp2->unp_addr; bcopy(sa, *nam, sa->sa_len); UNP_PCB_UNLOCK(unp2); } else { sa = &sun_noname; bcopy(sa, *nam, sa->sa_len); } UNP_LINK_RUNLOCK(); return (0); } static int uipc_attach(struct socket *so, int proto, struct thread *td) { u_long sendspace, recvspace; struct unpcb *unp; int error; KASSERT(so->so_pcb == NULL, ("uipc_attach: so_pcb != NULL")); if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { switch (so->so_type) { case SOCK_STREAM: sendspace = unpst_sendspace; recvspace = unpst_recvspace; break; case SOCK_DGRAM: sendspace = unpdg_sendspace; recvspace = unpdg_recvspace; break; case SOCK_SEQPACKET: sendspace = unpsp_sendspace; recvspace = unpsp_recvspace; break; default: panic("uipc_attach"); } error = soreserve(so, sendspace, recvspace); if (error) return (error); } unp = uma_zalloc(unp_zone, M_NOWAIT | M_ZERO); if (unp == NULL) return (ENOBUFS); LIST_INIT(&unp->unp_refs); UNP_PCB_LOCK_INIT(unp); unp->unp_socket = so; so->so_pcb = unp; unp->unp_refcount = 1; if (so->so_head != NULL) unp->unp_flags |= UNP_NASCENT; UNP_LIST_LOCK(); unp->unp_gencnt = ++unp_gencnt; unp_count++; switch (so->so_type) { case SOCK_STREAM: LIST_INSERT_HEAD(&unp_shead, unp, unp_link); break; case SOCK_DGRAM: LIST_INSERT_HEAD(&unp_dhead, unp, unp_link); break; case SOCK_SEQPACKET: LIST_INSERT_HEAD(&unp_sphead, unp, unp_link); break; default: panic("uipc_attach"); } UNP_LIST_UNLOCK(); return (0); } static int uipc_bindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td) { struct sockaddr_un *soun = (struct sockaddr_un *)nam; struct vattr vattr; int error, namelen; struct nameidata nd; struct unpcb *unp; struct vnode *vp; struct mount *mp; cap_rights_t rights; char *buf; if (nam->sa_family != AF_UNIX) return (EAFNOSUPPORT); unp = sotounpcb(so); KASSERT(unp != NULL, ("uipc_bind: unp == NULL")); if (soun->sun_len > sizeof(struct sockaddr_un)) return (EINVAL); namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path); if (namelen <= 0) return (EINVAL); /* * We don't allow simultaneous bind() calls on a single UNIX domain * socket, so flag in-progress operations, and return an error if an * operation is already in progress. * * Historically, we have not allowed a socket to be rebound, so this * also returns an error. Not allowing re-binding simplifies the * implementation and avoids a great many possible failure modes. 
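* (Concretely, a second bind(2) on an already-bound local socket
* fails with EINVAL in the unp_vnode check below, rather than
* re-binding the socket to a new path.)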
*/ UNP_PCB_LOCK(unp); if (unp->unp_vnode != NULL) { UNP_PCB_UNLOCK(unp); return (EINVAL); } if (unp->unp_flags & UNP_BINDING) { UNP_PCB_UNLOCK(unp); return (EALREADY); } unp->unp_flags |= UNP_BINDING; UNP_PCB_UNLOCK(unp); buf = malloc(namelen + 1, M_TEMP, M_WAITOK); bcopy(soun->sun_path, buf, namelen); buf[namelen] = 0; restart: NDINIT_ATRIGHTS(&nd, CREATE, NOFOLLOW | LOCKPARENT | SAVENAME | NOCACHE, UIO_SYSSPACE, buf, fd, cap_rights_init(&rights, CAP_BINDAT), td); /* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */ error = namei(&nd); if (error) goto error; vp = nd.ni_vp; if (vp != NULL || vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) { NDFREE(&nd, NDF_ONLY_PNBUF); if (nd.ni_dvp == vp) vrele(nd.ni_dvp); else vput(nd.ni_dvp); if (vp != NULL) { vrele(vp); error = EADDRINUSE; goto error; } error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH); if (error) goto error; goto restart; } VATTR_NULL(&vattr); vattr.va_type = VSOCK; vattr.va_mode = (ACCESSPERMS & ~td->td_proc->p_fd->fd_cmask); #ifdef MAC error = mac_vnode_check_create(td->td_ucred, nd.ni_dvp, &nd.ni_cnd, &vattr); #endif if (error == 0) error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr); NDFREE(&nd, NDF_ONLY_PNBUF); vput(nd.ni_dvp); if (error) { vn_finished_write(mp); goto error; } vp = nd.ni_vp; ASSERT_VOP_ELOCKED(vp, "uipc_bind"); soun = (struct sockaddr_un *)sodupsockaddr(nam, M_WAITOK); UNP_LINK_WLOCK(); UNP_PCB_LOCK(unp); - VOP_UNP_BIND(vp, unp->unp_socket); + VOP_UNP_BIND(vp, unp); unp->unp_vnode = vp; unp->unp_addr = soun; unp->unp_flags &= ~UNP_BINDING; UNP_PCB_UNLOCK(unp); UNP_LINK_WUNLOCK(); VOP_UNLOCK(vp, 0); vn_finished_write(mp); free(buf, M_TEMP); return (0); error: UNP_PCB_LOCK(unp); unp->unp_flags &= ~UNP_BINDING; UNP_PCB_UNLOCK(unp); free(buf, M_TEMP); return (error); } static int uipc_bind(struct socket *so, struct sockaddr *nam, struct thread *td) { return (uipc_bindat(AT_FDCWD, so, nam, td)); } static int uipc_connect(struct socket *so, struct sockaddr *nam, struct thread *td) { int error; KASSERT(td == curthread, ("uipc_connect: td != curthread")); UNP_LINK_WLOCK(); error = unp_connect(so, nam, td); UNP_LINK_WUNLOCK(); return (error); } static int uipc_connectat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td) { int error; KASSERT(td == curthread, ("uipc_connectat: td != curthread")); UNP_LINK_WLOCK(); error = unp_connectat(fd, so, nam, td); UNP_LINK_WUNLOCK(); return (error); } static void uipc_close(struct socket *so) { struct unpcb *unp, *unp2; unp = sotounpcb(so); KASSERT(unp != NULL, ("uipc_close: unp == NULL")); UNP_LINK_WLOCK(); UNP_PCB_LOCK(unp); unp2 = unp->unp_conn; if (unp2 != NULL) { UNP_PCB_LOCK(unp2); unp_disconnect(unp, unp2); UNP_PCB_UNLOCK(unp2); } UNP_PCB_UNLOCK(unp); UNP_LINK_WUNLOCK(); } static int uipc_connect2(struct socket *so1, struct socket *so2) { struct unpcb *unp, *unp2; int error; UNP_LINK_WLOCK(); unp = so1->so_pcb; KASSERT(unp != NULL, ("uipc_connect2: unp == NULL")); UNP_PCB_LOCK(unp); unp2 = so2->so_pcb; KASSERT(unp2 != NULL, ("uipc_connect2: unp2 == NULL")); UNP_PCB_LOCK(unp2); error = unp_connect2(so1, so2, PRU_CONNECT2); UNP_PCB_UNLOCK(unp2); UNP_PCB_UNLOCK(unp); UNP_LINK_WUNLOCK(); return (error); } static void uipc_detach(struct socket *so) { struct unpcb *unp, *unp2; struct sockaddr_un *saved_unp_addr; struct vnode *vp; int freeunp, local_unp_rights; unp = sotounpcb(so); KASSERT(unp != NULL, ("uipc_detach: unp == NULL")); vp = NULL; local_unp_rights = 0; UNP_LIST_LOCK(); LIST_REMOVE(unp, unp_link); unp->unp_gencnt = ++unp_gencnt; 
--unp_count; UNP_LIST_UNLOCK(); if ((unp->unp_flags & UNP_NASCENT) != 0) { UNP_PCB_LOCK(unp); goto teardown; } UNP_LINK_WLOCK(); UNP_PCB_LOCK(unp); - /* - * XXXRW: Should assert vp->v_socket == so. - */ if ((vp = unp->unp_vnode) != NULL) { VOP_UNP_DETACH(vp); unp->unp_vnode = NULL; } unp2 = unp->unp_conn; if (unp2 != NULL) { UNP_PCB_LOCK(unp2); unp_disconnect(unp, unp2); UNP_PCB_UNLOCK(unp2); } /* * We hold the linkage lock exclusively, so it's OK to acquire * multiple pcb locks at a time. */ while (!LIST_EMPTY(&unp->unp_refs)) { struct unpcb *ref = LIST_FIRST(&unp->unp_refs); UNP_PCB_LOCK(ref); unp_drop(ref); UNP_PCB_UNLOCK(ref); } local_unp_rights = unp_rights; UNP_LINK_WUNLOCK(); teardown: unp->unp_socket->so_pcb = NULL; saved_unp_addr = unp->unp_addr; unp->unp_addr = NULL; unp->unp_refcount--; freeunp = (unp->unp_refcount == 0); if (saved_unp_addr != NULL) free(saved_unp_addr, M_SONAME); if (freeunp) { UNP_PCB_LOCK_DESTROY(unp); uma_zfree(unp_zone, unp); } else UNP_PCB_UNLOCK(unp); if (vp) vrele(vp); if (local_unp_rights) taskqueue_enqueue_timeout(taskqueue_thread, &unp_gc_task, -1); } static int uipc_disconnect(struct socket *so) { struct unpcb *unp, *unp2; unp = sotounpcb(so); KASSERT(unp != NULL, ("uipc_disconnect: unp == NULL")); UNP_LINK_WLOCK(); UNP_PCB_LOCK(unp); unp2 = unp->unp_conn; if (unp2 != NULL) { UNP_PCB_LOCK(unp2); unp_disconnect(unp, unp2); UNP_PCB_UNLOCK(unp2); } UNP_PCB_UNLOCK(unp); UNP_LINK_WUNLOCK(); return (0); } static int uipc_listen(struct socket *so, int backlog, struct thread *td) { struct unpcb *unp; int error; if (so->so_type != SOCK_STREAM && so->so_type != SOCK_SEQPACKET) return (EOPNOTSUPP); unp = sotounpcb(so); KASSERT(unp != NULL, ("uipc_listen: unp == NULL")); UNP_PCB_LOCK(unp); if (unp->unp_vnode == NULL) { /* Already connected or not bound to an address. */ error = unp->unp_conn != NULL ? EINVAL : EDESTADDRREQ; UNP_PCB_UNLOCK(unp); return (error); } SOCK_LOCK(so); error = solisten_proto_check(so); if (error == 0) { cru2x(td->td_ucred, &unp->unp_peercred); - unp->unp_flags |= UNP_HAVEPCCACHED; solisten_proto(so, backlog); } SOCK_UNLOCK(so); UNP_PCB_UNLOCK(unp); return (error); } static int uipc_peeraddr(struct socket *so, struct sockaddr **nam) { struct unpcb *unp, *unp2; const struct sockaddr *sa; unp = sotounpcb(so); KASSERT(unp != NULL, ("uipc_peeraddr: unp == NULL")); *nam = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK); UNP_LINK_RLOCK(); /* * XXX: It seems that this test always fails even when the connection * is established. So, this else clause is added as a workaround to * return the PF_LOCAL sockaddr. */ unp2 = unp->unp_conn; if (unp2 != NULL) { UNP_PCB_LOCK(unp2); if (unp2->unp_addr != NULL) sa = (struct sockaddr *) unp2->unp_addr; else sa = &sun_noname; bcopy(sa, *nam, sa->sa_len); UNP_PCB_UNLOCK(unp2); } else { sa = &sun_noname; bcopy(sa, *nam, sa->sa_len); } UNP_LINK_RUNLOCK(); return (0); } static int uipc_rcvd(struct socket *so, int flags) { struct unpcb *unp, *unp2; struct socket *so2; u_int mbcnt, sbcc; unp = sotounpcb(so); KASSERT(unp != NULL, ("%s: unp == NULL", __func__)); KASSERT(so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET, ("%s: socktype %d", __func__, so->so_type)); /* * Adjust backpressure on the sender and wake up any threads waiting * to write. * * The unp lock is acquired to maintain the validity of the unp_conn * pointer; no lock on unp2 is required as unp2->unp_socket will be * static as long as we don't permit unp2 to disconnect from unp, * which is prevented by the lock on unp.
We cache values from * so_rcv to avoid holding the so_rcv lock over the entire * transaction on the remote so_snd. */ SOCKBUF_LOCK(&so->so_rcv); mbcnt = so->so_rcv.sb_mbcnt; sbcc = sbavail(&so->so_rcv); SOCKBUF_UNLOCK(&so->so_rcv); /* * There is a benign race condition at this point. If we're planning to * clear SB_STOP, but uipc_send is called on the connected socket at * this instant, it might add data to the sockbuf and set SB_STOP. Then * we would erroneously clear SB_STOP below, even though the sockbuf is * full. The race is benign because the only ill effect is to allow the * sockbuf to exceed its size limit, and the size limits are not * strictly guaranteed anyway. */ UNP_PCB_LOCK(unp); unp2 = unp->unp_conn; if (unp2 == NULL) { UNP_PCB_UNLOCK(unp); return (0); } so2 = unp2->unp_socket; SOCKBUF_LOCK(&so2->so_snd); if (sbcc < so2->so_snd.sb_hiwat && mbcnt < so2->so_snd.sb_mbmax) so2->so_snd.sb_flags &= ~SB_STOP; sowwakeup_locked(so2); UNP_PCB_UNLOCK(unp); return (0); } static int uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, struct mbuf *control, struct thread *td) { struct unpcb *unp, *unp2; struct socket *so2; u_int mbcnt, sbcc; int error = 0; unp = sotounpcb(so); KASSERT(unp != NULL, ("%s: unp == NULL", __func__)); KASSERT(so->so_type == SOCK_STREAM || so->so_type == SOCK_DGRAM || so->so_type == SOCK_SEQPACKET, ("%s: socktype %d", __func__, so->so_type)); if (flags & PRUS_OOB) { error = EOPNOTSUPP; goto release; } if (control != NULL && (error = unp_internalize(&control, td))) goto release; if ((nam != NULL) || (flags & PRUS_EOF)) UNP_LINK_WLOCK(); else UNP_LINK_RLOCK(); switch (so->so_type) { case SOCK_DGRAM: { const struct sockaddr *from; unp2 = unp->unp_conn; if (nam != NULL) { UNP_LINK_WLOCK_ASSERT(); if (unp2 != NULL) { error = EISCONN; break; } error = unp_connect(so, nam, td); if (error) break; unp2 = unp->unp_conn; } /* * Because connect() and send() are non-atomic in a sendto() * with a target address, it's possible that the socket will * have disconnected before the send() can run. In that case * return the slightly counter-intuitive but otherwise * correct error that the socket is not connected. */ if (unp2 == NULL) { error = ENOTCONN; break; } /* Lockless read. */ if (unp2->unp_flags & UNP_WANTCRED) control = unp_addsockcred(td, control); UNP_PCB_LOCK(unp); if (unp->unp_addr != NULL) from = (struct sockaddr *)unp->unp_addr; else from = &sun_noname; so2 = unp2->unp_socket; SOCKBUF_LOCK(&so2->so_rcv); if (sbappendaddr_locked(&so2->so_rcv, from, m, control)) { sorwakeup_locked(so2); m = NULL; control = NULL; } else { SOCKBUF_UNLOCK(&so2->so_rcv); error = ENOBUFS; } if (nam != NULL) { UNP_LINK_WLOCK_ASSERT(); UNP_PCB_LOCK(unp2); unp_disconnect(unp, unp2); UNP_PCB_UNLOCK(unp2); } UNP_PCB_UNLOCK(unp); break; } case SOCK_SEQPACKET: case SOCK_STREAM: if ((so->so_state & SS_ISCONNECTED) == 0) { if (nam != NULL) { UNP_LINK_WLOCK_ASSERT(); error = unp_connect(so, nam, td); if (error) break; /* XXX */ } else { error = ENOTCONN; break; } } /* Lockless read. */ if (so->so_snd.sb_state & SBS_CANTSENDMORE) { error = EPIPE; break; } /* * Because connect() and send() are non-atomic in a sendto() * with a target address, it's possible that the socket will * have disconnected before the send() can run. In that case * return the slightly counter-intuitive but otherwise * correct error that the socket is not connected. 
* * Locking here must be done carefully: the linkage lock * prevents interconnections between unpcbs from changing, so * we can traverse from unp to unp2 without acquiring unp's * lock. Socket buffer locks follow unpcb locks, so we can * acquire both remote and local socket buffer locks. */ unp2 = unp->unp_conn; if (unp2 == NULL) { error = ENOTCONN; break; } so2 = unp2->unp_socket; UNP_PCB_LOCK(unp2); SOCKBUF_LOCK(&so2->so_rcv); if (unp2->unp_flags & UNP_WANTCRED) { /* * Credentials are passed only once on SOCK_STREAM * and SOCK_SEQPACKET. */ unp2->unp_flags &= ~UNP_WANTCRED; control = unp_addsockcred(td, control); } /* * Send to paired receive port, and then reduce send buffer * hiwater marks to maintain backpressure. Wake up readers. */ switch (so->so_type) { case SOCK_STREAM: if (control != NULL) { if (sbappendcontrol_locked(&so2->so_rcv, m, control)) control = NULL; } else sbappend_locked(&so2->so_rcv, m, flags); break; case SOCK_SEQPACKET: { const struct sockaddr *from; from = &sun_noname; /* * Don't check for space available in so2->so_rcv. * Unix domain sockets only check for space in the * sending sockbuf, and that check is performed one * level up the stack. */ if (sbappendaddr_nospacecheck_locked(&so2->so_rcv, from, m, control)) control = NULL; break; } } mbcnt = so2->so_rcv.sb_mbcnt; sbcc = sbavail(&so2->so_rcv); if (sbcc) sorwakeup_locked(so2); else SOCKBUF_UNLOCK(&so2->so_rcv); /* * The PCB lock on unp2 protects the SB_STOP flag. Without it, * it would be possible for uipc_rcvd to be called at this * point, drain the receiving sockbuf, clear SB_STOP, and then * we would set SB_STOP below. That could lead to an empty * sockbuf having SB_STOP set. */ SOCKBUF_LOCK(&so->so_snd); if (sbcc >= so->so_snd.sb_hiwat || mbcnt >= so->so_snd.sb_mbmax) so->so_snd.sb_flags |= SB_STOP; SOCKBUF_UNLOCK(&so->so_snd); UNP_PCB_UNLOCK(unp2); m = NULL; break; } /* * PRUS_EOF is equivalent to pru_send followed by pru_shutdown. */ if (flags & PRUS_EOF) { UNP_PCB_LOCK(unp); socantsendmore(so); unp_shutdown(unp); UNP_PCB_UNLOCK(unp); } if ((nam != NULL) || (flags & PRUS_EOF)) UNP_LINK_WUNLOCK(); else UNP_LINK_RUNLOCK(); if (control != NULL && error != 0) unp_dispose_mbuf(control); release: if (control != NULL) m_freem(control); if (m != NULL) m_freem(m); return (error); } static int uipc_ready(struct socket *so, struct mbuf *m, int count) { struct unpcb *unp, *unp2; struct socket *so2; int error; unp = sotounpcb(so); UNP_LINK_RLOCK(); unp2 = unp->unp_conn; UNP_PCB_LOCK(unp2); so2 = unp2->unp_socket; SOCKBUF_LOCK(&so2->so_rcv); if ((error = sbready(&so2->so_rcv, m, count)) == 0) sorwakeup_locked(so2); else SOCKBUF_UNLOCK(&so2->so_rcv); UNP_PCB_UNLOCK(unp2); UNP_LINK_RUNLOCK(); return (error); } static int uipc_sense(struct socket *so, struct stat *sb) { struct unpcb *unp; unp = sotounpcb(so); KASSERT(unp != NULL, ("uipc_sense: unp == NULL")); sb->st_blksize = so->so_snd.sb_hiwat; UNP_PCB_LOCK(unp); sb->st_dev = NODEV; if (unp->unp_ino == 0) unp->unp_ino = (++unp_ino == 0) ?
++unp_ino : unp_ino; sb->st_ino = unp->unp_ino; UNP_PCB_UNLOCK(unp); return (0); } static int uipc_shutdown(struct socket *so) { struct unpcb *unp; unp = sotounpcb(so); KASSERT(unp != NULL, ("uipc_shutdown: unp == NULL")); UNP_LINK_WLOCK(); UNP_PCB_LOCK(unp); socantsendmore(so); unp_shutdown(unp); UNP_PCB_UNLOCK(unp); UNP_LINK_WUNLOCK(); return (0); } static int uipc_sockaddr(struct socket *so, struct sockaddr **nam) { struct unpcb *unp; const struct sockaddr *sa; unp = sotounpcb(so); KASSERT(unp != NULL, ("uipc_sockaddr: unp == NULL")); *nam = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK); UNP_PCB_LOCK(unp); if (unp->unp_addr != NULL) sa = (struct sockaddr *) unp->unp_addr; else sa = &sun_noname; bcopy(sa, *nam, sa->sa_len); UNP_PCB_UNLOCK(unp); return (0); } static struct pr_usrreqs uipc_usrreqs_dgram = { .pru_abort = uipc_abort, .pru_accept = uipc_accept, .pru_attach = uipc_attach, .pru_bind = uipc_bind, .pru_bindat = uipc_bindat, .pru_connect = uipc_connect, .pru_connectat = uipc_connectat, .pru_connect2 = uipc_connect2, .pru_detach = uipc_detach, .pru_disconnect = uipc_disconnect, .pru_listen = uipc_listen, .pru_peeraddr = uipc_peeraddr, .pru_rcvd = uipc_rcvd, .pru_send = uipc_send, .pru_sense = uipc_sense, .pru_shutdown = uipc_shutdown, .pru_sockaddr = uipc_sockaddr, .pru_soreceive = soreceive_dgram, .pru_close = uipc_close, }; static struct pr_usrreqs uipc_usrreqs_seqpacket = { .pru_abort = uipc_abort, .pru_accept = uipc_accept, .pru_attach = uipc_attach, .pru_bind = uipc_bind, .pru_bindat = uipc_bindat, .pru_connect = uipc_connect, .pru_connectat = uipc_connectat, .pru_connect2 = uipc_connect2, .pru_detach = uipc_detach, .pru_disconnect = uipc_disconnect, .pru_listen = uipc_listen, .pru_peeraddr = uipc_peeraddr, .pru_rcvd = uipc_rcvd, .pru_send = uipc_send, .pru_sense = uipc_sense, .pru_shutdown = uipc_shutdown, .pru_sockaddr = uipc_sockaddr, .pru_soreceive = soreceive_generic, /* XXX: or...? */ .pru_close = uipc_close, }; static struct pr_usrreqs uipc_usrreqs_stream = { .pru_abort = uipc_abort, .pru_accept = uipc_accept, .pru_attach = uipc_attach, .pru_bind = uipc_bind, .pru_bindat = uipc_bindat, .pru_connect = uipc_connect, .pru_connectat = uipc_connectat, .pru_connect2 = uipc_connect2, .pru_detach = uipc_detach, .pru_disconnect = uipc_disconnect, .pru_listen = uipc_listen, .pru_peeraddr = uipc_peeraddr, .pru_rcvd = uipc_rcvd, .pru_send = uipc_send, .pru_ready = uipc_ready, .pru_sense = uipc_sense, .pru_shutdown = uipc_shutdown, .pru_sockaddr = uipc_sockaddr, .pru_soreceive = soreceive_generic, .pru_close = uipc_close, }; static int uipc_ctloutput(struct socket *so, struct sockopt *sopt) { struct unpcb *unp; struct xucred xu; int error, optval; if (sopt->sopt_level != 0) return (EINVAL); unp = sotounpcb(so); KASSERT(unp != NULL, ("uipc_ctloutput: unp == NULL")); error = 0; switch (sopt->sopt_dir) { case SOPT_GET: switch (sopt->sopt_name) { case LOCAL_PEERCRED: UNP_PCB_LOCK(unp); if (unp->unp_flags & UNP_HAVEPC) xu = unp->unp_peercred; else { if (so->so_type == SOCK_STREAM) error = ENOTCONN; else error = EINVAL; } UNP_PCB_UNLOCK(unp); if (error == 0) error = sooptcopyout(sopt, &xu, sizeof(xu)); break; case LOCAL_CREDS: /* Unlocked read. */ optval = unp->unp_flags & UNP_WANTCRED ? 1 : 0; error = sooptcopyout(sopt, &optval, sizeof(optval)); break; case LOCAL_CONNWAIT: /* Unlocked read. */ optval = unp->unp_flags & UNP_CONNWAIT ? 
1 : 0; error = sooptcopyout(sopt, &optval, sizeof(optval)); break; default: error = EOPNOTSUPP; break; } break; case SOPT_SET: switch (sopt->sopt_name) { case LOCAL_CREDS: case LOCAL_CONNWAIT: error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); if (error) break; #define OPTSET(bit) do { \ UNP_PCB_LOCK(unp); \ if (optval) \ unp->unp_flags |= bit; \ else \ unp->unp_flags &= ~bit; \ UNP_PCB_UNLOCK(unp); \ } while (0) switch (sopt->sopt_name) { case LOCAL_CREDS: OPTSET(UNP_WANTCRED); break; case LOCAL_CONNWAIT: OPTSET(UNP_CONNWAIT); break; default: break; } break; #undef OPTSET default: error = ENOPROTOOPT; break; } break; default: error = EOPNOTSUPP; break; } return (error); } static int unp_connect(struct socket *so, struct sockaddr *nam, struct thread *td) { return (unp_connectat(AT_FDCWD, so, nam, td)); } static int unp_connectat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td) { struct sockaddr_un *soun = (struct sockaddr_un *)nam; struct vnode *vp; struct socket *so2, *so3; struct unpcb *unp, *unp2, *unp3; struct nameidata nd; char buf[SOCK_MAXADDRLEN]; struct sockaddr *sa; cap_rights_t rights; int error, len; if (nam->sa_family != AF_UNIX) return (EAFNOSUPPORT); UNP_LINK_WLOCK_ASSERT(); unp = sotounpcb(so); KASSERT(unp != NULL, ("unp_connect: unp == NULL")); if (nam->sa_len > sizeof(struct sockaddr_un)) return (EINVAL); len = nam->sa_len - offsetof(struct sockaddr_un, sun_path); if (len <= 0) return (EINVAL); bcopy(soun->sun_path, buf, len); buf[len] = 0; UNP_PCB_LOCK(unp); if (unp->unp_flags & UNP_CONNECTING) { UNP_PCB_UNLOCK(unp); return (EALREADY); } UNP_LINK_WUNLOCK(); unp->unp_flags |= UNP_CONNECTING; UNP_PCB_UNLOCK(unp); sa = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK); NDINIT_ATRIGHTS(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE, buf, fd, cap_rights_init(&rights, CAP_CONNECTAT), td); error = namei(&nd); if (error) vp = NULL; else vp = nd.ni_vp; ASSERT_VOP_LOCKED(vp, "unp_connect"); NDFREE(&nd, NDF_ONLY_PNBUF); if (error) goto bad; if (vp->v_type != VSOCK) { error = ENOTSOCK; goto bad; } #ifdef MAC error = mac_vnode_check_open(td->td_ucred, vp, VWRITE | VREAD); if (error) goto bad; #endif error = VOP_ACCESS(vp, VWRITE, td->td_ucred, td); if (error) goto bad; unp = sotounpcb(so); KASSERT(unp != NULL, ("unp_connect: unp == NULL")); /* * Lock linkage lock for two reasons: make sure v_socket is stable, * and to protect simultaneous locking of multiple pcbs. */ UNP_LINK_WLOCK(); - VOP_UNP_CONNECT(vp, &so2); - if (so2 == NULL) { + VOP_UNP_CONNECT(vp, &unp2); + if (unp2 == NULL) { error = ECONNREFUSED; goto bad2; } + so2 = unp2->unp_socket; if (so->so_type != so2->so_type) { error = EPROTOTYPE; goto bad2; } if (so->so_proto->pr_flags & PR_CONNREQUIRED) { if (so2->so_options & SO_ACCEPTCONN) { CURVNET_SET(so2->so_vnet); so3 = sonewconn(so2, 0); CURVNET_RESTORE(); } else so3 = NULL; if (so3 == NULL) { error = ECONNREFUSED; goto bad2; } unp = sotounpcb(so); unp2 = sotounpcb(so2); unp3 = sotounpcb(so3); UNP_PCB_LOCK(unp); UNP_PCB_LOCK(unp2); UNP_PCB_LOCK(unp3); if (unp2->unp_addr != NULL) { bcopy(unp2->unp_addr, sa, unp2->unp_addr->sun_len); unp3->unp_addr = (struct sockaddr_un *) sa; sa = NULL; } /* * The connector's (client's) credentials are copied from its * process structure at the time of connect() (which is now). 
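 * For example (illustrative userland sketch, not part of this change),
 * the server can later fetch these credentials on the accepted socket:
 *	struct xucred xu; socklen_t len = sizeof(xu);
 *	getsockopt(fd, 0, LOCAL_PEERCRED, &xu, &len);
 * which is served by the LOCAL_PEERCRED case in uipc_ctloutput() above.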
*/ cru2x(td->td_ucred, &unp3->unp_peercred); unp3->unp_flags |= UNP_HAVEPC; /* * The receiver's (server's) credentials are copied from the * unp_peercred member of socket on which the former called * listen(); uipc_listen() cached that process's credentials * at that time so we can use them now. */ - KASSERT(unp2->unp_flags & UNP_HAVEPCCACHED, - ("unp_connect: listener without cached peercred")); memcpy(&unp->unp_peercred, &unp2->unp_peercred, sizeof(unp->unp_peercred)); unp->unp_flags |= UNP_HAVEPC; if (unp2->unp_flags & UNP_WANTCRED) unp3->unp_flags |= UNP_WANTCRED; UNP_PCB_UNLOCK(unp3); UNP_PCB_UNLOCK(unp2); UNP_PCB_UNLOCK(unp); #ifdef MAC mac_socketpeer_set_from_socket(so, so3); mac_socketpeer_set_from_socket(so3, so); #endif so2 = so3; } unp = sotounpcb(so); KASSERT(unp != NULL, ("unp_connect: unp == NULL")); unp2 = sotounpcb(so2); KASSERT(unp2 != NULL, ("unp_connect: unp2 == NULL")); UNP_PCB_LOCK(unp); UNP_PCB_LOCK(unp2); error = unp_connect2(so, so2, PRU_CONNECT); UNP_PCB_UNLOCK(unp2); UNP_PCB_UNLOCK(unp); bad2: UNP_LINK_WUNLOCK(); bad: if (vp != NULL) vput(vp); free(sa, M_SONAME); UNP_LINK_WLOCK(); UNP_PCB_LOCK(unp); unp->unp_flags &= ~UNP_CONNECTING; UNP_PCB_UNLOCK(unp); return (error); } static int unp_connect2(struct socket *so, struct socket *so2, int req) { struct unpcb *unp; struct unpcb *unp2; unp = sotounpcb(so); KASSERT(unp != NULL, ("unp_connect2: unp == NULL")); unp2 = sotounpcb(so2); KASSERT(unp2 != NULL, ("unp_connect2: unp2 == NULL")); UNP_LINK_WLOCK_ASSERT(); UNP_PCB_LOCK_ASSERT(unp); UNP_PCB_LOCK_ASSERT(unp2); if (so2->so_type != so->so_type) return (EPROTOTYPE); unp2->unp_flags &= ~UNP_NASCENT; unp->unp_conn = unp2; switch (so->so_type) { case SOCK_DGRAM: LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink); soisconnected(so); break; case SOCK_STREAM: case SOCK_SEQPACKET: unp2->unp_conn = unp; if (req == PRU_CONNECT && ((unp->unp_flags | unp2->unp_flags) & UNP_CONNWAIT)) soisconnecting(so); else soisconnected(so); soisconnected(so2); break; default: panic("unp_connect2"); } return (0); } static void unp_disconnect(struct unpcb *unp, struct unpcb *unp2) { struct socket *so; KASSERT(unp2 != NULL, ("unp_disconnect: unp2 == NULL")); UNP_LINK_WLOCK_ASSERT(); UNP_PCB_LOCK_ASSERT(unp); UNP_PCB_LOCK_ASSERT(unp2); unp->unp_conn = NULL; switch (unp->unp_socket->so_type) { case SOCK_DGRAM: LIST_REMOVE(unp, unp_reflink); so = unp->unp_socket; SOCK_LOCK(so); so->so_state &= ~SS_ISCONNECTED; SOCK_UNLOCK(so); break; case SOCK_STREAM: case SOCK_SEQPACKET: soisdisconnected(unp->unp_socket); unp2->unp_conn = NULL; soisdisconnected(unp2->unp_socket); break; } } /* * unp_pcblist() walks the global list of struct unpcb's to generate a * pointer list, bumping the refcount on each unpcb. It then copies them out * sequentially, validating the generation number on each to see if it has * been detached. All of this is necessary because copyout() may sleep on * disk I/O. */ static int unp_pcblist(SYSCTL_HANDLER_ARGS) { int error, i, n; int freeunp; struct unpcb *unp, **unp_list; unp_gen_t gencnt; struct xunpgen *xug; struct unp_head *head; struct xunpcb *xu; switch ((intptr_t)arg1) { case SOCK_STREAM: head = &unp_shead; break; case SOCK_DGRAM: head = &unp_dhead; break; case SOCK_SEQPACKET: head = &unp_sphead; break; default: panic("unp_pcblist: arg1 %d", (int)(intptr_t)arg1); } /* * The process of preparing the PCB list is too time-consuming and * resource-intensive to repeat twice on every request. 
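 * Instead, a request with req->oldptr == NULL only computes a size
 * estimate; the n + n/8 slack below leaves room for sockets created
 * between that sizing call and the follow-up call that copies data out.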
*/ if (req->oldptr == NULL) { n = unp_count; req->oldidx = 2 * (sizeof *xug) + (n + n/8) * sizeof(struct xunpcb); return (0); } if (req->newptr != NULL) return (EPERM); /* * OK, now we're committed to doing something. */ xug = malloc(sizeof(*xug), M_TEMP, M_WAITOK); UNP_LIST_LOCK(); gencnt = unp_gencnt; n = unp_count; UNP_LIST_UNLOCK(); xug->xug_len = sizeof *xug; xug->xug_count = n; xug->xug_gen = gencnt; xug->xug_sogen = so_gencnt; error = SYSCTL_OUT(req, xug, sizeof *xug); if (error) { free(xug, M_TEMP); return (error); } unp_list = malloc(n * sizeof *unp_list, M_TEMP, M_WAITOK); UNP_LIST_LOCK(); for (unp = LIST_FIRST(head), i = 0; unp && i < n; unp = LIST_NEXT(unp, unp_link)) { UNP_PCB_LOCK(unp); if (unp->unp_gencnt <= gencnt) { if (cr_cansee(req->td->td_ucred, unp->unp_socket->so_cred)) { UNP_PCB_UNLOCK(unp); continue; } unp_list[i++] = unp; unp->unp_refcount++; } UNP_PCB_UNLOCK(unp); } UNP_LIST_UNLOCK(); n = i; /* In case we lost some during malloc. */ error = 0; xu = malloc(sizeof(*xu), M_TEMP, M_WAITOK | M_ZERO); for (i = 0; i < n; i++) { unp = unp_list[i]; UNP_PCB_LOCK(unp); unp->unp_refcount--; if (unp->unp_refcount != 0 && unp->unp_gencnt <= gencnt) { xu->xu_len = sizeof *xu; xu->xu_unpp = unp; /* * XXX - need more locking here to protect against * connect/disconnect races for SMP. */ if (unp->unp_addr != NULL) bcopy(unp->unp_addr, &xu->xu_addr, unp->unp_addr->sun_len); if (unp->unp_conn != NULL && unp->unp_conn->unp_addr != NULL) bcopy(unp->unp_conn->unp_addr, &xu->xu_caddr, unp->unp_conn->unp_addr->sun_len); bcopy(unp, &xu->xu_unp, sizeof *unp); sotoxsocket(unp->unp_socket, &xu->xu_socket); UNP_PCB_UNLOCK(unp); error = SYSCTL_OUT(req, xu, sizeof *xu); } else { freeunp = (unp->unp_refcount == 0); UNP_PCB_UNLOCK(unp); if (freeunp) { UNP_PCB_LOCK_DESTROY(unp); uma_zfree(unp_zone, unp); } } } free(xu, M_TEMP); if (!error) { /* * Give the user an updated idea of our state. If the * generation differs from what we told her before, she knows * that something happened while we were processing this * request, and it might be necessary to retry. */ xug->xug_gen = unp_gencnt; xug->xug_sogen = so_gencnt; xug->xug_count = unp_count; error = SYSCTL_OUT(req, xug, sizeof *xug); } free(unp_list, M_TEMP); free(xug, M_TEMP); return (error); } SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist, CTLTYPE_OPAQUE | CTLFLAG_RD, (void *)(intptr_t)SOCK_DGRAM, 0, unp_pcblist, "S,xunpcb", "List of active local datagram sockets"); SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist, CTLTYPE_OPAQUE | CTLFLAG_RD, (void *)(intptr_t)SOCK_STREAM, 0, unp_pcblist, "S,xunpcb", "List of active local stream sockets"); SYSCTL_PROC(_net_local_seqpacket, OID_AUTO, pcblist, CTLTYPE_OPAQUE | CTLFLAG_RD, (void *)(intptr_t)SOCK_SEQPACKET, 0, unp_pcblist, "S,xunpcb", "List of active local seqpacket sockets"); static void unp_shutdown(struct unpcb *unp) { struct unpcb *unp2; struct socket *so; UNP_LINK_WLOCK_ASSERT(); UNP_PCB_LOCK_ASSERT(unp); unp2 = unp->unp_conn; if ((unp->unp_socket->so_type == SOCK_STREAM || (unp->unp_socket->so_type == SOCK_SEQPACKET)) && unp2 != NULL) { so = unp2->unp_socket; if (so != NULL) socantrcvmore(so); } } static void unp_drop(struct unpcb *unp) { struct socket *so = unp->unp_socket; struct unpcb *unp2; UNP_LINK_WLOCK_ASSERT(); UNP_PCB_LOCK_ASSERT(unp); /* * Regardless of whether the socket's peer dropped the connection * with this socket by aborting or disconnecting, POSIX requires * that ECONNRESET is returned. 
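 * The error is latched in so_error below, so a thread blocked on this
 * socket in read(2) or write(2) will observe it once it is woken up.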
*/ so->so_error = ECONNRESET; unp2 = unp->unp_conn; if (unp2 == NULL) return; UNP_PCB_LOCK(unp2); unp_disconnect(unp, unp2); UNP_PCB_UNLOCK(unp2); } static void unp_freerights(struct filedescent **fdep, int fdcount) { struct file *fp; int i; KASSERT(fdcount > 0, ("%s: fdcount %d", __func__, fdcount)); for (i = 0; i < fdcount; i++) { fp = fdep[i]->fde_file; filecaps_free(&fdep[i]->fde_caps); unp_discard(fp); } free(fdep[0], M_FILECAPS); } static int unp_externalize(struct mbuf *control, struct mbuf **controlp, int flags) { struct thread *td = curthread; /* XXX */ struct cmsghdr *cm = mtod(control, struct cmsghdr *); int i; int *fdp; struct filedesc *fdesc = td->td_proc->p_fd; struct filedescent **fdep; void *data; socklen_t clen = control->m_len, datalen; int error, newfds; u_int newlen; UNP_LINK_UNLOCK_ASSERT(); error = 0; if (controlp != NULL) /* controlp == NULL => free control messages */ *controlp = NULL; while (cm != NULL) { if (sizeof(*cm) > clen || cm->cmsg_len > clen) { error = EINVAL; break; } data = CMSG_DATA(cm); datalen = (caddr_t)cm + cm->cmsg_len - (caddr_t)data; if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_RIGHTS) { newfds = datalen / sizeof(*fdep); if (newfds == 0) goto next; fdep = data; /* If we're not outputting the descriptors free them. */ if (error || controlp == NULL) { unp_freerights(fdep, newfds); goto next; } FILEDESC_XLOCK(fdesc); /* * Now change each pointer to an fd in the global * table to an integer that is the index to the local * fd table entry that we set up to point to the * global one we are transferring. */ newlen = newfds * sizeof(int); *controlp = sbcreatecontrol(NULL, newlen, SCM_RIGHTS, SOL_SOCKET); if (*controlp == NULL) { FILEDESC_XUNLOCK(fdesc); error = E2BIG; unp_freerights(fdep, newfds); goto next; } fdp = (int *) CMSG_DATA(mtod(*controlp, struct cmsghdr *)); if (fdallocn(td, 0, fdp, newfds) != 0) { FILEDESC_XUNLOCK(fdesc); error = EMSGSIZE; unp_freerights(fdep, newfds); m_freem(*controlp); *controlp = NULL; goto next; } for (i = 0; i < newfds; i++, fdp++) { _finstall(fdesc, fdep[i]->fde_file, *fdp, (flags & MSG_CMSG_CLOEXEC) != 0 ? UF_EXCLOSE : 0, &fdep[i]->fde_caps); unp_externalize_fp(fdep[i]->fde_file); } FILEDESC_XUNLOCK(fdesc); free(fdep[0], M_FILECAPS); } else { /* We can just copy anything else across. 
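 * SCM_RIGHTS is the only message type that needs translation here;
 * everything else (e.g. SCM_TIMESTAMP, SCM_CREDS) is already in its
 * user-visible representation.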
*/ if (error || controlp == NULL) goto next; *controlp = sbcreatecontrol(NULL, datalen, cm->cmsg_type, cm->cmsg_level); if (*controlp == NULL) { error = ENOBUFS; goto next; } bcopy(data, CMSG_DATA(mtod(*controlp, struct cmsghdr *)), datalen); } controlp = &(*controlp)->m_next; next: if (CMSG_SPACE(datalen) < clen) { clen -= CMSG_SPACE(datalen); cm = (struct cmsghdr *) ((caddr_t)cm + CMSG_SPACE(datalen)); } else { clen = 0; cm = NULL; } } m_freem(control); return (error); } static void unp_zone_change(void *tag) { uma_zone_set_max(unp_zone, maxsockets); } static void unp_init(void) { #ifdef VIMAGE if (!IS_DEFAULT_VNET(curvnet)) return; #endif unp_zone = uma_zcreate("unpcb", sizeof(struct unpcb), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); if (unp_zone == NULL) panic("unp_init"); uma_zone_set_max(unp_zone, maxsockets); uma_zone_set_warning(unp_zone, "kern.ipc.maxsockets limit reached"); EVENTHANDLER_REGISTER(maxsockets_change, unp_zone_change, NULL, EVENTHANDLER_PRI_ANY); LIST_INIT(&unp_dhead); LIST_INIT(&unp_shead); LIST_INIT(&unp_sphead); SLIST_INIT(&unp_defers); TIMEOUT_TASK_INIT(taskqueue_thread, &unp_gc_task, 0, unp_gc, NULL); TASK_INIT(&unp_defer_task, 0, unp_process_defers, NULL); UNP_LINK_LOCK_INIT(); UNP_LIST_LOCK_INIT(); UNP_DEFERRED_LOCK_INIT(); } static int unp_internalize(struct mbuf **controlp, struct thread *td) { struct mbuf *control = *controlp; struct proc *p = td->td_proc; struct filedesc *fdesc = p->p_fd; struct bintime *bt; struct cmsghdr *cm = mtod(control, struct cmsghdr *); struct cmsgcred *cmcred; struct filedescent *fde, **fdep, *fdev; struct file *fp; struct timeval *tv; struct timespec *ts; int i, *fdp; void *data; socklen_t clen = control->m_len, datalen; int error, oldfds; u_int newlen; UNP_LINK_UNLOCK_ASSERT(); error = 0; *controlp = NULL; while (cm != NULL) { if (sizeof(*cm) > clen || cm->cmsg_level != SOL_SOCKET || cm->cmsg_len > clen || cm->cmsg_len < sizeof(*cm)) { error = EINVAL; goto out; } data = CMSG_DATA(cm); datalen = (caddr_t)cm + cm->cmsg_len - (caddr_t)data; switch (cm->cmsg_type) { /* * Fill in credential information. */ case SCM_CREDS: *controlp = sbcreatecontrol(NULL, sizeof(*cmcred), SCM_CREDS, SOL_SOCKET); if (*controlp == NULL) { error = ENOBUFS; goto out; } cmcred = (struct cmsgcred *) CMSG_DATA(mtod(*controlp, struct cmsghdr *)); cmcred->cmcred_pid = p->p_pid; cmcred->cmcred_uid = td->td_ucred->cr_ruid; cmcred->cmcred_gid = td->td_ucred->cr_rgid; cmcred->cmcred_euid = td->td_ucred->cr_uid; cmcred->cmcred_ngroups = MIN(td->td_ucred->cr_ngroups, CMGROUP_MAX); for (i = 0; i < cmcred->cmcred_ngroups; i++) cmcred->cmcred_groups[i] = td->td_ucred->cr_groups[i]; break; case SCM_RIGHTS: oldfds = datalen / sizeof (int); if (oldfds == 0) break; /* * Check that all the FDs passed in refer to legal * files. If not, reject the entire operation. */ fdp = data; FILEDESC_SLOCK(fdesc); for (i = 0; i < oldfds; i++, fdp++) { fp = fget_locked(fdesc, *fdp); if (fp == NULL) { FILEDESC_SUNLOCK(fdesc); error = EBADF; goto out; } if (!(fp->f_ops->fo_flags & DFLAG_PASSABLE)) { FILEDESC_SUNLOCK(fdesc); error = EOPNOTSUPP; goto out; } } /* * Now replace the integer FDs with pointers to the * file structure and capability rights. 
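 * That is, a cmsg that arrived from userland as an array of ints is
 * carried through the socket buffer as an array of struct filedescent
 * pointers; unp_externalize() performs the reverse mapping on delivery.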
*/ newlen = oldfds * sizeof(fdep[0]); *controlp = sbcreatecontrol(NULL, newlen, SCM_RIGHTS, SOL_SOCKET); if (*controlp == NULL) { FILEDESC_SUNLOCK(fdesc); error = E2BIG; goto out; } fdp = data; fdep = (struct filedescent **) CMSG_DATA(mtod(*controlp, struct cmsghdr *)); fdev = malloc(sizeof(*fdev) * oldfds, M_FILECAPS, M_WAITOK); for (i = 0; i < oldfds; i++, fdev++, fdp++) { fde = &fdesc->fd_ofiles[*fdp]; fdep[i] = fdev; fdep[i]->fde_file = fde->fde_file; filecaps_copy(&fde->fde_caps, &fdep[i]->fde_caps, true); unp_internalize_fp(fdep[i]->fde_file); } FILEDESC_SUNLOCK(fdesc); break; case SCM_TIMESTAMP: *controlp = sbcreatecontrol(NULL, sizeof(*tv), SCM_TIMESTAMP, SOL_SOCKET); if (*controlp == NULL) { error = ENOBUFS; goto out; } tv = (struct timeval *) CMSG_DATA(mtod(*controlp, struct cmsghdr *)); microtime(tv); break; case SCM_BINTIME: *controlp = sbcreatecontrol(NULL, sizeof(*bt), SCM_BINTIME, SOL_SOCKET); if (*controlp == NULL) { error = ENOBUFS; goto out; } bt = (struct bintime *) CMSG_DATA(mtod(*controlp, struct cmsghdr *)); bintime(bt); break; case SCM_REALTIME: *controlp = sbcreatecontrol(NULL, sizeof(*ts), SCM_REALTIME, SOL_SOCKET); if (*controlp == NULL) { error = ENOBUFS; goto out; } ts = (struct timespec *) CMSG_DATA(mtod(*controlp, struct cmsghdr *)); nanotime(ts); break; case SCM_MONOTONIC: *controlp = sbcreatecontrol(NULL, sizeof(*ts), SCM_MONOTONIC, SOL_SOCKET); if (*controlp == NULL) { error = ENOBUFS; goto out; } ts = (struct timespec *) CMSG_DATA(mtod(*controlp, struct cmsghdr *)); nanouptime(ts); break; default: error = EINVAL; goto out; } controlp = &(*controlp)->m_next; if (CMSG_SPACE(datalen) < clen) { clen -= CMSG_SPACE(datalen); cm = (struct cmsghdr *) ((caddr_t)cm + CMSG_SPACE(datalen)); } else { clen = 0; cm = NULL; } } out: m_freem(control); return (error); } static struct mbuf * unp_addsockcred(struct thread *td, struct mbuf *control) { struct mbuf *m, *n, *n_prev; struct sockcred *sc; const struct cmsghdr *cm; int ngroups; int i; ngroups = MIN(td->td_ucred->cr_ngroups, CMGROUP_MAX); m = sbcreatecontrol(NULL, SOCKCREDSIZE(ngroups), SCM_CREDS, SOL_SOCKET); if (m == NULL) return (control); sc = (struct sockcred *) CMSG_DATA(mtod(m, struct cmsghdr *)); sc->sc_uid = td->td_ucred->cr_ruid; sc->sc_euid = td->td_ucred->cr_uid; sc->sc_gid = td->td_ucred->cr_rgid; sc->sc_egid = td->td_ucred->cr_gid; sc->sc_ngroups = ngroups; for (i = 0; i < sc->sc_ngroups; i++) sc->sc_groups[i] = td->td_ucred->cr_groups[i]; /* * Unlink SCM_CREDS control messages (struct cmsgcred), since just * created SCM_CREDS control message (struct sockcred) has another * format. */ if (control != NULL) for (n = control, n_prev = NULL; n != NULL;) { cm = mtod(n, struct cmsghdr *); if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_CREDS) { if (n_prev == NULL) control = n->m_next; else n_prev->m_next = n->m_next; n = m_free(n); } else { n_prev = n; n = n->m_next; } } /* Prepend it to the head. 
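 * of the control chain, so the receiver sees the sockcred before any
 * other control data.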
*/ m->m_next = control; return (m); } static struct unpcb * fptounp(struct file *fp) { struct socket *so; if (fp->f_type != DTYPE_SOCKET) return (NULL); if ((so = fp->f_data) == NULL) return (NULL); if (so->so_proto->pr_domain != &localdomain) return (NULL); return sotounpcb(so); } static void unp_discard(struct file *fp) { struct unp_defer *dr; if (unp_externalize_fp(fp)) { dr = malloc(sizeof(*dr), M_TEMP, M_WAITOK); dr->ud_fp = fp; UNP_DEFERRED_LOCK(); SLIST_INSERT_HEAD(&unp_defers, dr, ud_link); UNP_DEFERRED_UNLOCK(); atomic_add_int(&unp_defers_count, 1); taskqueue_enqueue(taskqueue_thread, &unp_defer_task); } else (void) closef(fp, (struct thread *)NULL); } static void unp_process_defers(void *arg __unused, int pending) { struct unp_defer *dr; SLIST_HEAD(, unp_defer) drl; int count; SLIST_INIT(&drl); for (;;) { UNP_DEFERRED_LOCK(); if (SLIST_FIRST(&unp_defers) == NULL) { UNP_DEFERRED_UNLOCK(); break; } SLIST_SWAP(&unp_defers, &drl, unp_defer); UNP_DEFERRED_UNLOCK(); count = 0; while ((dr = SLIST_FIRST(&drl)) != NULL) { SLIST_REMOVE_HEAD(&drl, ud_link); closef(dr->ud_fp, NULL); free(dr, M_TEMP); count++; } atomic_add_int(&unp_defers_count, -count); } } static void unp_internalize_fp(struct file *fp) { struct unpcb *unp; UNP_LINK_WLOCK(); if ((unp = fptounp(fp)) != NULL) { unp->unp_file = fp; unp->unp_msgcount++; } fhold(fp); unp_rights++; UNP_LINK_WUNLOCK(); } static int unp_externalize_fp(struct file *fp) { struct unpcb *unp; int ret; UNP_LINK_WLOCK(); if ((unp = fptounp(fp)) != NULL) { unp->unp_msgcount--; ret = 1; } else ret = 0; unp_rights--; UNP_LINK_WUNLOCK(); return (ret); } /* * unp_defer indicates whether additional work has been deferred for a future * pass through unp_gc(). It is thread local and does not require explicit * synchronization. */ static int unp_marked; static int unp_unreachable; static void unp_accessable(struct filedescent **fdep, int fdcount) { struct unpcb *unp; struct file *fp; int i; for (i = 0; i < fdcount; i++) { fp = fdep[i]->fde_file; if ((unp = fptounp(fp)) == NULL) continue; if (unp->unp_gcflag & UNPGC_REF) continue; unp->unp_gcflag &= ~UNPGC_DEAD; unp->unp_gcflag |= UNPGC_REF; unp_marked++; } } static void unp_gc_process(struct unpcb *unp) { struct socket *soa; struct socket *so; struct file *fp; /* Already processed. */ if (unp->unp_gcflag & UNPGC_SCANNED) return; fp = unp->unp_file; /* * Check for a socket potentially in a cycle. It must be in a * queue as indicated by msgcount, and this must equal the file * reference count. Note that when msgcount is 0 the file is NULL. */ if ((unp->unp_gcflag & UNPGC_REF) == 0 && fp && unp->unp_msgcount != 0 && fp->f_count == unp->unp_msgcount) { unp->unp_gcflag |= UNPGC_DEAD; unp_unreachable++; return; } /* * Mark all sockets we reference with RIGHTS. */ so = unp->unp_socket; if ((unp->unp_gcflag & UNPGC_IGNORE_RIGHTS) == 0) { SOCKBUF_LOCK(&so->so_rcv); unp_scan(so->so_rcv.sb_mb, unp_accessable); SOCKBUF_UNLOCK(&so->so_rcv); } /* * Mark all sockets in our accept queue.
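 * as well: rights sitting in the receive buffer of a connection that
 * has not been accept(2)ed yet must also keep their sockets reachable.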
*/ ACCEPT_LOCK(); TAILQ_FOREACH(soa, &so->so_comp, so_list) { if ((sotounpcb(soa)->unp_gcflag & UNPGC_IGNORE_RIGHTS) != 0) continue; SOCKBUF_LOCK(&soa->so_rcv); unp_scan(soa->so_rcv.sb_mb, unp_accessable); SOCKBUF_UNLOCK(&soa->so_rcv); } ACCEPT_UNLOCK(); unp->unp_gcflag |= UNPGC_SCANNED; } static int unp_recycled; SYSCTL_INT(_net_local, OID_AUTO, recycled, CTLFLAG_RD, &unp_recycled, 0, "Number of unreachable sockets claimed by the garbage collector."); static int unp_taskcount; SYSCTL_INT(_net_local, OID_AUTO, taskcount, CTLFLAG_RD, &unp_taskcount, 0, "Number of times the garbage collector has run."); static void unp_gc(__unused void *arg, int pending) { struct unp_head *heads[] = { &unp_dhead, &unp_shead, &unp_sphead, NULL }; struct unp_head **head; struct file *f, **unref; struct unpcb *unp; int i, total; unp_taskcount++; UNP_LIST_LOCK(); /* * First clear all gc flags from previous runs, apart from * UNPGC_IGNORE_RIGHTS. */ for (head = heads; *head != NULL; head++) LIST_FOREACH(unp, *head, unp_link) unp->unp_gcflag = (unp->unp_gcflag & UNPGC_IGNORE_RIGHTS); /* * Scan marking all reachable sockets with UNPGC_REF. Once a socket * is reachable all of the sockets it references are reachable. * Stop the scan once we do a complete loop without discovering * a new reachable socket. */ do { unp_unreachable = 0; unp_marked = 0; for (head = heads; *head != NULL; head++) LIST_FOREACH(unp, *head, unp_link) unp_gc_process(unp); } while (unp_marked); UNP_LIST_UNLOCK(); if (unp_unreachable == 0) return; /* * Allocate space for a local list of dead unpcbs. */ unref = malloc(unp_unreachable * sizeof(struct file *), M_TEMP, M_WAITOK); /* * Iterate looking for sockets which have been specifically marked * as unreachable and store them locally. */ UNP_LINK_RLOCK(); UNP_LIST_LOCK(); for (total = 0, head = heads; *head != NULL; head++) LIST_FOREACH(unp, *head, unp_link) if ((unp->unp_gcflag & UNPGC_DEAD) != 0) { f = unp->unp_file; if (unp->unp_msgcount == 0 || f == NULL || f->f_count != unp->unp_msgcount) continue; unref[total++] = f; fhold(f); KASSERT(total <= unp_unreachable, ("unp_gc: incorrect unreachable count.")); } UNP_LIST_UNLOCK(); UNP_LINK_RUNLOCK(); /* * Now flush all sockets, freeing rights. This will free the * struct files associated with these sockets but leave each socket * with one remaining ref. */ for (i = 0; i < total; i++) { struct socket *so; so = unref[i]->f_data; CURVNET_SET(so->so_vnet); sorflush(so); CURVNET_RESTORE(); } /* * And finally release the sockets so they can be reclaimed. */ for (i = 0; i < total; i++) fdrop(unref[i], NULL); unp_recycled += total; free(unref, M_TEMP); } static void unp_dispose_mbuf(struct mbuf *m) { if (m) unp_scan(m, unp_freerights); } /* * Synchronize against unp_gc, which can trip over data as we are freeing it.
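 * Setting UNPGC_IGNORE_RIGHTS below makes unp_gc_process() skip this
 * socket's receive buffer while unp_dispose_mbuf() tears it down.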
*/ static void unp_dispose(struct socket *so) { struct unpcb *unp; unp = sotounpcb(so); UNP_LIST_LOCK(); unp->unp_gcflag |= UNPGC_IGNORE_RIGHTS; UNP_LIST_UNLOCK(); unp_dispose_mbuf(so->so_rcv.sb_mb); } static void unp_scan(struct mbuf *m0, void (*op)(struct filedescent **, int)) { struct mbuf *m; struct cmsghdr *cm; void *data; socklen_t clen, datalen; while (m0 != NULL) { for (m = m0; m; m = m->m_next) { if (m->m_type != MT_CONTROL) continue; cm = mtod(m, struct cmsghdr *); clen = m->m_len; while (cm != NULL) { if (sizeof(*cm) > clen || cm->cmsg_len > clen) break; data = CMSG_DATA(cm); datalen = (caddr_t)cm + cm->cmsg_len - (caddr_t)data; if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_RIGHTS) { (*op)(data, datalen / sizeof(struct filedescent *)); } if (CMSG_SPACE(datalen) < clen) { clen -= CMSG_SPACE(datalen); cm = (struct cmsghdr *) ((caddr_t)cm + CMSG_SPACE(datalen)); } else { clen = 0; cm = NULL; } } } m0 = m0->m_nextpkt; } } /* * A helper function called by VFS before socket-type vnode reclamation. * For an active vnode it clears the unp_vnode pointer and drops the vnode's * use count. */ void vfs_unp_reclaim(struct vnode *vp) { - struct socket *so; struct unpcb *unp; int active; ASSERT_VOP_ELOCKED(vp, "vfs_unp_reclaim"); KASSERT(vp->v_type == VSOCK, ("vfs_unp_reclaim: vp->v_type != VSOCK")); active = 0; UNP_LINK_WLOCK(); - VOP_UNP_CONNECT(vp, &so); - if (so == NULL) - goto done; - unp = sotounpcb(so); + VOP_UNP_CONNECT(vp, &unp); if (unp == NULL) goto done; UNP_PCB_LOCK(unp); if (unp->unp_vnode == vp) { VOP_UNP_DETACH(vp); unp->unp_vnode = NULL; active = 1; } UNP_PCB_UNLOCK(unp); done: UNP_LINK_WUNLOCK(); if (active) vunref(vp); } #ifdef DDB static void db_print_indent(int indent) { int i; for (i = 0; i < indent; i++) db_printf(" "); } static void db_print_unpflags(int unp_flags) { int comma; comma = 0; if (unp_flags & UNP_HAVEPC) { db_printf("%sUNP_HAVEPC", comma ? ", " : ""); - comma = 1; - } - if (unp_flags & UNP_HAVEPCCACHED) { - db_printf("%sUNP_HAVEPCCACHED", comma ? ", " : ""); comma = 1; } if (unp_flags & UNP_WANTCRED) { db_printf("%sUNP_WANTCRED", comma ? ", " : ""); comma = 1; } if (unp_flags & UNP_CONNWAIT) { db_printf("%sUNP_CONNWAIT", comma ? ", " : ""); comma = 1; } if (unp_flags & UNP_CONNECTING) { db_printf("%sUNP_CONNECTING", comma ? ", " : ""); comma = 1; } if (unp_flags & UNP_BINDING) { db_printf("%sUNP_BINDING", comma ? ", " : ""); comma = 1; } } static void db_print_xucred(int indent, struct xucred *xu) { int comma, i; db_print_indent(indent); db_printf("cr_version: %u cr_uid: %u cr_ngroups: %d\n", xu->cr_version, xu->cr_uid, xu->cr_ngroups); db_print_indent(indent); db_printf("cr_groups: "); comma = 0; for (i = 0; i < xu->cr_ngroups; i++) { db_printf("%s%u", comma ? ", " : "", xu->cr_groups[i]); comma = 1; } db_printf("\n"); } static void db_print_unprefs(int indent, struct unp_head *uh) { struct unpcb *unp; int counter; counter = 0; LIST_FOREACH(unp, uh, unp_reflink) { if (counter % 4 == 0) db_print_indent(indent); db_printf("%p ", unp); if (counter % 4 == 3) db_printf("\n"); counter++; } if (counter != 0 && counter % 4 != 0) db_printf("\n"); } DB_SHOW_COMMAND(unpcb, db_show_unpcb) { struct unpcb *unp; if (!have_addr) { db_printf("usage: show unpcb <addr>\n"); return; } unp = (struct unpcb *)addr; db_printf("unp_socket: %p unp_vnode: %p\n", unp->unp_socket, unp->unp_vnode); db_printf("unp_ino: %ju unp_conn: %p\n", (uintmax_t)unp->unp_ino, unp->unp_conn); db_printf("unp_refs:\n"); db_print_unprefs(2, &unp->unp_refs); /* XXXRW: Would be nice to print the full address, if any. */ db_printf("unp_addr: %p\n", unp->unp_addr); db_printf("unp_gencnt: %llu\n", (unsigned long long)unp->unp_gencnt); db_printf("unp_flags: %x (", unp->unp_flags); db_print_unpflags(unp->unp_flags); db_printf(")\n"); db_printf("unp_peercred:\n"); db_print_xucred(2, &unp->unp_peercred); db_printf("unp_refcount: %u\n", unp->unp_refcount); } #endif Index: projects/clang500-import/sys/kern/vfs_default.c =================================================================== --- projects/clang500-import/sys/kern/vfs_default.c (revision 319548) +++ projects/clang500-import/sys/kern/vfs_default.c (revision 319549) @@ -1,1332 +1,1332 @@ /*- * Copyright (c) 1989, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed * to Berkeley by John Heidemann of the UCLA Ficus project. * * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE.
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int vop_nolookup(struct vop_lookup_args *); static int vop_norename(struct vop_rename_args *); static int vop_nostrategy(struct vop_strategy_args *); static int get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf, int dirbuflen, off_t *off, char **cpos, int *len, int *eofflag, struct thread *td); static int dirent_exists(struct vnode *vp, const char *dirname, struct thread *td); #define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4) static int vop_stdis_text(struct vop_is_text_args *ap); static int vop_stdset_text(struct vop_set_text_args *ap); static int vop_stdunset_text(struct vop_unset_text_args *ap); static int vop_stdget_writecount(struct vop_get_writecount_args *ap); static int vop_stdadd_writecount(struct vop_add_writecount_args *ap); static int vop_stdfdatasync(struct vop_fdatasync_args *ap); static int vop_stdgetpages_async(struct vop_getpages_async_args *ap); /* * This vnode table stores what we want to do if the filesystem doesn't * implement a particular VOP. * * If there is no specific entry here, we will return EOPNOTSUPP. * * Note that every filesystem has to implement either vop_access * or vop_accessx; failing to do so will result in immediate crash * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(), * which calls vop_stdaccess() etc. */ struct vop_vector default_vnodeops = { .vop_default = NULL, .vop_bypass = VOP_EOPNOTSUPP, .vop_access = vop_stdaccess, .vop_accessx = vop_stdaccessx, .vop_advise = vop_stdadvise, .vop_advlock = vop_stdadvlock, .vop_advlockasync = vop_stdadvlockasync, .vop_advlockpurge = vop_stdadvlockpurge, .vop_allocate = vop_stdallocate, .vop_bmap = vop_stdbmap, .vop_close = VOP_NULL, .vop_fsync = VOP_NULL, .vop_fdatasync = vop_stdfdatasync, .vop_getpages = vop_stdgetpages, .vop_getpages_async = vop_stdgetpages_async, .vop_getwritemount = vop_stdgetwritemount, .vop_inactive = VOP_NULL, .vop_ioctl = VOP_ENOTTY, .vop_kqfilter = vop_stdkqfilter, .vop_islocked = vop_stdislocked, .vop_lock1 = vop_stdlock, .vop_lookup = vop_nolookup, .vop_open = VOP_NULL, .vop_pathconf = VOP_EINVAL, .vop_poll = vop_nopoll, .vop_putpages = vop_stdputpages, .vop_readlink = VOP_EINVAL, .vop_rename = vop_norename, .vop_revoke = VOP_PANIC, .vop_strategy = vop_nostrategy, .vop_unlock = vop_stdunlock, .vop_vptocnp = vop_stdvptocnp, .vop_vptofh = vop_stdvptofh, .vop_unp_bind = vop_stdunp_bind, .vop_unp_connect = vop_stdunp_connect, .vop_unp_detach = vop_stdunp_detach, .vop_is_text = vop_stdis_text, .vop_set_text = vop_stdset_text, .vop_unset_text = vop_stdunset_text, .vop_get_writecount = vop_stdget_writecount, .vop_add_writecount = vop_stdadd_writecount, }; /* * Series of placeholder functions for various error returns for * VOPs. */ int vop_eopnotsupp(struct vop_generic_args *ap) { /* printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name); */ return (EOPNOTSUPP); } int vop_ebadf(struct vop_generic_args *ap) { return (EBADF); } int vop_enotty(struct vop_generic_args *ap) { return (ENOTTY); } int vop_einval(struct vop_generic_args *ap) { return (EINVAL); } int vop_enoent(struct vop_generic_args *ap) { return (ENOENT); } int vop_null(struct vop_generic_args *ap) { return (0); } /* * Helper function to panic on some bad VOPs in some filesystems. 
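 * (For instance, default_vnodeops above maps vop_revoke to VOP_PANIC:
 * a filesystem that reaches the default revoke has a hard bug.)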
*/ int vop_panic(struct vop_generic_args *ap) { panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name); } /* * vop_std<x> and vop_no<x> are default functions for use by * filesystems that need the "default reasonable" implementation for a * particular operation. * * The documentation for the operations they implement exists (if it exists) * in the VOP_<OPERATION>(9) manpage (all uppercase). */ /* * Default vop for filesystems that do not support name lookup */ static int vop_nolookup(ap) struct vop_lookup_args /* { struct vnode *a_dvp; struct vnode **a_vpp; struct componentname *a_cnp; } */ *ap; { *ap->a_vpp = NULL; return (ENOTDIR); } /* * vop_norename: * * Handle unlock and reference counting for arguments of vop_rename * for filesystems that do not implement rename operation. */ static int vop_norename(struct vop_rename_args *ap) { vop_rename_fail(ap); return (EOPNOTSUPP); } /* * vop_nostrategy: * * Strategy routine for VFS devices that have none. * * BIO_ERROR and B_INVAL must be cleared prior to calling any strategy * routine. Typically this is done for a BIO_READ strategy call. * Typically B_INVAL is assumed to already be clear prior to a write * and should not be cleared manually unless you just made the buffer * invalid. BIO_ERROR should be cleared either way. */ static int vop_nostrategy (struct vop_strategy_args *ap) { printf("No strategy for buffer at %p\n", ap->a_bp); vn_printf(ap->a_vp, "vnode "); ap->a_bp->b_ioflags |= BIO_ERROR; ap->a_bp->b_error = EOPNOTSUPP; bufdone(ap->a_bp); return (EOPNOTSUPP); } static int get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf, int dirbuflen, off_t *off, char **cpos, int *len, int *eofflag, struct thread *td) { int error, reclen; struct uio uio; struct iovec iov; struct dirent *dp; KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp)); KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp)); if (*len == 0) { iov.iov_base = dirbuf; iov.iov_len = dirbuflen; uio.uio_iov = &iov; uio.uio_iovcnt = 1; uio.uio_offset = *off; uio.uio_resid = dirbuflen; uio.uio_segflg = UIO_SYSSPACE; uio.uio_rw = UIO_READ; uio.uio_td = td; *eofflag = 0; #ifdef MAC error = mac_vnode_check_readdir(td->td_ucred, vp); if (error == 0) #endif error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag, NULL, NULL); if (error) return (error); *off = uio.uio_offset; *cpos = dirbuf; *len = (dirbuflen - uio.uio_resid); if (*len == 0) return (ENOENT); } dp = (struct dirent *)(*cpos); reclen = dp->d_reclen; *dpp = dp; /* check for malformed directory.. */ if (reclen < DIRENT_MINSIZE) return (EINVAL); *cpos += reclen; *len -= reclen; return (0); } /* * Check if a named file exists in a given directory vnode.
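 * It works by walking the directory with get_next_dirent() and comparing
 * entry names; vop_stdvptocnp() below uses it to detect names shadowed
 * by the covered vnode under a union mount.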
*/ static int dirent_exists(struct vnode *vp, const char *dirname, struct thread *td) { char *dirbuf, *cpos; int error, eofflag, dirbuflen, len, found; off_t off; struct dirent *dp; struct vattr va; KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp)); KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp)); found = 0; error = VOP_GETATTR(vp, &va, td->td_ucred); if (error) return (found); dirbuflen = DEV_BSIZE; if (dirbuflen < va.va_blocksize) dirbuflen = va.va_blocksize; dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK); off = 0; len = 0; do { error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off, &cpos, &len, &eofflag, td); if (error) goto out; if (dp->d_type != DT_WHT && dp->d_fileno != 0 && strcmp(dp->d_name, dirname) == 0) { found = 1; goto out; } } while (len > 0 || !eofflag); out: free(dirbuf, M_TEMP); return (found); } int vop_stdaccess(struct vop_access_args *ap) { KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, ("invalid bit in accmode")); return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td)); } int vop_stdaccessx(struct vop_accessx_args *ap) { int error; accmode_t accmode = ap->a_accmode; error = vfs_unixify_accmode(&accmode); if (error != 0) return (error); if (accmode == 0) return (0); return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td)); } /* * Advisory record locking support */ int vop_stdadvlock(struct vop_advlock_args *ap) { struct vnode *vp; struct vattr vattr; int error; vp = ap->a_vp; if (ap->a_fl->l_whence == SEEK_END) { /* * The NFSv4 server must avoid doing a vn_lock() here, since it * can deadlock the nfsd threads, due to a LOR. Fortunately * the NFSv4 server always uses SEEK_SET and this code is * only required for the SEEK_END case. */ vn_lock(vp, LK_SHARED | LK_RETRY); error = VOP_GETATTR(vp, &vattr, curthread->td_ucred); VOP_UNLOCK(vp, 0); if (error) return (error); } else vattr.va_size = 0; return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size)); } int vop_stdadvlockasync(struct vop_advlockasync_args *ap) { struct vnode *vp; struct vattr vattr; int error; vp = ap->a_vp; if (ap->a_fl->l_whence == SEEK_END) { /* The size argument is only needed for SEEK_END. */ vn_lock(vp, LK_SHARED | LK_RETRY); error = VOP_GETATTR(vp, &vattr, curthread->td_ucred); VOP_UNLOCK(vp, 0); if (error) return (error); } else vattr.va_size = 0; return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size)); } int vop_stdadvlockpurge(struct vop_advlockpurge_args *ap) { struct vnode *vp; vp = ap->a_vp; lf_purgelocks(vp, &vp->v_lockf); return (0); } /* * vop_stdpathconf: * * Standard implementation of POSIX pathconf, to get information about limits * for a filesystem. * Override per filesystem for the case where the filesystem has smaller * limits. */ int vop_stdpathconf(ap) struct vop_pathconf_args /* { struct vnode *a_vp; int a_name; int *a_retval; } */ *ap; { switch (ap->a_name) { case _PC_ASYNC_IO: *ap->a_retval = _POSIX_ASYNCHRONOUS_IO; return (0); case _PC_NAME_MAX: *ap->a_retval = NAME_MAX; return (0); case _PC_PATH_MAX: *ap->a_retval = PATH_MAX; return (0); case _PC_LINK_MAX: *ap->a_retval = LINK_MAX; return (0); case _PC_MAX_CANON: *ap->a_retval = MAX_CANON; return (0); case _PC_MAX_INPUT: *ap->a_retval = MAX_INPUT; return (0); case _PC_PIPE_BUF: *ap->a_retval = PIPE_BUF; return (0); case _PC_CHOWN_RESTRICTED: *ap->a_retval = 1; return (0); case _PC_VDISABLE: *ap->a_retval = _POSIX_VDISABLE; return (0); default: return (EINVAL); } /* NOTREACHED */ } /* * Standard lock, unlock and islocked functions. 
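 * They delegate to lockmgr(9) on the vnode's v_vnlock, passing the
 * vnode interlock (when one exists) as the lockmgr interlock.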
*/ int vop_stdlock(ap) struct vop_lock1_args /* { struct vnode *a_vp; int a_flags; char *file; int line; } */ *ap; { struct vnode *vp = ap->a_vp; struct mtx *ilk; ilk = VI_MTX(vp); return (lockmgr_lock_fast_path(vp->v_vnlock, ap->a_flags, (ilk != NULL) ? &ilk->lock_object : NULL, ap->a_file, ap->a_line)); } /* See above. */ int vop_stdunlock(ap) struct vop_unlock_args /* { struct vnode *a_vp; int a_flags; } */ *ap; { struct vnode *vp = ap->a_vp; struct mtx *ilk; ilk = VI_MTX(vp); return (lockmgr_unlock_fast_path(vp->v_vnlock, ap->a_flags, (ilk != NULL) ? &ilk->lock_object : NULL)); } /* See above. */ int vop_stdislocked(ap) struct vop_islocked_args /* { struct vnode *a_vp; } */ *ap; { return (lockstatus(ap->a_vp->v_vnlock)); } /* * Return true for select/poll. */ int vop_nopoll(ap) struct vop_poll_args /* { struct vnode *a_vp; int a_events; struct ucred *a_cred; struct thread *a_td; } */ *ap; { return (poll_no_poll(ap->a_events)); } /* * Implement poll for local filesystems that support it. */ int vop_stdpoll(ap) struct vop_poll_args /* { struct vnode *a_vp; int a_events; struct ucred *a_cred; struct thread *a_td; } */ *ap; { if (ap->a_events & ~POLLSTANDARD) return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events)); return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)); } /* * Return our mount point, as we will take charge of the writes. */ int vop_stdgetwritemount(ap) struct vop_getwritemount_args /* { struct vnode *a_vp; struct mount **a_mpp; } */ *ap; { struct mount *mp; /* * XXX Since this is called unlocked we may be recycled while * attempting to ref the mount. If this is the case our mountpoint * will be set to NULL. We only have to prevent this call from * returning with a ref to an incorrect mountpoint. It is not * harmful to return with a ref to our previous mountpoint. */ mp = ap->a_vp->v_mount; if (mp != NULL) { vfs_ref(mp); if (mp != ap->a_vp->v_mount) { vfs_rel(mp); mp = NULL; } } *(ap->a_mpp) = mp; return (0); } /* XXX Needs good comment and VOP_BMAP(9) manpage */ int vop_stdbmap(ap) struct vop_bmap_args /* { struct vnode *a_vp; daddr_t a_bn; struct bufobj **a_bop; daddr_t *a_bnp; int *a_runp; int *a_runb; } */ *ap; { if (ap->a_bop != NULL) *ap->a_bop = &ap->a_vp->v_bufobj; if (ap->a_bnp != NULL) *ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize); if (ap->a_runp != NULL) *ap->a_runp = 0; if (ap->a_runb != NULL) *ap->a_runb = 0; return (0); } int vop_stdfsync(ap) struct vop_fsync_args /* { struct vnode *a_vp; int a_waitfor; struct thread *a_td; } */ *ap; { struct vnode *vp = ap->a_vp; struct buf *bp; struct bufobj *bo; struct buf *nbp; int error = 0; int maxretry = 1000; /* large, arbitrarily chosen */ bo = &vp->v_bufobj; BO_LOCK(bo); loop1: /* * MARK/SCAN initialization to avoid infinite loops. */ TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) { bp->b_vflags &= ~BV_SCANNED; bp->b_error = 0; } /* * Flush all dirty buffers associated with a vnode.
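 * Each buffer is tagged BV_SCANNED when visited, so the scan can restart
 * from the head of bo_dirty after the bufobj lock is dropped without
 * revisiting buffers and looping forever.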
*/ loop2: TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { if ((bp->b_vflags & BV_SCANNED) != 0) continue; bp->b_vflags |= BV_SCANNED; if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) { if (ap->a_waitfor != MNT_WAIT) continue; if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_INTERLOCK | LK_SLEEPFAIL, BO_LOCKPTR(bo)) != 0) { BO_LOCK(bo); goto loop1; } BO_LOCK(bo); } BO_UNLOCK(bo); KASSERT(bp->b_bufobj == bo, ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); if ((bp->b_flags & B_DELWRI) == 0) panic("fsync: not dirty"); if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) { vfs_bio_awrite(bp); } else { bremfree(bp); bawrite(bp); } BO_LOCK(bo); goto loop2; } /* * If synchronous the caller expects us to completely resolve all * dirty buffers in the system. Wait for in-progress I/O to * complete (which could include background bitmap writes), then * retry if dirty blocks still exist. */ if (ap->a_waitfor == MNT_WAIT) { bufobj_wwait(bo, 0, 0); if (bo->bo_dirty.bv_cnt > 0) { /* * If we are unable to write any of these buffers * then we fail now rather than trying endlessly * to write them out. */ TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) if ((error = bp->b_error) != 0) break; if (error == 0 && --maxretry >= 0) goto loop1; error = EAGAIN; } } BO_UNLOCK(bo); if (error == EAGAIN) vn_printf(vp, "fsync: giving up on dirty "); return (error); } static int vop_stdfdatasync(struct vop_fdatasync_args *ap) { return (VOP_FSYNC(ap->a_vp, MNT_WAIT, ap->a_td)); } int vop_stdfdatasync_buf(struct vop_fdatasync_args *ap) { struct vop_fsync_args apf; apf.a_vp = ap->a_vp; apf.a_waitfor = MNT_WAIT; apf.a_td = ap->a_td; return (vop_stdfsync(&apf)); } /* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */ int vop_stdgetpages(ap) struct vop_getpages_args /* { struct vnode *a_vp; vm_page_t *a_m; int a_count; int *a_rbehind; int *a_rahead; } */ *ap; { return vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind, ap->a_rahead, NULL, NULL); } static int vop_stdgetpages_async(struct vop_getpages_async_args *ap) { int error; error = VOP_GETPAGES(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind, ap->a_rahead); ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error); return (error); } int vop_stdkqfilter(struct vop_kqfilter_args *ap) { return vfs_kqfilter(ap); } /* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). 
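 * For now this simply forwards to the generic vnode-pager implementation.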
*/ int vop_stdputpages(ap) struct vop_putpages_args /* { struct vnode *a_vp; vm_page_t *a_m; int a_count; int a_sync; int *a_rtvals; } */ *ap; { return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_sync, ap->a_rtvals); } int vop_stdvptofh(struct vop_vptofh_args *ap) { return (EOPNOTSUPP); } int vop_stdvptocnp(struct vop_vptocnp_args *ap) { struct vnode *vp = ap->a_vp; struct vnode **dvp = ap->a_vpp; struct ucred *cred = ap->a_cred; char *buf = ap->a_buf; int *buflen = ap->a_buflen; char *dirbuf, *cpos; int i, error, eofflag, dirbuflen, flags, locked, len, covered; off_t off; ino_t fileno; struct vattr va; struct nameidata nd; struct thread *td; struct dirent *dp; struct vnode *mvp; i = *buflen; error = 0; covered = 0; td = curthread; if (vp->v_type != VDIR) return (ENOENT); error = VOP_GETATTR(vp, &va, cred); if (error) return (error); VREF(vp); locked = VOP_ISLOCKED(vp); VOP_UNLOCK(vp, 0); NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE, "..", vp, td); flags = FREAD; error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL); if (error) { vn_lock(vp, locked | LK_RETRY); return (error); } NDFREE(&nd, NDF_ONLY_PNBUF); mvp = *dvp = nd.ni_vp; if (vp->v_mount != (*dvp)->v_mount && ((*dvp)->v_vflag & VV_ROOT) && ((*dvp)->v_mount->mnt_flag & MNT_UNION)) { *dvp = (*dvp)->v_mount->mnt_vnodecovered; VREF(mvp); VOP_UNLOCK(mvp, 0); vn_close(mvp, FREAD, cred, td); VREF(*dvp); vn_lock(*dvp, LK_SHARED | LK_RETRY); covered = 1; } fileno = va.va_fileid; dirbuflen = DEV_BSIZE; if (dirbuflen < va.va_blocksize) dirbuflen = va.va_blocksize; dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK); if ((*dvp)->v_type != VDIR) { error = ENOENT; goto out; } off = 0; len = 0; do { /* call VOP_READDIR of parent */ error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off, &cpos, &len, &eofflag, td); if (error) goto out; if ((dp->d_type != DT_WHT) && (dp->d_fileno == fileno)) { if (covered) { VOP_UNLOCK(*dvp, 0); vn_lock(mvp, LK_SHARED | LK_RETRY); if (dirent_exists(mvp, dp->d_name, td)) { error = ENOENT; VOP_UNLOCK(mvp, 0); vn_lock(*dvp, LK_SHARED | LK_RETRY); goto out; } VOP_UNLOCK(mvp, 0); vn_lock(*dvp, LK_SHARED | LK_RETRY); } i -= dp->d_namlen; if (i < 0) { error = ENOMEM; goto out; } if (dp->d_namlen == 1 && dp->d_name[0] == '.') { error = ENOENT; } else { bcopy(dp->d_name, buf + i, dp->d_namlen); error = 0; } goto out; } } while (len > 0 || !eofflag); error = ENOENT; out: free(dirbuf, M_TEMP); if (!error) { *buflen = i; vref(*dvp); } if (covered) { vput(*dvp); vrele(mvp); } else { VOP_UNLOCK(mvp, 0); vn_close(mvp, FREAD, cred, td); } vn_lock(vp, locked | LK_RETRY); return (error); } int vop_stdallocate(struct vop_allocate_args *ap) { #ifdef __notyet__ struct statfs *sfs; off_t maxfilesize = 0; #endif struct iovec aiov; struct vattr vattr, *vap; struct uio auio; off_t fsize, len, cur, offset; uint8_t *buf; struct thread *td; struct vnode *vp; size_t iosize; int error; buf = NULL; error = 0; td = curthread; vap = &vattr; vp = ap->a_vp; len = *ap->a_len; offset = *ap->a_offset; error = VOP_GETATTR(vp, vap, td->td_ucred); if (error != 0) goto out; fsize = vap->va_size; iosize = vap->va_blocksize; if (iosize == 0) iosize = BLKDEV_IOSIZE; if (iosize > MAXPHYS) iosize = MAXPHYS; buf = malloc(iosize, M_TEMP, M_WAITOK); #ifdef __notyet__ /* * Check if the filesystem sets f_maxfilesize; if not use * VOP_SETATTR to perform the check. 
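 * (i.e. temporarily grow the file to offset + len with VOP_SETATTR and
 * restore the old size; a filesystem that cannot represent the larger
 * size fails the first VOP_SETATTR and the allocation is rejected early).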
*/ sfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK); error = VFS_STATFS(vp->v_mount, sfs, td); if (error == 0) maxfilesize = sfs->f_maxfilesize; free(sfs, M_STATFS); if (error != 0) goto out; if (maxfilesize) { if (offset > maxfilesize || len > maxfilesize || offset + len > maxfilesize) { error = EFBIG; goto out; } } else #endif if (offset + len > vap->va_size) { /* * Test offset + len against the filesystem's maxfilesize. */ VATTR_NULL(vap); vap->va_size = offset + len; error = VOP_SETATTR(vp, vap, td->td_ucred); if (error != 0) goto out; VATTR_NULL(vap); vap->va_size = fsize; error = VOP_SETATTR(vp, vap, td->td_ucred); if (error != 0) goto out; } for (;;) { /* * Read and write back anything below the nominal file * size. There's currently no way outside the filesystem * to know whether this area is sparse or not. */ cur = iosize; if ((offset % iosize) != 0) cur -= (offset % iosize); if (cur > len) cur = len; if (offset < fsize) { aiov.iov_base = buf; aiov.iov_len = cur; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_offset = offset; auio.uio_resid = cur; auio.uio_segflg = UIO_SYSSPACE; auio.uio_rw = UIO_READ; auio.uio_td = td; error = VOP_READ(vp, &auio, 0, td->td_ucred); if (error != 0) break; if (auio.uio_resid > 0) { bzero(buf + cur - auio.uio_resid, auio.uio_resid); } } else { bzero(buf, cur); } aiov.iov_base = buf; aiov.iov_len = cur; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_offset = offset; auio.uio_resid = cur; auio.uio_segflg = UIO_SYSSPACE; auio.uio_rw = UIO_WRITE; auio.uio_td = td; error = VOP_WRITE(vp, &auio, 0, td->td_ucred); if (error != 0) break; len -= cur; offset += cur; if (len == 0) break; if (should_yield()) break; } out: *ap->a_len = len; *ap->a_offset = offset; free(buf, M_TEMP); return (error); } int vop_stdadvise(struct vop_advise_args *ap) { struct vnode *vp; struct bufobj *bo; daddr_t startn, endn; off_t start, end; int bsize, error; vp = ap->a_vp; switch (ap->a_advice) { case POSIX_FADV_WILLNEED: /* * Do nothing for now. Filesystems should provide a * custom method which starts an asynchronous read of * the requested region. */ error = 0; break; case POSIX_FADV_DONTNEED: error = 0; vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); if (vp->v_iflag & VI_DOOMED) { VOP_UNLOCK(vp, 0); break; } /* * Deactivate pages in the specified range from the backing VM * object. Pages that are resident in the buffer cache will * remain wired until their corresponding buffers are released * below. 
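 * by bnoreuselist(), which walks the clean and dirty buffer lists over
 * the advised block range.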
*/ if (vp->v_object != NULL) { start = trunc_page(ap->a_start); end = round_page(ap->a_end); VM_OBJECT_RLOCK(vp->v_object); vm_object_page_noreuse(vp->v_object, OFF_TO_IDX(start), OFF_TO_IDX(end)); VM_OBJECT_RUNLOCK(vp->v_object); } bo = &vp->v_bufobj; BO_RLOCK(bo); bsize = vp->v_bufobj.bo_bsize; startn = ap->a_start / bsize; endn = ap->a_end / bsize; error = bnoreuselist(&bo->bo_clean, bo, startn, endn); if (error == 0) error = bnoreuselist(&bo->bo_dirty, bo, startn, endn); BO_RUNLOCK(bo); VOP_UNLOCK(vp, 0); break; default: error = EINVAL; break; } return (error); } int vop_stdunp_bind(struct vop_unp_bind_args *ap) { - ap->a_vp->v_socket = ap->a_socket; + ap->a_vp->v_unpcb = ap->a_unpcb; return (0); } int vop_stdunp_connect(struct vop_unp_connect_args *ap) { - *ap->a_socket = ap->a_vp->v_socket; + *ap->a_unpcb = ap->a_vp->v_unpcb; return (0); } int vop_stdunp_detach(struct vop_unp_detach_args *ap) { - ap->a_vp->v_socket = NULL; + ap->a_vp->v_unpcb = NULL; return (0); } static int vop_stdis_text(struct vop_is_text_args *ap) { return ((ap->a_vp->v_vflag & VV_TEXT) != 0); } static int vop_stdset_text(struct vop_set_text_args *ap) { ap->a_vp->v_vflag |= VV_TEXT; return (0); } static int vop_stdunset_text(struct vop_unset_text_args *ap) { ap->a_vp->v_vflag &= ~VV_TEXT; return (0); } static int vop_stdget_writecount(struct vop_get_writecount_args *ap) { *ap->a_writecount = ap->a_vp->v_writecount; return (0); } static int vop_stdadd_writecount(struct vop_add_writecount_args *ap) { ap->a_vp->v_writecount += ap->a_inc; return (0); } /* * vfs default ops * used to fill the vfs function table to get reasonable default return values. */ int vfs_stdroot (mp, flags, vpp) struct mount *mp; int flags; struct vnode **vpp; { return (EOPNOTSUPP); } int vfs_stdstatfs (mp, sbp) struct mount *mp; struct statfs *sbp; { return (EOPNOTSUPP); } int vfs_stdquotactl (mp, cmds, uid, arg) struct mount *mp; int cmds; uid_t uid; void *arg; { return (EOPNOTSUPP); } int vfs_stdsync(mp, waitfor) struct mount *mp; int waitfor; { struct vnode *vp, *mvp; struct thread *td; int error, lockreq, allerror = 0; td = curthread; lockreq = LK_EXCLUSIVE | LK_INTERLOCK; if (waitfor != MNT_WAIT) lockreq |= LK_NOWAIT; /* * Force stale buffer cache information to be flushed. 
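 * Each vnode with dirty buffers is vget()ed and VOP_FSYNC()ed; if a
 * vnode disappears while unlocked (vget() returning ENOENT), the whole
 * mount iteration is restarted.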
*/ loop: MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { if (vp->v_bufobj.bo_dirty.bv_cnt == 0) { VI_UNLOCK(vp); continue; } if ((error = vget(vp, lockreq, td)) != 0) { if (error == ENOENT) { MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); goto loop; } continue; } error = VOP_FSYNC(vp, waitfor, td); if (error) allerror = error; vput(vp); } return (allerror); } int vfs_stdnosync (mp, waitfor) struct mount *mp; int waitfor; { return (0); } int vfs_stdvget (mp, ino, flags, vpp) struct mount *mp; ino_t ino; int flags; struct vnode **vpp; { return (EOPNOTSUPP); } int vfs_stdfhtovp (mp, fhp, flags, vpp) struct mount *mp; struct fid *fhp; int flags; struct vnode **vpp; { return (EOPNOTSUPP); } int vfs_stdinit (vfsp) struct vfsconf *vfsp; { return (0); } int vfs_stduninit (vfsp) struct vfsconf *vfsp; { return(0); } int vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname) struct mount *mp; int cmd; struct vnode *filename_vp; int attrnamespace; const char *attrname; { if (filename_vp != NULL) VOP_UNLOCK(filename_vp, 0); return (EOPNOTSUPP); } int vfs_stdsysctl(mp, op, req) struct mount *mp; fsctlop_t op; struct sysctl_req *req; { return (EOPNOTSUPP); } /* end of vfs default ops */ Index: projects/clang500-import/sys/kern/vfs_subr.c =================================================================== --- projects/clang500-import/sys/kern/vfs_subr.c (revision 319548) +++ projects/clang500-import/sys/kern/vfs_subr.c (revision 319549) @@ -1,5533 +1,5536 @@ /*- * Copyright (c) 1989, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 */ /* * External virtual filesystem routines */ #include __FBSDID("$FreeBSD$"); #include "opt_compat.h" #include "opt_ddb.h" #include "opt_watchdog.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif static void delmntque(struct vnode *vp); static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, int slptimeo); static void syncer_shutdown(void *arg, int howto); static int vtryrecycle(struct vnode *vp); static void v_init_counters(struct vnode *); static void v_incr_usecount(struct vnode *); static void v_incr_usecount_locked(struct vnode *); static void v_incr_devcount(struct vnode *); static void v_decr_devcount(struct vnode *); static void vgonel(struct vnode *); static void vfs_knllock(void *arg); static void vfs_knlunlock(void *arg); static void vfs_knl_assert_locked(void *arg); static void vfs_knl_assert_unlocked(void *arg); static void vnlru_return_batches(struct vfsops *mnt_op); static void destroy_vpollinfo(struct vpollinfo *vi); /* * Number of vnodes in existence. Increased whenever getnewvnode() * allocates a new vnode, decreased in vdropl() for VI_DOOMED vnode. */ static unsigned long numvnodes; SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "Number of vnodes in existence"); static counter_u64_t vnodes_created; SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created, "Number of vnodes created by getnewvnode"); static u_long mnt_free_list_batch = 128; SYSCTL_ULONG(_vfs, OID_AUTO, mnt_free_list_batch, CTLFLAG_RW, &mnt_free_list_batch, 0, "Limit of vnodes held on mnt's free list"); /* * Conversion tables for conversion from vnode types to inode formats * and back. */ enum vtype iftovt_tab[16] = { VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD, }; int vttoif_tab[10] = { 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT }; /* * List of vnodes that are ready for recycling. */ static TAILQ_HEAD(freelst, vnode) vnode_free_list; /* * "Free" vnode target. Free vnodes are rarely completely free, but are * just ones that are cheap to recycle. Usually they are for files which * have been stat'd but not read; these usually have inode and namecache * data attached to them. This target is the preferred minimum size of a * sub-cache consisting mostly of such files. The system balances the size * of this sub-cache with its complement to try to prevent either from * thrashing while the other is relatively inactive. The targets express * a preference for the best balance. * * "Above" this target there are 2 further targets (watermarks) related * to recycling of free vnodes. In the best-operating case, the cache is * exactly full, the free list has size between vlowat and vhiwat above the * free target, and recycling from it and normal use maintains this state. * Sometimes the free list is below vlowat or even empty, but this state * is even better for immediate use provided the cache is not full. * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free * ones) to reach one of these states.
The watermarks are currently hard- * coded as 4% and 9% of the available space higher. These and the default * of 25% for wantfreevnodes are too large if the memory size is large. * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim * whenever vnlru_proc() becomes active. */ static u_long wantfreevnodes; SYSCTL_ULONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "Target for minimum number of \"free\" vnodes"); static u_long freevnodes; SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "Number of \"free\" vnodes"); static counter_u64_t recycles_count; SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count, "Number of vnodes recycled to meet vnode cache targets"); /* * Various variables used for debugging the new implementation of * reassignbuf(). * XXX these are probably of (very) limited utility now. */ static int reassignbufcalls; SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "Number of calls to reassignbuf"); static counter_u64_t free_owe_inact; SYSCTL_COUNTER_U64(_vfs, OID_AUTO, free_owe_inact, CTLFLAG_RD, &free_owe_inact, "Number of times free vnodes kept on active list due to VFS " "owing inactivation"); /* To keep more than one thread at a time from running vfs_getnewfsid */ static struct mtx mntid_mtx; /* * Lock for any access to the following: * vnode_free_list * numvnodes * freevnodes */ static struct mtx vnode_free_list_mtx; /* Publicly exported FS */ struct nfs_public nfs_pub; static uma_zone_t buf_trie_zone; /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */ static uma_zone_t vnode_zone; static uma_zone_t vnodepoll_zone; /* * The workitem queue. * * It is useful to delay writes of file data and filesystem metadata * for tens of seconds so that quickly created and deleted files need * not waste disk bandwidth being created and removed. To realize this, * we append vnodes to a "workitem" queue. When running with a soft * updates implementation, most pending metadata dependencies should * not wait for more than a few seconds. Thus, metadata for filesystems * mounted on block devices is delayed only about half the time that * file data is delayed. * Similarly, directory updates are more critical, so are only delayed * about a third the time that file data is delayed. Thus, there are * SYNCER_MAXDELAY queues that are processed round-robin at a rate of * one each second (driven off the filesystem syncer process). The * syncer_delayno variable indicates the next queue that is to be processed.
* Items that need to be processed soon are placed in this queue: * * syncer_workitem_pending[syncer_delayno] * * A delay of fifteen seconds is done by placing the request fifteen * entries later in the queue: * * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask] * */ static int syncer_delayno; static long syncer_mask; LIST_HEAD(synclist, bufobj); static struct synclist *syncer_workitem_pending; /* * The sync_mtx protects: * bo->bo_synclist * sync_vnode_count * syncer_delayno * syncer_state * syncer_workitem_pending * syncer_worklist_len * rushjob */ static struct mtx sync_mtx; static struct cv sync_wakeup; #define SYNCER_MAXDELAY 32 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */ static int syncdelay = 30; /* max time to delay syncing data */ static int filedelay = 30; /* time to delay syncing files */ SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "Time to delay syncing files (in seconds)"); static int dirdelay = 29; /* time to delay syncing directories */ SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "Time to delay syncing directories (in seconds)"); static int metadelay = 28; /* time to delay syncing metadata */ SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "Time to delay syncing metadata (in seconds)"); static int rushjob; /* number of slots to run ASAP */ static int stat_rush_requests; /* number of times I/O speeded up */ SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "Number of times I/O speeded up (rush requests)"); /* * When shutting down the syncer, run it at four times normal speed. */ #define SYNCER_SHUTDOWN_SPEEDUP 4 static int sync_vnode_count; static int syncer_worklist_len; static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY } syncer_state; /* Target for maximum number of vnodes. */ int desiredvnodes; static int gapvnodes; /* gap between wanted and desired */ static int vhiwat; /* enough extras after expansion */ static int vlowat; /* minimal extras before expansion */ static int vstir; /* nonzero to stir non-free vnodes */ static volatile int vsmalltrigger = 8; /* pref to keep if > this many pages */ static int sysctl_update_desiredvnodes(SYSCTL_HANDLER_ARGS) { int error, old_desiredvnodes; old_desiredvnodes = desiredvnodes; if ((error = sysctl_handle_int(oidp, arg1, arg2, req)) != 0) return (error); if (old_desiredvnodes != desiredvnodes) { wantfreevnodes = desiredvnodes / 4; /* XXX locking seems to be incomplete. */ vfs_hash_changesize(desiredvnodes); cache_changesize(desiredvnodes); } return (0); } SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes, CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, &desiredvnodes, 0, sysctl_update_desiredvnodes, "I", "Target for maximum number of vnodes"); SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW, &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)"); static int vnlru_nowhere; SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, &vnlru_nowhere, 0, "Number of times the vnlru process ran without success"); /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */ static int vnsz2log; /* * Support for the bufobj clean & dirty pctrie. */ static void * buf_trie_alloc(struct pctrie *ptree) { return uma_zalloc(buf_trie_zone, M_NOWAIT); } static void buf_trie_free(struct pctrie *ptree, void *node) { uma_zfree(buf_trie_zone, node); } PCTRIE_DEFINE(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free); /* * Initialize the vnode management data structures. 
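The workitem queue described above is a classic delay wheel: a slot index advances once per second, and an entry that should fire in N seconds is placed N slots ahead, modulo a power-of-two table size. A minimal userland sketch of just the indexing (the slot count is an assumed value standing in for syncer_maxdelay):

#include <stdio.h>

#define SLOTS 32		/* assumed; must be a power of two */
#define MASK  (SLOTS - 1)

static int delayno;		/* next slot to be processed */

static int
slot_for_delay(int delay)
{
	/* A delay of N seconds lands N entries later, wrapping around. */
	return ((delayno + delay) & MASK);
}

int
main(void)
{
	delayno = 30;
	printf("15s delay -> slot %d\n", slot_for_delay(15));	/* slot 13 */
	return (0);
}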
* * Reevaluate the following cap on the number of vnodes after the physical * memory size exceeds 512GB. In the limit, as the physical memory size * grows, the ratio of the memory size in KB to vnodes approaches 64:1. */ #ifndef MAXVNODES_MAX #define MAXVNODES_MAX (512 * 1024 * 1024 / 64) /* 8M */ #endif /* * Initialize a vnode as it first enters the zone. */ static int vnode_init(void *mem, int size, int flags) { struct vnode *vp; struct bufobj *bo; vp = mem; bzero(vp, size); /* * Setup locks. */ vp->v_vnlock = &vp->v_lock; mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); /* * By default, don't allow shared locks unless filesystems opt-in. */ lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, LK_NOSHARE | LK_IS_VNODE); /* * Initialize bufobj. */ bo = &vp->v_bufobj; rw_init(BO_LOCKPTR(bo), "bufobj interlock"); bo->bo_private = vp; TAILQ_INIT(&bo->bo_clean.bv_hd); TAILQ_INIT(&bo->bo_dirty.bv_hd); /* * Initialize namecache. */ LIST_INIT(&vp->v_cache_src); TAILQ_INIT(&vp->v_cache_dst); /* * Initialize rangelocks. */ rangelock_init(&vp->v_rl); return (0); } /* * Free a vnode when it is cleared from the zone. */ static void vnode_fini(void *mem, int size) { struct vnode *vp; struct bufobj *bo; vp = mem; rangelock_destroy(&vp->v_rl); lockdestroy(vp->v_vnlock); mtx_destroy(&vp->v_interlock); bo = &vp->v_bufobj; rw_destroy(BO_LOCKPTR(bo)); } /* * Provide the size of NFS nclnode and NFS fh for calculation of the * vnode memory consumption. The size is specified directly to * eliminate dependency on NFS-private header. * * Other filesystems may use bigger or smaller (like UFS and ZFS) * private inode data, but the NFS-based estimation is ample enough. * Still, we care about differences in the size between 64- and 32-bit * platforms. * * Namecache structure size is heuristically * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1. */ #ifdef _LP64 #define NFS_NCLNODE_SZ (528 + 64) #define NC_SZ 148 #else #define NFS_NCLNODE_SZ (360 + 32) #define NC_SZ 92 #endif static void vntblinit(void *dummy __unused) { u_int i; int physvnodes, virtvnodes; /* * Desiredvnodes is a function of the physical memory size and the * kernel's heap size. Generally speaking, it scales with the * physical memory size. The ratio of desiredvnodes to the physical * memory size is 1:16 until desiredvnodes exceeds 98,304. * Thereafter, the * marginal ratio of desiredvnodes to the physical memory size is * 1:64. However, desiredvnodes is limited by the kernel's heap * size. The memory required by desiredvnodes vnodes and vm objects * must not exceed 1/10th of the kernel's heap size. */ physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 + 3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64; virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) + sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ)); desiredvnodes = min(physvnodes, virtvnodes); if (desiredvnodes > MAXVNODES_MAX) { if (bootverbose) printf("Reducing kern.maxvnodes %d -> %d\n", desiredvnodes, MAXVNODES_MAX); desiredvnodes = MAXVNODES_MAX; } wantfreevnodes = desiredvnodes / 4; mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); TAILQ_INIT(&vnode_free_list); mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF); vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL, vnode_init, vnode_fini, UMA_ALIGN_PTR, 0); vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); /* * Preallocate enough nodes to support one per buf so that * we cannot fail an insert.
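The sizing logic in vntblinit() above takes the minimum of a physical-memory bound and a kernel-heap bound. A worked example of the physical bound with assumed inputs (16GB of memory expressed in KB and a made-up maxproc; real values are machine-dependent):

#include <stdio.h>

/* Worked example of the physvnodes calculation above; all inputs are
 * assumed sample values, not real tunables. */
int
main(void)
{
	long maxproc = 10000;			/* assumed */
	long mem_kb = 16L * 1024 * 1024;	/* 16GB of RAM, in KB */
	long physvnodes, virtvnodes;

	/* 1:16 ratio up to 98,304 vnodes, 1:64 marginal thereafter. */
	physvnodes = maxproc + mem_kb / 64 +
	    3 * (98304L * 16 < mem_kb ? 98304L * 16 : mem_kb) / 64;
	/* Assume the heap bound comes out larger on this machine. */
	virtvnodes = 1000000;

	/* 10000 + 262144 + 73728 = 345872 for these inputs. */
	printf("desiredvnodes = %ld\n",
	    physvnodes < virtvnodes ? physvnodes : virtvnodes);
	return (0);
}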
reassignbuf() callers cannot * tolerate the insertion failure. */ buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM); uma_prealloc(buf_trie_zone, nbuf); vnodes_created = counter_u64_alloc(M_WAITOK); recycles_count = counter_u64_alloc(M_WAITOK); free_owe_inact = counter_u64_alloc(M_WAITOK); /* * Initialize the filesystem syncer. */ syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, &syncer_mask); syncer_maxdelay = syncer_mask + 1; mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF); cv_init(&sync_wakeup, "syncer"); for (i = 1; i <= sizeof(struct vnode); i <<= 1) vnsz2log++; vnsz2log--; } SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL); /* * Mark a mount point as busy. Used to synchronize access and to delay * unmounting. Eventually, mountlist_mtx is not released on failure. * * vfs_busy() is a custom lock; it can block the caller. * vfs_busy() only sleeps if the unmount is active on the mount point. * For a mountpoint mp, vfs_busy-enforced lock is before lock of any * vnode belonging to mp. * * Lookup uses vfs_busy() to traverse mount points. * root fs var fs * / vnode lock A / vnode lock (/var) D * /var vnode lock B /log vnode lock(/var/log) E * vfs_busy lock C vfs_busy lock F * * Within each file system, the lock order is C->A->B and F->D->E. * * When traversing across mounts, the system follows that lock order: * * C->A->B * | * +->F->D->E * * The lookup() process for namei("/var") illustrates the process: * VOP_LOOKUP() obtains B while A is held * vfs_busy() obtains a shared lock on F while A and B are held * vput() releases lock on B * vput() releases lock on A * VFS_ROOT() obtains lock on D while shared lock on F is held * vfs_unbusy() releases shared lock on F * vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A. * Attempting to lock A (instead of vp_crossmp) while D is held would * violate the global order, causing deadlocks. * * dounmount() locks B while F is drained. */ int vfs_busy(struct mount *mp, int flags) { MPASS((flags & ~MBF_MASK) == 0); CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags); MNT_ILOCK(mp); MNT_REF(mp); /* * If mount point is currently being unmounted, sleep until the * mount point fate is decided. If thread doing the unmounting fails, * it will clear MNTK_UNMOUNT flag before waking us up, indicating * that this mount point has survived the unmount attempt and vfs_busy * should retry. Otherwise the unmounter thread will set MNTK_REFEXPIRE * flag in addition to MNTK_UNMOUNT, indicating that mount point is * about to be really destroyed. vfs_busy needs to release its * reference on the mount point in this case and return with ENOENT, * telling the caller that the mount it tried to busy is no longer * valid. */ while (mp->mnt_kern_flag & MNTK_UNMOUNT) { if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) { MNT_REL(mp); MNT_IUNLOCK(mp); CTR1(KTR_VFS, "%s: failed busying before sleeping", __func__); return (ENOENT); } if (flags & MBF_MNTLSTLOCK) mtx_unlock(&mountlist_mtx); mp->mnt_kern_flag |= MNTK_MWAIT; msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0); if (flags & MBF_MNTLSTLOCK) mtx_lock(&mountlist_mtx); MNT_ILOCK(mp); } if (flags & MBF_MNTLSTLOCK) mtx_unlock(&mountlist_mtx); mp->mnt_lockref++; MNT_IUNLOCK(mp); return (0); } /* * Free a busy filesystem.
*/ void vfs_unbusy(struct mount *mp) { CTR2(KTR_VFS, "%s: mp %p", __func__, mp); MNT_ILOCK(mp); MNT_REL(mp); KASSERT(mp->mnt_lockref > 0, ("negative mnt_lockref")); mp->mnt_lockref--; if (mp->mnt_lockref == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) { MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT); CTR1(KTR_VFS, "%s: waking up waiters", __func__); mp->mnt_kern_flag &= ~MNTK_DRAINING; wakeup(&mp->mnt_lockref); } MNT_IUNLOCK(mp); } /* * Lookup a mount point by filesystem identifier. */ struct mount * vfs_getvfs(fsid_t *fsid) { struct mount *mp; CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); mtx_lock(&mountlist_mtx); TAILQ_FOREACH(mp, &mountlist, mnt_list) { if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { vfs_ref(mp); mtx_unlock(&mountlist_mtx); return (mp); } } mtx_unlock(&mountlist_mtx); CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); return ((struct mount *) 0); } /* * Lookup a mount point by filesystem identifier, busying it before * returning. * * To avoid congestion on mountlist_mtx, implement a simple direct-mapped * cache for popular filesystem identifiers. The cache is lockless, using * the fact that struct mount's are never freed. In the worst case we may * get a pointer to an unmounted or even different filesystem, so we have to * check what we got, and go the slow way if so. */ struct mount * vfs_busyfs(fsid_t *fsid) { #define FSID_CACHE_SIZE 256 typedef struct mount * volatile vmp_t; static vmp_t cache[FSID_CACHE_SIZE]; struct mount *mp; int error; uint32_t hash; CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); hash = fsid->val[0] ^ fsid->val[1]; hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1); mp = cache[hash]; if (mp == NULL || mp->mnt_stat.f_fsid.val[0] != fsid->val[0] || mp->mnt_stat.f_fsid.val[1] != fsid->val[1]) goto slow; if (vfs_busy(mp, 0) != 0) { cache[hash] = NULL; goto slow; } if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) return (mp); else vfs_unbusy(mp); slow: mtx_lock(&mountlist_mtx); TAILQ_FOREACH(mp, &mountlist, mnt_list) { if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { error = vfs_busy(mp, MBF_MNTLSTLOCK); if (error) { cache[hash] = NULL; mtx_unlock(&mountlist_mtx); return (NULL); } cache[hash] = mp; return (mp); } } CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); mtx_unlock(&mountlist_mtx); return ((struct mount *) 0); } /* * Check if a user can access privileged mount options. */ int vfs_suser(struct mount *mp, struct thread *td) { int error; /* * If the thread is jailed, but this is not a jail-friendly file * system, deny immediately. */ if (!(mp->mnt_vfc->vfc_flags & VFCF_JAIL) && jailed(td->td_ucred)) return (EPERM); /* * If the file system was mounted outside the jail of the calling * thread, deny immediately. */ if (prison_check(td->td_ucred, mp->mnt_cred) != 0) return (EPERM); /* * If file system supports delegated administration, we don't check * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified * by the file system itself. * If this is not the user that did original mount, we check for * the PRIV_VFS_MOUNT_OWNER privilege. */ if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) && mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) { if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0) return (error); } return (0); } /* * Get a new unique fsid. Try to make its val[0] unique, since this value * will be used to create fake device numbers for stat().
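vfs_busyfs() above avoids mountlist_mtx with a direct-mapped, lockless cache indexed by folding the fsid into an 8-bit slot. A quick standalone check of the fold (the fsid values here are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define FSID_CACHE_SIZE 256	/* must stay a power of two */

int
main(void)
{
	uint32_t val0 = 0x12345678, val1 = 0x9abcdef0;	/* assumed */
	uint32_t hash;

	/* Same fold as vfs_busyfs(): xor the halves, then mask. */
	hash = val0 ^ val1;
	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
	printf("slot %u\n", hash);	/* 0 for these values */
	return (0);
}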
Also try (but * not so hard) to make its val[0] unique mod 2^16, since some emulators only * support 16-bit device numbers. We end up with unique val[0]'s for the * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. * * Keep in mind that several mounts may be running in parallel. Starting * the search one past where the previous search terminated is both a * micro-optimization and a defense against returning the same fsid to * different mounts. */ void vfs_getnewfsid(struct mount *mp) { static uint16_t mntid_base; struct mount *nmp; fsid_t tfsid; int mtype; CTR2(KTR_VFS, "%s: mp %p", __func__, mp); mtx_lock(&mntid_mtx); mtype = mp->mnt_vfc->vfc_typenum; tfsid.val[1] = mtype; mtype = (mtype & 0xFF) << 24; for (;;) { tfsid.val[0] = makedev(255, mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)); mntid_base++; if ((nmp = vfs_getvfs(&tfsid)) == NULL) break; vfs_rel(nmp); } mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; mtx_unlock(&mntid_mtx); } /* * Knob to control the precision of file timestamps: * * 0 = seconds only; nanoseconds zeroed. * 1 = seconds and nanoseconds, accurate within 1/HZ. * 2 = seconds and nanoseconds, truncated to microseconds. * >=3 = seconds and nanoseconds, maximum precision. */ enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; static int timestamp_precision = TSP_USEC; SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, &timestamp_precision, 0, "File timestamp precision (0: seconds, " "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, " "3+: sec + ns (max. precision))"); /* * Get a current timestamp. */ void vfs_timestamp(struct timespec *tsp) { struct timeval tv; switch (timestamp_precision) { case TSP_SEC: tsp->tv_sec = time_second; tsp->tv_nsec = 0; break; case TSP_HZ: getnanotime(tsp); break; case TSP_USEC: microtime(&tv); TIMEVAL_TO_TIMESPEC(&tv, tsp); break; case TSP_NSEC: default: nanotime(tsp); break; } } /* * Set vnode attributes to VNOVAL */ void vattr_null(struct vattr *vap) { vap->va_type = VNON; vap->va_size = VNOVAL; vap->va_bytes = VNOVAL; vap->va_mode = VNOVAL; vap->va_nlink = VNOVAL; vap->va_uid = VNOVAL; vap->va_gid = VNOVAL; vap->va_fsid = VNOVAL; vap->va_fileid = VNOVAL; vap->va_blocksize = VNOVAL; vap->va_rdev = VNOVAL; vap->va_atime.tv_sec = VNOVAL; vap->va_atime.tv_nsec = VNOVAL; vap->va_mtime.tv_sec = VNOVAL; vap->va_mtime.tv_nsec = VNOVAL; vap->va_ctime.tv_sec = VNOVAL; vap->va_ctime.tv_nsec = VNOVAL; vap->va_birthtime.tv_sec = VNOVAL; vap->va_birthtime.tv_nsec = VNOVAL; vap->va_flags = VNOVAL; vap->va_gen = VNOVAL; vap->va_vaflags = 0; } /* * This routine is called when we have too many vnodes. It attempts * to free vnodes and will potentially free vnodes that still * have VM backing store (VM backing store is typically the cause * of a vnode blowout so we want to do this). Therefore, this operation * is not considered cheap. * * A number of conditions may prevent a vnode from being reclaimed. * The buffer cache may have references on the vnode, a directory * vnode may still have references due to the namei cache representing * underlying files, or the vnode may be in active use. It is not * desirable to reuse such vnodes. These conditions may cause the * number of vnodes to reach some minimum value regardless of what * you set kern.maxvnodes to. Do not set kern.maxvnodes too low.
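The timestamp_precision knob above trades accuracy for cost; TSP_USEC keeps full seconds and truncates nanoseconds to microsecond precision. A userland approximation of that effect (a sketch, not the kernel's microtime() path):

#include <stdio.h>
#include <time.h>

int
main(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_nsec -= ts.tv_nsec % 1000;	/* drop sub-microsecond digits */
	printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return (0);
}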
*/ static int vlrureclaim(struct mount *mp, int reclaim_nc_src, int trigger) { struct vnode *vp; int count, done, target; done = 0; vn_start_write(NULL, &mp, V_WAIT); MNT_ILOCK(mp); count = mp->mnt_nvnodelistsize; target = count * (int64_t)gapvnodes / imax(desiredvnodes, 1); target = target / 10 + 1; while (count != 0 && done < target) { vp = TAILQ_FIRST(&mp->mnt_nvnodelist); while (vp != NULL && vp->v_type == VMARKER) vp = TAILQ_NEXT(vp, v_nmntvnodes); if (vp == NULL) break; /* * XXX LRU is completely broken for non-free vnodes. First * by calling here in mountpoint order, then by moving * unselected vnodes to the end here, and most grossly by * removing the vlruvp() function that was supposed to * maintain the order. (This function was born broken * since syncer problems prevented it doing anything.) The * order is closer to LRC (C = Created). * * LRU reclaiming of vnodes seems to have last worked in * FreeBSD-3 where LRU wasn't mentioned under any spelling. * Then there was no hold count, and inactive vnodes were * simply put on the free list in LRU order. The separate * lists also break LRU. We prefer to reclaim from the * free list for technical reasons. This tends to thrash * the free list to keep very unrecently used held vnodes. * The problem is mitigated by keeping the free list large. */ TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); --count; if (!VI_TRYLOCK(vp)) goto next_iter; /* * If it's been deconstructed already, it's still * referenced, or it exceeds the trigger, skip it. * Also skip free vnodes. We are trying to make space * to expand the free list, not reduce it. */ if (vp->v_usecount || (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || ((vp->v_iflag & VI_FREE) != 0) || (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL && vp->v_object->resident_page_count > trigger)) { VI_UNLOCK(vp); goto next_iter; } MNT_IUNLOCK(mp); vholdl(vp); if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) { vdrop(vp); goto next_iter_mntunlocked; } VI_LOCK(vp); /* * v_usecount may have been bumped after VOP_LOCK() dropped * the vnode interlock and before it was locked again. * * It is not necessary to recheck VI_DOOMED because it can * only be set by another thread that holds both the vnode * lock and vnode interlock. If another thread has the * vnode lock before we get to VOP_LOCK() and obtains the * vnode interlock after VOP_LOCK() drops the vnode * interlock, the other thread will be unable to drop the * vnode lock before our VOP_LOCK() call fails. */ if (vp->v_usecount || (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || (vp->v_iflag & VI_FREE) != 0 || (vp->v_object != NULL && vp->v_object->resident_page_count > trigger)) { VOP_UNLOCK(vp, LK_INTERLOCK); vdrop(vp); goto next_iter_mntunlocked; } KASSERT((vp->v_iflag & VI_DOOMED) == 0, ("VI_DOOMED unexpectedly detected in vlrureclaim()")); counter_u64_add(recycles_count, 1); vgonel(vp); VOP_UNLOCK(vp, 0); vdropl(vp); done++; next_iter_mntunlocked: if (!should_yield()) goto relock_mnt; goto yield; next_iter: if (!should_yield()) continue; MNT_IUNLOCK(mp); yield: kern_yield(PRI_USER); relock_mnt: MNT_ILOCK(mp); } MNT_IUNLOCK(mp); vn_finished_write(mp); return done; } static int max_vnlru_free = 10000; /* limit on vnode free requests per call */ SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free, 0, "limit on vnode free requests per call to the vnlru_free routine"); /* * Attempt to reduce the free list by the requested amount. 
*/ static void vnlru_free_locked(int count, struct vfsops *mnt_op) { struct vnode *vp; struct mount *mp; bool tried_batches; tried_batches = false; mtx_assert(&vnode_free_list_mtx, MA_OWNED); if (count > max_vnlru_free) count = max_vnlru_free; for (; count > 0; count--) { vp = TAILQ_FIRST(&vnode_free_list); /* * The list can be modified while the free_list_mtx * has been dropped and vp could be NULL here. */ if (vp == NULL) { if (tried_batches) break; mtx_unlock(&vnode_free_list_mtx); vnlru_return_batches(mnt_op); tried_batches = true; mtx_lock(&vnode_free_list_mtx); continue; } VNASSERT(vp->v_op != NULL, vp, ("vnlru_free: vnode already reclaimed.")); KASSERT((vp->v_iflag & VI_FREE) != 0, ("Removing vnode not on freelist")); KASSERT((vp->v_iflag & VI_ACTIVE) == 0, ("Mangling active vnode")); TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist); /* * Don't recycle if our vnode is from a different type * of mount point. Note that mp is type-safe, the * check does not reach unmapped address even if * vnode is reclaimed. * Don't recycle if we can't get the interlock without * blocking. */ if ((mnt_op != NULL && (mp = vp->v_mount) != NULL && mp->mnt_op != mnt_op) || !VI_TRYLOCK(vp)) { TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist); continue; } VNASSERT((vp->v_iflag & VI_FREE) != 0 && vp->v_holdcnt == 0, vp, ("vp inconsistent on freelist")); /* * The clear of VI_FREE prevents activation of the * vnode. There is no sense in putting the vnode on * the mount point active list, only to remove it * later during recycling. Inline the relevant part * of vholdl(), to avoid triggering assertions or * activating. */ freevnodes--; vp->v_iflag &= ~VI_FREE; refcount_acquire(&vp->v_holdcnt); mtx_unlock(&vnode_free_list_mtx); VI_UNLOCK(vp); vtryrecycle(vp); /* * If the recycle succeeded, this vdrop will actually free * the vnode. If not, it will simply place it back on * the free list. */ vdrop(vp); mtx_lock(&vnode_free_list_mtx); } } void vnlru_free(int count, struct vfsops *mnt_op) { mtx_lock(&vnode_free_list_mtx); vnlru_free_locked(count, mnt_op); mtx_unlock(&vnode_free_list_mtx); } /* XXX some names and initialization are bad for limits and watermarks.
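A worked numeric example of the watermark arithmetic implemented by vspace() just below, using assumed sample values (real values depend on the tuned desiredvnodes):

#include <stdio.h>

int
main(void)
{
	int desiredvnodes = 500000;			/* assumed */
	int wantfreevnodes = desiredvnodes / 4;		/* default 25% */
	int gapvnodes = desiredvnodes - wantfreevnodes;	/* 375000 */
	int vhiwat = gapvnodes / 11;	/* 34090, just under 10% of gap */
	int vlowat = vhiwat / 2;	/* 17045 */

	printf("gap %d hiwat %d lowat %d\n", gapvnodes, vhiwat, vlowat);
	return (0);
}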
*/ static int vspace(void) { int space; gapvnodes = imax(desiredvnodes - wantfreevnodes, 100); vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */ vlowat = vhiwat / 2; if (numvnodes > desiredvnodes) return (0); space = desiredvnodes - numvnodes; if (freevnodes > wantfreevnodes) space += freevnodes - wantfreevnodes; return (space); } static void vnlru_return_batch_locked(struct mount *mp) { struct vnode *vp; mtx_assert(&mp->mnt_listmtx, MA_OWNED); if (mp->mnt_tmpfreevnodelistsize == 0) return; TAILQ_FOREACH(vp, &mp->mnt_tmpfreevnodelist, v_actfreelist) { VNASSERT((vp->v_mflag & VMP_TMPMNTFREELIST) != 0, vp, ("vnode without VMP_TMPMNTFREELIST on mnt_tmpfreevnodelist")); vp->v_mflag &= ~VMP_TMPMNTFREELIST; } mtx_lock(&vnode_free_list_mtx); TAILQ_CONCAT(&vnode_free_list, &mp->mnt_tmpfreevnodelist, v_actfreelist); freevnodes += mp->mnt_tmpfreevnodelistsize; mtx_unlock(&vnode_free_list_mtx); mp->mnt_tmpfreevnodelistsize = 0; } static void vnlru_return_batch(struct mount *mp) { mtx_lock(&mp->mnt_listmtx); vnlru_return_batch_locked(mp); mtx_unlock(&mp->mnt_listmtx); } static void vnlru_return_batches(struct vfsops *mnt_op) { struct mount *mp, *nmp; bool need_unbusy; mtx_lock(&mountlist_mtx); for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { need_unbusy = false; if (mnt_op != NULL && mp->mnt_op != mnt_op) goto next; if (mp->mnt_tmpfreevnodelistsize == 0) goto next; if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK) == 0) { vnlru_return_batch(mp); need_unbusy = true; mtx_lock(&mountlist_mtx); } next: nmp = TAILQ_NEXT(mp, mnt_list); if (need_unbusy) vfs_unbusy(mp); } mtx_unlock(&mountlist_mtx); } /* * Attempt to recycle vnodes in a context that is always safe to block. * Calling vlrureclaim() from the bowels of filesystem code has some * interesting deadlock problems. */ static struct proc *vnlruproc; static int vnlruproc_sig; static void vnlru_proc(void) { struct mount *mp, *nmp; unsigned long ofreevnodes, onumvnodes; int done, force, reclaim_nc_src, trigger, usevnodes; EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc, SHUTDOWN_PRI_FIRST); force = 0; for (;;) { kproc_suspend_check(vnlruproc); mtx_lock(&vnode_free_list_mtx); /* * If numvnodes is too large (due to desiredvnodes being * adjusted using its sysctl, or emergency growth), first * try to reduce it by discarding from the free list. */ if (numvnodes > desiredvnodes) vnlru_free_locked(numvnodes - desiredvnodes, NULL); /* * Sleep if the vnode cache is in a good state. This is * when it is not over-full and has space for about a 4% * or 9% expansion (by growing its size or not excessively * reducing its free list). Otherwise, try to reclaim * space for a 10% expansion. */ if (vstir && force == 0) { force = 1; vstir = 0; } if (vspace() >= vlowat && force == 0) { vnlruproc_sig = 0; wakeup(&vnlruproc_sig); msleep(vnlruproc, &vnode_free_list_mtx, PVFS|PDROP, "vlruwt", hz); continue; } mtx_unlock(&vnode_free_list_mtx); done = 0; ofreevnodes = freevnodes; onumvnodes = numvnodes; /* * Calculate parameters for recycling. These are the same * throughout the loop to give some semblance of fairness. * The trigger point is to avoid recycling vnodes with lots * of resident pages. We aren't trying to free memory; we * are trying to recycle or at least free vnodes.
*/ if (numvnodes <= desiredvnodes) usevnodes = numvnodes - freevnodes; else usevnodes = numvnodes; if (usevnodes <= 0) usevnodes = 1; /* * The trigger value is chosen to give a conservatively * large value to ensure that it alone doesn't prevent * making progress. The value can easily be so large that * it is effectively infinite in some congested and * misconfigured cases, and this is necessary. Normally * it is about 8 to 100 (pages), which is quite large. */ trigger = vm_cnt.v_page_count * 2 / usevnodes; if (force < 2) trigger = vsmalltrigger; reclaim_nc_src = force >= 3; mtx_lock(&mountlist_mtx); for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) { nmp = TAILQ_NEXT(mp, mnt_list); continue; } done += vlrureclaim(mp, reclaim_nc_src, trigger); mtx_lock(&mountlist_mtx); nmp = TAILQ_NEXT(mp, mnt_list); vfs_unbusy(mp); } mtx_unlock(&mountlist_mtx); if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes) uma_reclaim(); if (done == 0) { if (force == 0 || force == 1) { force = 2; continue; } if (force == 2) { force = 3; continue; } force = 0; vnlru_nowhere++; tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); } else kern_yield(PRI_USER); /* * After becoming active to expand above low water, keep * active until above high water. */ force = vspace() < vhiwat; } } static struct kproc_desc vnlru_kp = { "vnlru", vnlru_proc, &vnlruproc }; SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp); /* * Routines having to do with the management of the vnode table. */ /* * Try to recycle a freed vnode. We abort if anyone picks up a reference * before we actually vgone(). This function must be called with the vnode * held to prevent the vnode from being returned to the free list midway * through vgone(). */ static int vtryrecycle(struct vnode *vp) { struct mount *vnmp; CTR2(KTR_VFS, "%s: vp %p", __func__, vp); VNASSERT(vp->v_holdcnt, vp, ("vtryrecycle: Recycling vp %p without a reference.", vp)); /* * This vnode may be found and locked via some other list, if so we * can't recycle it yet. */ if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { CTR2(KTR_VFS, "%s: impossible to recycle, vp %p lock is already held", __func__, vp); return (EWOULDBLOCK); } /* * Don't recycle if its filesystem is being suspended. */ if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { VOP_UNLOCK(vp, 0); CTR2(KTR_VFS, "%s: impossible to recycle, cannot start the write for %p", __func__, vp); return (EBUSY); } /* * If we got this far, we need to acquire the interlock and see if * anyone picked up this vnode from another list. If not, we will * mark it with DOOMED via vgonel() so that anyone who does find it * will skip over it. */ VI_LOCK(vp); if (vp->v_usecount) { VOP_UNLOCK(vp, LK_INTERLOCK); vn_finished_write(vnmp); CTR2(KTR_VFS, "%s: impossible to recycle, %p is already referenced", __func__, vp); return (EBUSY); } if ((vp->v_iflag & VI_DOOMED) == 0) { counter_u64_add(recycles_count, 1); vgonel(vp); } VOP_UNLOCK(vp, LK_INTERLOCK); vn_finished_write(vnmp); return (0); } static void vcheckspace(void) { if (vspace() < vlowat && vnlruproc_sig == 0) { vnlruproc_sig = 1; wakeup(vnlruproc); } } /* * Wait if necessary for space for a new vnode. */ static int getnewvnode_wait(int suspended) { mtx_assert(&vnode_free_list_mtx, MA_OWNED); if (numvnodes >= desiredvnodes) { if (suspended) { /* * The file system is being suspended. We cannot * risk a deadlock here, so allow allocation of * another vnode even if this would give too many.
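The trigger computed in vnlru_proc() above caps how many resident pages a vnode may have before vlrureclaim() skips it. A worked example with assumed inputs (16GB of 4K pages and 100000 in-use vnodes):

#include <stdio.h>

int
main(void)
{
	long v_page_count = 4L * 1024 * 1024;	/* 16GB / 4K, assumed */
	long usevnodes = 100000;		/* assumed */
	long trigger = v_page_count * 2 / usevnodes;

	/* 83 pages for these inputs; vnodes larger than this survive. */
	printf("skip vnodes with more than %ld resident pages\n", trigger);
	return (0);
}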
*/ return (0); } if (vnlruproc_sig == 0) { vnlruproc_sig = 1; /* avoid unnecessary wakeups */ wakeup(vnlruproc); } msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS, "vlruwk", hz); } /* Post-adjust like the pre-adjust in getnewvnode(). */ if (numvnodes + 1 > desiredvnodes && freevnodes > 1) vnlru_free_locked(1, NULL); return (numvnodes >= desiredvnodes ? ENFILE : 0); } /* * This hack is fragile, and probably not needed any more now that the * watermark handling works. */ void getnewvnode_reserve(u_int count) { struct thread *td; /* Pre-adjust like the pre-adjust in getnewvnode(), with any count. */ /* XXX no longer so quick, but this part is not racy. */ mtx_lock(&vnode_free_list_mtx); if (numvnodes + count > desiredvnodes && freevnodes > wantfreevnodes) vnlru_free_locked(ulmin(numvnodes + count - desiredvnodes, freevnodes - wantfreevnodes), NULL); mtx_unlock(&vnode_free_list_mtx); td = curthread; /* First try to be quick and racy. */ if (atomic_fetchadd_long(&numvnodes, count) + count <= desiredvnodes) { td->td_vp_reserv += count; vcheckspace(); /* XXX no longer so quick, but more racy */ return; } else atomic_subtract_long(&numvnodes, count); mtx_lock(&vnode_free_list_mtx); while (count > 0) { if (getnewvnode_wait(0) == 0) { count--; td->td_vp_reserv++; atomic_add_long(&numvnodes, 1); } } vcheckspace(); mtx_unlock(&vnode_free_list_mtx); } /* * This hack is fragile, especially if desiredvnodes or wantfreevnodes are * misconfigured or changed significantly. Reducing desiredvnodes below * the reserved amount should cause bizarre behaviour like reducing it * below the number of active vnodes -- the system will try to reduce * numvnodes to match, but should fail, so the subtraction below should * not overflow. */ void getnewvnode_drop_reserve(void) { struct thread *td; td = curthread; atomic_subtract_long(&numvnodes, td->td_vp_reserv); td->td_vp_reserv = 0; } /* * Return the next vnode from the free list. */ int getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, struct vnode **vpp) { struct vnode *vp; struct thread *td; struct lock_object *lo; static int cyclecount; int error; CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); vp = NULL; td = curthread; if (td->td_vp_reserv > 0) { td->td_vp_reserv -= 1; goto alloc; } mtx_lock(&vnode_free_list_mtx); if (numvnodes < desiredvnodes) cyclecount = 0; else if (cyclecount++ >= freevnodes) { cyclecount = 0; vstir = 1; } /* * Grow the vnode cache if it will not be above its target max * after growing. Otherwise, if the free list is nonempty, try * to reclaim 1 item from it before growing the cache (possibly * above its target max if the reclamation failed or is delayed). * Otherwise, wait for some space. In all cases, schedule * vnlru_proc() if we are getting short of space. The watermarks * should be chosen so that we never wait or even reclaim from * the free list to below its target minimum. */ if (numvnodes + 1 <= desiredvnodes) ; else if (freevnodes > 0) vnlru_free_locked(1, NULL); else { error = getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag & MNTK_SUSPEND)); #if 0 /* XXX Not all VFS_VGET/ffs_vget callers check returns. */ if (error != 0) { mtx_unlock(&vnode_free_list_mtx); return (error); } #endif } vcheckspace(); atomic_add_long(&numvnodes, 1); mtx_unlock(&vnode_free_list_mtx); alloc: counter_u64_add(vnodes_created, 1); vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK); /* * Locks are given the generic name "vnode" when created.
* Follow the historic practice of using the filesystem * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc. * * Locks live in a witness group keyed on their name. Thus, * when a lock is renamed, it must also move from the witness * group of its old name to the witness group of its new name. * * The change only needs to be made when the vnode moves * from one filesystem type to another. We ensure that each * filesystem uses a single static name pointer for its tag so * that we can compare pointers rather than doing a strcmp(). */ lo = &vp->v_vnlock->lock_object; if (lo->lo_name != tag) { lo->lo_name = tag; WITNESS_DESTROY(lo); WITNESS_INIT(lo, tag); } /* * By default, don't allow shared locks unless filesystems opt-in. */ vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; /* * Finalize various vnode identity bits. */ KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); vp->v_type = VNON; vp->v_tag = tag; vp->v_op = vops; v_init_counters(vp); vp->v_bufobj.bo_ops = &buf_ops_bio; #ifdef DIAGNOSTIC if (mp == NULL && vops != &dead_vnodeops) printf("NULL mp in getnewvnode(9), tag %s\n", tag); #endif #ifdef MAC mac_vnode_init(vp); if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) mac_vnode_associate_singlelabel(mp, vp); #endif if (mp != NULL) { vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0) vp->v_vflag |= VV_NOKNOTE; } /* * For the filesystems which do not use vfs_hash_insert(), * still initialize v_hash so that vfs_hash_index() is useful. * E.g., nullfs uses vfs_hash_index() on the lower vnode for * its own hashing. */ vp->v_hash = (uintptr_t)vp >> vnsz2log; *vpp = vp; return (0); } /* * Delete from old mount point vnode list, if on one. */ static void delmntque(struct vnode *vp) { struct mount *mp; int active; mp = vp->v_mount; if (mp == NULL) return; MNT_ILOCK(mp); VI_LOCK(vp); KASSERT(mp->mnt_activevnodelistsize <= mp->mnt_nvnodelistsize, ("Active vnode list size %d > Vnode list size %d", mp->mnt_activevnodelistsize, mp->mnt_nvnodelistsize)); active = vp->v_iflag & VI_ACTIVE; vp->v_iflag &= ~VI_ACTIVE; if (active) { mtx_lock(&mp->mnt_listmtx); TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist); mp->mnt_activevnodelistsize--; mtx_unlock(&mp->mnt_listmtx); } vp->v_mount = NULL; VI_UNLOCK(vp); VNASSERT(mp->mnt_nvnodelistsize > 0, vp, ("bad mount point vnode list size")); TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); mp->mnt_nvnodelistsize--; MNT_REL(mp); MNT_IUNLOCK(mp); } static void insmntque_stddtr(struct vnode *vp, void *dtr_arg) { vp->v_data = NULL; vp->v_op = &dead_vnodeops; vgone(vp); vput(vp); } /* * Insert into list of vnodes for the new mount point, if available. */ int insmntque1(struct vnode *vp, struct mount *mp, void (*dtr)(struct vnode *, void *), void *dtr_arg) { KASSERT(vp->v_mount == NULL, ("insmntque: vnode already on per mount vnode list")); VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); /* * We acquire the vnode interlock early to ensure that the * vnode cannot be recycled by another process releasing a * holdcnt on it before we get it on both the vnode list * and the active vnode list. The mount mutex protects only * manipulation of the vnode list and the vnode freelist * mutex protects only manipulation of the active vnode list. * Hence the need to hold the vnode interlock throughout.
*/ MNT_ILOCK(mp); VI_LOCK(vp); if (((mp->mnt_kern_flag & MNTK_NOINSMNTQ) != 0 && ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || mp->mnt_nvnodelistsize == 0)) && (vp->v_vflag & VV_FORCEINSMQ) == 0) { VI_UNLOCK(vp); MNT_IUNLOCK(mp); if (dtr != NULL) dtr(vp, dtr_arg); return (EBUSY); } vp->v_mount = mp; MNT_REF(mp); TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, ("neg mount point vnode list size")); mp->mnt_nvnodelistsize++; KASSERT((vp->v_iflag & VI_ACTIVE) == 0, ("Activating already active vnode")); vp->v_iflag |= VI_ACTIVE; mtx_lock(&mp->mnt_listmtx); TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); mp->mnt_activevnodelistsize++; mtx_unlock(&mp->mnt_listmtx); VI_UNLOCK(vp); MNT_IUNLOCK(mp); return (0); } int insmntque(struct vnode *vp, struct mount *mp) { return (insmntque1(vp, mp, insmntque_stddtr, NULL)); } /* * Flush out and invalidate all buffers associated with a bufobj * Called with the underlying object locked. */ int bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) { int error; BO_LOCK(bo); if (flags & V_SAVE) { error = bufobj_wwait(bo, slpflag, slptimeo); if (error) { BO_UNLOCK(bo); return (error); } if (bo->bo_dirty.bv_cnt > 0) { BO_UNLOCK(bo); if ((error = BO_SYNC(bo, MNT_WAIT)) != 0) return (error); /* * XXX We could save a lock/unlock if this was only * enabled under INVARIANTS */ BO_LOCK(bo); if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) panic("vinvalbuf: dirty bufs"); } } /* * If you alter this loop please notice that interlock is dropped and * reacquired in flushbuflist. Special care is needed to ensure that * no race conditions occur from this. */ do { error = flushbuflist(&bo->bo_clean, flags, bo, slpflag, slptimeo); if (error == 0 && !(flags & V_CLEANONLY)) error = flushbuflist(&bo->bo_dirty, flags, bo, slpflag, slptimeo); if (error != 0 && error != EAGAIN) { BO_UNLOCK(bo); return (error); } } while (error != 0); /* * Wait for I/O to complete. XXX needs cleaning up. The vnode can * have write I/O in-progress but if there is a VM object then the * VM object can also have read-I/O in-progress. */ do { bufobj_wwait(bo, 0, 0); if ((flags & V_VMIO) == 0) { BO_UNLOCK(bo); if (bo->bo_object != NULL) { VM_OBJECT_WLOCK(bo->bo_object); vm_object_pip_wait(bo->bo_object, "bovlbx"); VM_OBJECT_WUNLOCK(bo->bo_object); } BO_LOCK(bo); } } while (bo->bo_numoutput > 0); BO_UNLOCK(bo); /* * Destroy the copy in the VM cache, too. */ if (bo->bo_object != NULL && (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { VM_OBJECT_WLOCK(bo->bo_object); vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? OBJPR_CLEANONLY : 0); VM_OBJECT_WUNLOCK(bo->bo_object); } #ifdef INVARIANTS BO_LOCK(bo); if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0)) panic("vinvalbuf: flush failed"); BO_UNLOCK(bo); #endif return (0); } /* * Flush out and invalidate all buffers associated with a vnode. * Called with the underlying object locked. */ int vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) { CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); ASSERT_VOP_LOCKED(vp, "vinvalbuf"); if (vp->v_object != NULL && vp->v_object->handle != vp) return (0); return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); } /* * Flush out buffers on the specified list. 
* */ static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, int slptimeo) { struct buf *bp, *nbp; int retval, error; daddr_t lblkno; b_xflags_t xflags; ASSERT_BO_WLOCKED(bo); retval = 0; TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) || ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) { continue; } lblkno = 0; xflags = 0; if (nbp != NULL) { lblkno = nbp->b_lblkno; xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); } retval = EAGAIN; error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), "flushbuf", slpflag, slptimeo); if (error) { BO_LOCK(bo); return (error != ENOLCK ? error : EAGAIN); } KASSERT(bp->b_bufobj == bo, ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); /* * XXX Since there are no node locks for NFS, I * believe there is a slight chance that a delayed * write will occur while sleeping just above, so * check for it. */ if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && (flags & V_SAVE)) { bremfree(bp); bp->b_flags |= B_ASYNC; bwrite(bp); BO_LOCK(bo); return (EAGAIN); /* XXX: why not loop ? */ } bremfree(bp); bp->b_flags |= (B_INVAL | B_RELBUF); bp->b_flags &= ~B_ASYNC; brelse(bp); BO_LOCK(bo); nbp = gbincore(bo, lblkno); if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) != xflags) break; /* nbp invalid */ } return (retval); } int bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) { struct buf *bp; int error; daddr_t lblkno; ASSERT_BO_LOCKED(bo); for (lblkno = startn;;) { again: bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); if (bp == NULL || bp->b_lblkno >= endn || bp->b_lblkno < startn) break; error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); if (error != 0) { BO_RLOCK(bo); if (error == ENOLCK) goto again; return (error); } KASSERT(bp->b_bufobj == bo, ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); lblkno = bp->b_lblkno + 1; if ((bp->b_flags & B_MANAGED) == 0) bremfree(bp); bp->b_flags |= B_RELBUF; /* * In the VMIO case, use the B_NOREUSE flag to hint that the * pages backing each buffer in the range are unlikely to be * reused. Dirty buffers will have the hint applied once * they've been written. */ if (bp->b_vp->v_object != NULL) bp->b_flags |= B_NOREUSE; brelse(bp); BO_RLOCK(bo); } return (0); } /* * Truncate a file's buffer and pages to a specified length. This * is in lieu of the old vinvalbuf mechanism, which performed unneeded * sync activity. */ int vtruncbuf(struct vnode *vp, struct ucred *cred, off_t length, int blksize) { struct buf *bp, *nbp; int anyfreed; int trunclbn; struct bufobj *bo; CTR5(KTR_VFS, "%s: vp %p with cred %p and block %d:%ju", __func__, vp, cred, blksize, (uintmax_t)length); /* * Round up to the *next* lbn. 
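vtruncbuf() rounds the truncation point up to the next logical block with the howmany() just below; a standalone check of that arithmetic (macro copied from sys/param.h, block size assumed):

#include <stdio.h>

#define howmany(x, y)	(((x) + ((y) - 1)) / (y))

int
main(void)
{
	/* Truncating to 100 bytes with 512-byte blocks keeps lbn 0
	 * and invalidates from lbn 1 onward. */
	printf("%d\n", howmany(100, 512));	/* 1 */
	printf("%d\n", howmany(1024, 512));	/* 2 */
	printf("%d\n", howmany(0, 512));	/* 0 */
	return (0);
}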
*/ trunclbn = howmany(length, blksize); ASSERT_VOP_LOCKED(vp, "vtruncbuf"); restart: bo = &vp->v_bufobj; BO_LOCK(bo); anyfreed = 1; for (;anyfreed;) { anyfreed = 0; TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { if (bp->b_lblkno < trunclbn) continue; if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo)) == ENOLCK) goto restart; bremfree(bp); bp->b_flags |= (B_INVAL | B_RELBUF); bp->b_flags &= ~B_ASYNC; brelse(bp); anyfreed = 1; BO_LOCK(bo); if (nbp != NULL && (((nbp->b_xflags & BX_VNCLEAN) == 0) || (nbp->b_vp != vp) || (nbp->b_flags & B_DELWRI))) { BO_UNLOCK(bo); goto restart; } } TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { if (bp->b_lblkno < trunclbn) continue; if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo)) == ENOLCK) goto restart; bremfree(bp); bp->b_flags |= (B_INVAL | B_RELBUF); bp->b_flags &= ~B_ASYNC; brelse(bp); anyfreed = 1; BO_LOCK(bo); if (nbp != NULL && (((nbp->b_xflags & BX_VNDIRTY) == 0) || (nbp->b_vp != vp) || (nbp->b_flags & B_DELWRI) == 0)) { BO_UNLOCK(bo); goto restart; } } } if (length > 0) { restartsync: TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { if (bp->b_lblkno > 0) continue; /* * Since we hold the vnode lock this should only * fail if we're racing with the buf daemon. */ if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo)) == ENOLCK) { goto restart; } VNASSERT((bp->b_flags & B_DELWRI), vp, ("buf(%p) on dirty queue without DELWRI", bp)); bremfree(bp); bawrite(bp); BO_LOCK(bo); goto restartsync; } } bufobj_wwait(bo, 0, 0); BO_UNLOCK(bo); vnode_pager_setsize(vp, length); return (0); } static void buf_vlist_remove(struct buf *bp) { struct bufv *bv; KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); ASSERT_BO_WLOCKED(bp->b_bufobj); KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) != (BX_VNDIRTY|BX_VNCLEAN), ("buf_vlist_remove: Buf %p is on two lists", bp)); if (bp->b_xflags & BX_VNDIRTY) bv = &bp->b_bufobj->bo_dirty; else bv = &bp->b_bufobj->bo_clean; BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); bv->bv_cnt--; bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); } /* * Add the buffer to the sorted clean or dirty block list. * * NOTE: xflags is passed as a constant, optimizing this inline function! */ static void buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) { struct bufv *bv; struct buf *n; int error; ASSERT_BO_WLOCKED(bo); KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, ("dead bo %p", bo)); KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); bp->b_xflags |= xflags; if (xflags & BX_VNDIRTY) bv = &bo->bo_dirty; else bv = &bo->bo_clean; /* * Keep the list ordered. Optimize empty list insertion. Assume * we tend to grow at the tail so lookup_le should usually be cheaper * than _ge. */ if (bv->bv_cnt == 0 || bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); else TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); if (error) panic("buf_vlist_add: Preallocated nodes insufficient."); bv->bv_cnt++; } /* * Look up a buffer using the buffer tries. 
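buf_vlist_add() above keeps each buffer list sorted and checks the tail first, on the assumption that files tend to grow at the end. A userland sketch of the same tail-optimized insert over a plain tailq (unique block numbers assumed):

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct ent {
	long lblkno;
	TAILQ_ENTRY(ent) link;
};
TAILQ_HEAD(entlist, ent);

/* Insert e keeping the list sorted by lblkno; appends are the
 * common case, so probe the tail before scanning. */
static void
ordered_insert(struct entlist *head, struct ent *e)
{
	struct ent *it, *last;

	last = TAILQ_LAST(head, entlist);
	if (last == NULL || e->lblkno > last->lblkno) {
		TAILQ_INSERT_TAIL(head, e, link);
		return;
	}
	TAILQ_FOREACH(it, head, link)
		if (it->lblkno > e->lblkno)
			break;
	TAILQ_INSERT_BEFORE(it, e, link);
}

int
main(void)
{
	struct entlist head = TAILQ_HEAD_INITIALIZER(head);
	long blks[] = { 1, 3, 2, 5, 4 };
	struct ent *e;
	size_t i;

	for (i = 0; i < 5; i++) {
		e = malloc(sizeof(*e));
		e->lblkno = blks[i];
		ordered_insert(&head, e);
	}
	TAILQ_FOREACH(e, &head, link)
		printf("%ld ", e->lblkno);	/* 1 2 3 4 5 */
	printf("\n");
	return (0);
}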
*/ struct buf * gbincore(struct bufobj *bo, daddr_t lblkno) { struct buf *bp; ASSERT_BO_LOCKED(bo); bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); if (bp != NULL) return (bp); return BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno); } /* * Associate a buffer with a vnode. */ void bgetvp(struct vnode *vp, struct buf *bp) { struct bufobj *bo; bo = &vp->v_bufobj; ASSERT_BO_WLOCKED(bo); VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, ("bgetvp: bp already attached! %p", bp)); vhold(vp); bp->b_vp = vp; bp->b_bufobj = bo; /* * Insert onto list for new vnode. */ buf_vlist_add(bp, bo, BX_VNCLEAN); } /* * Disassociate a buffer from a vnode. */ void brelvp(struct buf *bp) { struct bufobj *bo; struct vnode *vp; CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); /* * Delete from old vnode list, if on one. */ vp = bp->b_vp; /* XXX */ bo = bp->b_bufobj; BO_LOCK(bo); if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) buf_vlist_remove(bp); else panic("brelvp: Buffer %p not on queue.", bp); if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { bo->bo_flag &= ~BO_ONWORKLST; mtx_lock(&sync_mtx); LIST_REMOVE(bo, bo_synclist); syncer_worklist_len--; mtx_unlock(&sync_mtx); } bp->b_vp = NULL; bp->b_bufobj = NULL; BO_UNLOCK(bo); vdrop(vp); } /* * Add an item to the syncer work queue. */ static void vn_syncer_add_to_worklist(struct bufobj *bo, int delay) { int slot; ASSERT_BO_WLOCKED(bo); mtx_lock(&sync_mtx); if (bo->bo_flag & BO_ONWORKLST) LIST_REMOVE(bo, bo_synclist); else { bo->bo_flag |= BO_ONWORKLST; syncer_worklist_len++; } if (delay > syncer_maxdelay - 2) delay = syncer_maxdelay - 2; slot = (syncer_delayno + delay) & syncer_mask; LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); mtx_unlock(&sync_mtx); } static int sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) { int error, len; mtx_lock(&sync_mtx); len = syncer_worklist_len - sync_vnode_count; mtx_unlock(&sync_mtx); error = SYSCTL_OUT(req, &len, sizeof(len)); return (error); } SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0, sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); static struct proc *updateproc; static void sched_sync(void); static struct kproc_desc up_kp = { "syncer", sched_sync, &updateproc }; SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); static int sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) { struct vnode *vp; struct mount *mp; *bo = LIST_FIRST(slp); if (*bo == NULL) return (0); vp = bo2vnode(*bo); if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) return (1); /* * We use vhold in case the vnode does not * successfully sync. vhold prevents the vnode from * going away when we unlock the sync_mtx so that * we can acquire the vnode interlock. */ vholdl(vp); mtx_unlock(&sync_mtx); VI_UNLOCK(vp); if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { vdrop(vp); mtx_lock(&sync_mtx); return (*bo == LIST_FIRST(slp)); } vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); (void) VOP_FSYNC(vp, MNT_LAZY, td); VOP_UNLOCK(vp, 0); vn_finished_write(mp); BO_LOCK(*bo); if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { /* * Put us back on the worklist. The worklist * routine will remove us from our current * position and then add us back in at a later * position. 
vn_syncer_add_to_worklist(*bo, syncdelay); } BO_UNLOCK(*bo); vdrop(vp); mtx_lock(&sync_mtx); return (0); } static int first_printf = 1; /* * System filesystem synchronizer daemon. */ static void sched_sync(void) { struct synclist *next, *slp; struct bufobj *bo; long starttime; struct thread *td = curthread; int last_work_seen; int net_worklist_len; int syncer_final_iter; int error; last_work_seen = 0; syncer_final_iter = 0; syncer_state = SYNCER_RUNNING; starttime = time_uptime; td->td_pflags |= TDP_NORUNNINGBUF; EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, SHUTDOWN_PRI_LAST); mtx_lock(&sync_mtx); for (;;) { if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter == 0) { mtx_unlock(&sync_mtx); kproc_suspend_check(td->td_proc); mtx_lock(&sync_mtx); } net_worklist_len = syncer_worklist_len - sync_vnode_count; if (syncer_state != SYNCER_RUNNING && starttime != time_uptime) { if (first_printf) { printf("\nSyncing disks, vnodes remaining... "); first_printf = 0; } printf("%d ", net_worklist_len); } starttime = time_uptime; /* * Push files whose dirty time has expired. Be careful * of interrupt race on slp queue. * * Skip over empty worklist slots when shutting down. */ do { slp = &syncer_workitem_pending[syncer_delayno]; syncer_delayno += 1; if (syncer_delayno == syncer_maxdelay) syncer_delayno = 0; next = &syncer_workitem_pending[syncer_delayno]; /* * If the worklist has wrapped since * it was emptied of all but syncer vnodes, * switch to the FINAL_DELAY state and run * for one more second. */ if (syncer_state == SYNCER_SHUTTING_DOWN && net_worklist_len == 0 && last_work_seen == syncer_delayno) { syncer_state = SYNCER_FINAL_DELAY; syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; } } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && syncer_worklist_len > 0); /* * Keep track of the last time there was anything * on the worklist other than syncer vnodes. * Return to the SHUTTING_DOWN state if any * new work appears. */ if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) last_work_seen = syncer_delayno; if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) syncer_state = SYNCER_SHUTTING_DOWN; while (!LIST_EMPTY(slp)) { error = sync_vnode(slp, &bo, td); if (error == 1) { LIST_REMOVE(bo, bo_synclist); LIST_INSERT_HEAD(next, bo, bo_synclist); continue; } if (first_printf == 0) { /* * Drop the sync mutex, because some watchdog * drivers need to sleep while patting the * watchdog. */ mtx_unlock(&sync_mtx); wdog_kern_pat(WD_LASTVAL); mtx_lock(&sync_mtx); } } if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) syncer_final_iter--; /* * The variable rushjob allows the kernel to speed up the * processing of the filesystem syncer process. A rushjob * value of N tells the filesystem syncer to process the next * N seconds worth of work on its queue ASAP. Currently rushjob * is used by the soft update code to speed up the filesystem * syncer process when the incore state is getting so far * ahead of the disk that the kernel memory pool is being * threatened with exhaustion. */ if (rushjob > 0) { rushjob -= 1; continue; } /* * Just sleep for a short period of time between * iterations when shutting down to allow some I/O * to happen. * * If it has taken us less than a second to process the * current work, then wait. Otherwise start right over * again. We can still lose time if any single round * takes more than two seconds, but it does not really * matter as we are just trying to generally pace the * filesystem activity.
*/ if (syncer_state != SYNCER_RUNNING || time_uptime == starttime) { thread_lock(td); sched_prio(td, PPAUSE); thread_unlock(td); } if (syncer_state != SYNCER_RUNNING) cv_timedwait(&sync_wakeup, &sync_mtx, hz / SYNCER_SHUTDOWN_SPEEDUP); else if (time_uptime == starttime) cv_timedwait(&sync_wakeup, &sync_mtx, hz); } } /* * Request the syncer daemon to speed up its work. * We never push it to speed up more than half of its * normal turn time, otherwise it could take over the cpu. */ int speedup_syncer(void) { int ret = 0; mtx_lock(&sync_mtx); if (rushjob < syncdelay / 2) { rushjob += 1; stat_rush_requests += 1; ret = 1; } mtx_unlock(&sync_mtx); cv_broadcast(&sync_wakeup); return (ret); } /* * Tell the syncer to speed up its work and run through its work * list several times, then tell it to shut down. */ static void syncer_shutdown(void *arg, int howto) { if (howto & RB_NOSYNC) return; mtx_lock(&sync_mtx); syncer_state = SYNCER_SHUTTING_DOWN; rushjob = 0; mtx_unlock(&sync_mtx); cv_broadcast(&sync_wakeup); kproc_shutdown(arg, howto); } void syncer_suspend(void) { syncer_shutdown(updateproc, 0); } void syncer_resume(void) { mtx_lock(&sync_mtx); first_printf = 1; syncer_state = SYNCER_RUNNING; mtx_unlock(&sync_mtx); cv_broadcast(&sync_wakeup); kproc_resume(updateproc); } /* * Reassign a buffer from one vnode to another. * Used to assign file specific control information * (indirect blocks) to the vnode to which they belong. */ void reassignbuf(struct buf *bp) { struct vnode *vp; struct bufobj *bo; int delay; #ifdef INVARIANTS struct bufv *bv; #endif vp = bp->b_vp; bo = bp->b_bufobj; ++reassignbufcalls; CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); /* * B_PAGING flagged buffers cannot be reassigned because their vp * is not fully linked in. */ if (bp->b_flags & B_PAGING) panic("cannot reassign paging buffer"); /* * Delete from old vnode list, if on one. */ BO_LOCK(bo); if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) buf_vlist_remove(bp); else panic("reassignbuf: Buffer %p not on queue.", bp); /* * If dirty, put on list of dirty buffers; otherwise insert onto list * of clean buffers. */ if (bp->b_flags & B_DELWRI) { if ((bo->bo_flag & BO_ONWORKLST) == 0) { switch (vp->v_type) { case VDIR: delay = dirdelay; break; case VCHR: delay = metadelay; break; default: delay = filedelay; } vn_syncer_add_to_worklist(bo, delay); } buf_vlist_add(bp, bo, BX_VNDIRTY); } else { buf_vlist_add(bp, bo, BX_VNCLEAN); if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { mtx_lock(&sync_mtx); LIST_REMOVE(bo, bo_synclist); syncer_worklist_len--; mtx_unlock(&sync_mtx); bo->bo_flag &= ~BO_ONWORKLST; } } #ifdef INVARIANTS bv = &bo->bo_clean; bp = TAILQ_FIRST(&bv->bv_hd); KASSERT(bp == NULL || bp->b_bufobj == bo, ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); bp = TAILQ_LAST(&bv->bv_hd, buflists); KASSERT(bp == NULL || bp->b_bufobj == bo, ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); bv = &bo->bo_dirty; bp = TAILQ_FIRST(&bv->bv_hd); KASSERT(bp == NULL || bp->b_bufobj == bo, ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); bp = TAILQ_LAST(&bv->bv_hd, buflists); KASSERT(bp == NULL || bp->b_bufobj == bo, ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); #endif BO_UNLOCK(bo); } /* * A temporary hack until refcount_* APIs are sorted out.
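 * Both helpers below are the usual lockless compare-and-swap loop.
 * Sketched, the acquire side reads:
 *
 *	old = *count;
 *	for (;;) {
 *		if (old == 0)
 *			return (0);
 *		if (atomic_fcmpset_int(count, &old, old + 1))
 *			return (1);
 *	}
 *
 * A 0->1 (or, on release, 1->0) transition makes the helper fail so
 * the caller falls back to the vnode interlock; atomic_fcmpset_int()
 * reloads old on failure, so the loop retries against fresh state.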
*/ static __inline int vfs_refcount_acquire_if_not_zero(volatile u_int *count) { u_int old; old = *count; for (;;) { if (old == 0) return (0); if (atomic_fcmpset_int(count, &old, old + 1)) return (1); } } static __inline int vfs_refcount_release_if_not_last(volatile u_int *count) { u_int old; old = *count; for (;;) { if (old == 1) return (0); if (atomic_fcmpset_int(count, &old, old - 1)) return (1); } } static void v_init_counters(struct vnode *vp) { VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, vp, ("%s called for an initialized vnode", __FUNCTION__)); ASSERT_VI_UNLOCKED(vp, __FUNCTION__); refcount_init(&vp->v_holdcnt, 1); refcount_init(&vp->v_usecount, 1); } static void v_incr_usecount_locked(struct vnode *vp) { ASSERT_VI_LOCKED(vp, __func__); if ((vp->v_iflag & VI_OWEINACT) != 0) { VNASSERT(vp->v_usecount == 0, vp, ("vnode with usecount and VI_OWEINACT set")); vp->v_iflag &= ~VI_OWEINACT; } refcount_acquire(&vp->v_usecount); v_incr_devcount(vp); } /* * Increment the use count on the vnode, taking care to reference * the driver's usecount if this is a chardev. */ static void v_incr_usecount(struct vnode *vp) { ASSERT_VI_UNLOCKED(vp, __func__); CTR2(KTR_VFS, "%s: vp %p", __func__, vp); if (vp->v_type != VCHR && vfs_refcount_acquire_if_not_zero(&vp->v_usecount)) { VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, ("vnode with usecount and VI_OWEINACT set")); } else { VI_LOCK(vp); v_incr_usecount_locked(vp); VI_UNLOCK(vp); } } /* * Increment si_usecount of the associated device, if any. */ static void v_incr_devcount(struct vnode *vp) { ASSERT_VI_LOCKED(vp, __FUNCTION__); if (vp->v_type == VCHR && vp->v_rdev != NULL) { dev_lock(); vp->v_rdev->si_usecount++; dev_unlock(); } } /* * Decrement si_usecount of the associated device, if any. */ static void v_decr_devcount(struct vnode *vp) { ASSERT_VI_LOCKED(vp, __FUNCTION__); if (vp->v_type == VCHR && vp->v_rdev != NULL) { dev_lock(); vp->v_rdev->si_usecount--; dev_unlock(); } } /* * Grab a particular vnode from the free list, increment its * reference count and lock it. VI_DOOMED is set if the vnode * is being destroyed. Only callers who specify LK_RETRY will * see doomed vnodes. If inactive processing was delayed in * vput try to do it here. * * Notes on lockless counter manipulation: * _vhold, vputx and other routines make various decisions based * on either holdcnt or usecount being 0. As long as either counter * is not transitioning 0->1 nor 1->0, the manipulation can be done * with atomic operations. Otherwise the interlock is taken covering * both the atomic and additional actions. */ int vget(struct vnode *vp, int flags, struct thread *td) { int error, oweinact; VNASSERT((flags & LK_TYPE_MASK) != 0, vp, ("vget: invalid lock operation")); if ((flags & LK_INTERLOCK) != 0) ASSERT_VI_LOCKED(vp, __func__); else ASSERT_VI_UNLOCKED(vp, __func__); if ((flags & LK_VNHELD) != 0) VNASSERT((vp->v_holdcnt > 0), vp, ("vget: LK_VNHELD passed but vnode not held")); CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); if ((flags & LK_VNHELD) == 0) _vhold(vp, (flags & LK_INTERLOCK) != 0); if ((error = vn_lock(vp, flags)) != 0) { vdrop(vp); CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, vp); return (error); } if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0) panic("vget: vn_lock failed to return ENOENT\n"); /* * We don't guarantee that any particular close will * trigger inactive processing so just make a best effort * here at preventing a reference to a removed file. If * we don't succeed no harm is done. 
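 * For example, if a final vput() could not upgrade its vnode lock it
 * leaves VI_OWEINACT set; when the vnode is next gotten with an
 * exclusive lock, the code below clears the flag and performs the
 * deferred inactive processing on that earlier caller's behalf.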
* * Upgrade our holdcnt to a usecount. */ if (vp->v_type == VCHR || !vfs_refcount_acquire_if_not_zero(&vp->v_usecount)) { VI_LOCK(vp); if ((vp->v_iflag & VI_OWEINACT) == 0) { oweinact = 0; } else { oweinact = 1; vp->v_iflag &= ~VI_OWEINACT; } refcount_acquire(&vp->v_usecount); v_incr_devcount(vp); if (oweinact && VOP_ISLOCKED(vp) == LK_EXCLUSIVE && (flags & LK_NOWAIT) == 0) vinactive(vp, td); VI_UNLOCK(vp); } return (0); } /* * Increase the reference (use) and hold count of a vnode. * This will also remove the vnode from the free list if it is presently free. */ void vref(struct vnode *vp) { CTR2(KTR_VFS, "%s: vp %p", __func__, vp); _vhold(vp, false); v_incr_usecount(vp); } void vrefl(struct vnode *vp) { ASSERT_VI_LOCKED(vp, __func__); CTR2(KTR_VFS, "%s: vp %p", __func__, vp); _vhold(vp, true); v_incr_usecount_locked(vp); } void vrefact(struct vnode *vp) { CTR2(KTR_VFS, "%s: vp %p", __func__, vp); if (__predict_false(vp->v_type == VCHR)) { VNASSERT(vp->v_holdcnt > 0 && vp->v_usecount > 0, vp, ("%s: wrong ref counts", __func__)); vref(vp); return; } #ifdef INVARIANTS int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__)); old = atomic_fetchadd_int(&vp->v_usecount, 1); VNASSERT(old > 0, vp, ("%s: wrong use count", __func__)); #else refcount_acquire(&vp->v_holdcnt); refcount_acquire(&vp->v_usecount); #endif } /* * Return reference count of a vnode. * * The results of this call are only guaranteed when some mechanism is used to * stop other processes from gaining references to the vnode. This may be the * case if the caller holds the only reference. This is also useful when stale * data is acceptable as race conditions may be accounted for by some other * means. */ int vrefcnt(struct vnode *vp) { return (vp->v_usecount); } #define VPUTX_VRELE 1 #define VPUTX_VPUT 2 #define VPUTX_VUNREF 3 /* * Decrement the use and hold counts for a vnode. * * See an explanation near vget() as to why atomic operation is safe. */ static void vputx(struct vnode *vp, int func) { int error; KASSERT(vp != NULL, ("vputx: null vp")); if (func == VPUTX_VUNREF) ASSERT_VOP_LOCKED(vp, "vunref"); else if (func == VPUTX_VPUT) ASSERT_VOP_LOCKED(vp, "vput"); else KASSERT(func == VPUTX_VRELE, ("vputx: wrong func")); ASSERT_VI_UNLOCKED(vp, __func__); CTR2(KTR_VFS, "%s: vp %p", __func__, vp); if (vp->v_type != VCHR && vfs_refcount_release_if_not_last(&vp->v_usecount)) { if (func == VPUTX_VPUT) VOP_UNLOCK(vp, 0); vdrop(vp); return; } VI_LOCK(vp); /* * We want to hold the vnode until the inactive finishes to * prevent vgone() races. We drop the use count here and the * hold count below when we're done. */ if (!refcount_release(&vp->v_usecount) || (vp->v_iflag & VI_DOINGINACT)) { if (func == VPUTX_VPUT) VOP_UNLOCK(vp, 0); v_decr_devcount(vp); vdropl(vp); return; } v_decr_devcount(vp); error = 0; if (vp->v_usecount != 0) { vn_printf(vp, "vputx: usecount not zero for vnode "); panic("vputx: usecount not zero"); } CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp); /* * We must call VOP_INACTIVE with the node locked. Mark * as VI_DOINGINACT to avoid recursion. 
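 * The lock handling differs per entry point: vrele() may sleep to
 * take the exclusive lock, while vput() and vunref() already hold a
 * lock and only attempt an upgrade (LK_UPGRADE | LK_NOWAIT and
 * LK_TRYUPGRADE, respectively); if the upgrade fails, VI_OWEINACT
 * stays set and the inactive call is deferred to a later reference
 * holder.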
*/ vp->v_iflag |= VI_OWEINACT; switch (func) { case VPUTX_VRELE: error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); VI_LOCK(vp); break; case VPUTX_VPUT: if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | LK_NOWAIT); VI_LOCK(vp); } break; case VPUTX_VUNREF: if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); VI_LOCK(vp); } break; } VNASSERT(vp->v_usecount == 0 || (vp->v_iflag & VI_OWEINACT) == 0, vp, ("vnode with usecount and VI_OWEINACT set")); if (error == 0) { if (vp->v_iflag & VI_OWEINACT) vinactive(vp, curthread); if (func != VPUTX_VUNREF) VOP_UNLOCK(vp, 0); } vdropl(vp); } /* * Vnode put/release. * If count drops to zero, call inactive routine and return to freelist. */ void vrele(struct vnode *vp) { vputx(vp, VPUTX_VRELE); } /* * Release an already locked vnode. This gives the same effects as * unlock+vrele(), but takes less time and avoids releasing and * re-acquiring the lock (as vrele() acquires the lock internally.) */ void vput(struct vnode *vp) { vputx(vp, VPUTX_VPUT); } /* * Release an exclusively locked vnode. Do not unlock the vnode lock. */ void vunref(struct vnode *vp) { vputx(vp, VPUTX_VUNREF); } /* * Increase the hold count and activate if this is the first reference. */ void _vhold(struct vnode *vp, bool locked) { struct mount *mp; if (locked) ASSERT_VI_LOCKED(vp, __func__); else ASSERT_VI_UNLOCKED(vp, __func__); CTR2(KTR_VFS, "%s: vp %p", __func__, vp); if (!locked && vfs_refcount_acquire_if_not_zero(&vp->v_holdcnt)) { VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, ("_vhold: vnode with holdcnt is free")); return; } if (!locked) VI_LOCK(vp); if ((vp->v_iflag & VI_FREE) == 0) { refcount_acquire(&vp->v_holdcnt); if (!locked) VI_UNLOCK(vp); return; } VNASSERT(vp->v_holdcnt == 0, vp, ("%s: wrong hold count", __func__)); VNASSERT(vp->v_op != NULL, vp, ("%s: vnode already reclaimed.", __func__)); /* * Remove a vnode from the free list, mark it as in use, * and put it on the active list. */ mp = vp->v_mount; mtx_lock(&mp->mnt_listmtx); if ((vp->v_mflag & VMP_TMPMNTFREELIST) != 0) { TAILQ_REMOVE(&mp->mnt_tmpfreevnodelist, vp, v_actfreelist); mp->mnt_tmpfreevnodelistsize--; vp->v_mflag &= ~VMP_TMPMNTFREELIST; } else { mtx_lock(&vnode_free_list_mtx); TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist); freevnodes--; mtx_unlock(&vnode_free_list_mtx); } KASSERT((vp->v_iflag & VI_ACTIVE) == 0, ("Activating already active vnode")); vp->v_iflag &= ~VI_FREE; vp->v_iflag |= VI_ACTIVE; TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); mp->mnt_activevnodelistsize++; mtx_unlock(&mp->mnt_listmtx); refcount_acquire(&vp->v_holdcnt); if (!locked) VI_UNLOCK(vp); } /* * Drop the hold count of the vnode. If this is the last reference to * the vnode we place it on the free list unless it has been vgone'd * (marked VI_DOOMED) in which case we will free it. * * Because the vnode vm object keeps a hold reference on the vnode if * there is at least one resident non-cached page, the vnode cannot * leave the active list without the page cleanup done.
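 * Vnodes freed here are first staged on the per-mount
 * mnt_tmpfreevnodelist and migrated to the global free list in
 * batches of mnt_free_list_batch by vnlru_return_batch_locked(), so
 * the common vdrop() path avoids the global vnode_free_list_mtx.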
*/ void _vdrop(struct vnode *vp, bool locked) { struct bufobj *bo; struct mount *mp; int active; if (locked) ASSERT_VI_LOCKED(vp, __func__); else ASSERT_VI_UNLOCKED(vp, __func__); CTR2(KTR_VFS, "%s: vp %p", __func__, vp); if ((int)vp->v_holdcnt <= 0) panic("vdrop: holdcnt %d", vp->v_holdcnt); if (vfs_refcount_release_if_not_last(&vp->v_holdcnt)) { if (locked) VI_UNLOCK(vp); return; } if (!locked) VI_LOCK(vp); if (refcount_release(&vp->v_holdcnt) == 0) { VI_UNLOCK(vp); return; } if ((vp->v_iflag & VI_DOOMED) == 0) { /* * Mark a vnode as free: remove it from its active list * and put it up for recycling on the freelist. */ VNASSERT(vp->v_op != NULL, vp, ("vdropl: vnode already reclaimed.")); VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, ("vnode already free")); VNASSERT(vp->v_holdcnt == 0, vp, ("vdropl: freeing when we shouldn't")); active = vp->v_iflag & VI_ACTIVE; if ((vp->v_iflag & VI_OWEINACT) == 0) { vp->v_iflag &= ~VI_ACTIVE; mp = vp->v_mount; mtx_lock(&mp->mnt_listmtx); if (active) { TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist); mp->mnt_activevnodelistsize--; } TAILQ_INSERT_TAIL(&mp->mnt_tmpfreevnodelist, vp, v_actfreelist); mp->mnt_tmpfreevnodelistsize++; vp->v_iflag |= VI_FREE; vp->v_mflag |= VMP_TMPMNTFREELIST; VI_UNLOCK(vp); if (mp->mnt_tmpfreevnodelistsize >= mnt_free_list_batch) vnlru_return_batch_locked(mp); mtx_unlock(&mp->mnt_listmtx); } else { VI_UNLOCK(vp); counter_u64_add(free_owe_inact, 1); } return; } /* * The vnode has been marked for destruction, so free it. * * The vnode will be returned to the zone where it will * normally remain until it is needed for another vnode. We * need to cleanup (or verify that the cleanup has already * been done) any residual data left from its current use * so as not to contaminate the freshly allocated vnode. */ CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); atomic_subtract_long(&numvnodes, 1); bo = &vp->v_bufobj; VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, ("cleaned vnode still on the free list.")); VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count")); VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, ("clean blk trie not empty")); VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, ("dirty blk trie not empty")); VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, ("Dangling rangelock waiters")); VI_UNLOCK(vp); #ifdef MAC mac_vnode_destroy(vp); #endif if (vp->v_pollinfo != NULL) { destroy_vpollinfo(vp->v_pollinfo); vp->v_pollinfo = NULL; } #ifdef INVARIANTS /* XXX Elsewhere we detect an already freed vnode via NULL v_op. */ vp->v_op = NULL; #endif - bzero(&vp->v_un, sizeof(vp->v_un)); + vp->v_mountedhere = NULL; + vp->v_unpcb = NULL; + vp->v_rdev = NULL; + vp->v_fifoinfo = NULL; vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; vp->v_iflag = 0; vp->v_vflag = 0; bo->bo_flag = 0; uma_zfree(vnode_zone, vp); } /* * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT * flags. 
DOINGINACT prevents us from recursing in calls to vinactive. * OWEINACT tracks whether a vnode missed a call to inactive due to a * failed lock upgrade. */ void vinactive(struct vnode *vp, struct thread *td) { struct vm_object *obj; ASSERT_VOP_ELOCKED(vp, "vinactive"); ASSERT_VI_LOCKED(vp, "vinactive"); VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, ("vinactive: recursed on VI_DOINGINACT")); CTR2(KTR_VFS, "%s: vp %p", __func__, vp); vp->v_iflag |= VI_DOINGINACT; vp->v_iflag &= ~VI_OWEINACT; VI_UNLOCK(vp); /* * Before moving off the active list, we must be sure that any * modified pages are converted into the vnode's dirty * buffers, since these will no longer be checked once the * vnode is on the inactive list. * * The write-out of the dirty pages is asynchronous. At the * point that VOP_INACTIVE() is called, there could still be * pending I/O and dirty pages in the object. */ obj = vp->v_object; if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0) { VM_OBJECT_WLOCK(obj); vm_object_page_clean(obj, 0, 0, 0); VM_OBJECT_WUNLOCK(obj); } VOP_INACTIVE(vp, td); VI_LOCK(vp); VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, ("vinactive: lost VI_DOINGINACT")); vp->v_iflag &= ~VI_DOINGINACT; } /* * Remove any vnodes in the vnode table belonging to mount point mp. * * If FORCECLOSE is not specified, there should not be any active ones, * return error if any are found (nb: this is a user error, not a * system error). If FORCECLOSE is specified, detach any active vnodes * that are found. * * If WRITECLOSE is set, only flush out regular file vnodes open for * writing. * * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. * * `rootrefs' specifies the base reference count for the root vnode * of this filesystem. The root vnode is considered busy if its * v_usecount exceeds this value. On a successful return, vflush() * will call vrele() on the root vnode exactly rootrefs times. * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must * be zero. */ #ifdef DIAGNOSTIC static int busyprt = 0; /* print out busy vnodes */ SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); #endif int vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) { struct vnode *vp, *mvp, *rootvp = NULL; struct vattr vattr; int busy = 0, error; CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, rootrefs, flags); if (rootrefs > 0) { KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, ("vflush: bad args")); /* * Get the filesystem root vnode. We can vput() it * immediately, since with rootrefs > 0, it won't go away. */ if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", __func__, error); return (error); } vput(rootvp); } loop: MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { vholdl(vp); error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); if (error) { vdrop(vp); MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); goto loop; } /* * Skip over vnodes marked VV_SYSTEM. */ if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { VOP_UNLOCK(vp, 0); vdrop(vp); continue; } /* * If WRITECLOSE is set, flush out unlinked but still open * files (even if open only for reading) and regular file * vnodes open for writing.
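 * For example, a file that was unlinked while still open has
 * va_nlink == 0, so it fails the skip test below and falls through
 * to the usecount/FORCECLOSE handling, while a regular file that is
 * merely open for reading (writecount == 0, nlink > 0) is skipped.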
*/ if (flags & WRITECLOSE) { if (vp->v_object != NULL) { VM_OBJECT_WLOCK(vp->v_object); vm_object_page_clean(vp->v_object, 0, 0, 0); VM_OBJECT_WUNLOCK(vp->v_object); } error = VOP_FSYNC(vp, MNT_WAIT, td); if (error != 0) { VOP_UNLOCK(vp, 0); vdrop(vp); MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); return (error); } error = VOP_GETATTR(vp, &vattr, td->td_ucred); VI_LOCK(vp); if ((vp->v_type == VNON || (error == 0 && vattr.va_nlink > 0)) && (vp->v_writecount == 0 || vp->v_type != VREG)) { VOP_UNLOCK(vp, 0); vdropl(vp); continue; } } else VI_LOCK(vp); /* * With v_usecount == 0, all we need to do is clear out the * vnode data structures and we are done. * * If FORCECLOSE is set, forcibly close the vnode. */ if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { vgonel(vp); } else { busy++; #ifdef DIAGNOSTIC if (busyprt) vn_printf(vp, "vflush: busy vnode "); #endif } VOP_UNLOCK(vp, 0); vdropl(vp); } if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { /* * If just the root vnode is busy, and if its refcount * is equal to `rootrefs', then go ahead and kill it. */ VI_LOCK(rootvp); KASSERT(busy > 0, ("vflush: not busy")); VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, ("vflush: usecount %d < rootrefs %d", rootvp->v_usecount, rootrefs)); if (busy == 1 && rootvp->v_usecount == rootrefs) { VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); vgone(rootvp); VOP_UNLOCK(rootvp, 0); busy = 0; } else VI_UNLOCK(rootvp); } if (busy) { CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, busy); return (EBUSY); } for (; rootrefs > 0; rootrefs--) vrele(rootvp); return (0); } /* * Recycle an unused vnode to the front of the free list. */ int vrecycle(struct vnode *vp) { int recycled; VI_LOCK(vp); recycled = vrecyclel(vp); VI_UNLOCK(vp); return (recycled); } /* * vrecycle, with the vp interlock held. */ int vrecyclel(struct vnode *vp) { int recycled; ASSERT_VOP_ELOCKED(vp, __func__); ASSERT_VI_LOCKED(vp, __func__); CTR2(KTR_VFS, "%s: vp %p", __func__, vp); recycled = 0; if (vp->v_usecount == 0) { recycled = 1; vgonel(vp); } return (recycled); } /* * Eliminate all activity associated with a vnode * in preparation for reuse. */ void vgone(struct vnode *vp) { VI_LOCK(vp); vgonel(vp); VI_UNLOCK(vp); } static void notify_lowervp_vfs_dummy(struct mount *mp __unused, struct vnode *lowervp __unused) { } /* * Notify upper mounts about reclaimed or unlinked vnode. 
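 * The walk below inserts a pre-allocated dummy struct mount flagged
 * MNTK_MARKER after the current entry, which lets it drop the mount
 * interlock around the VFS_RECLAIM_LOWERVP() / VFS_UNLINK_LOWERVP()
 * upcalls without losing its place in the mnt_uppers list.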
*/ void vfs_notify_upper(struct vnode *vp, int event) { static struct vfsops vgonel_vfsops = { .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy, .vfs_unlink_lowervp = notify_lowervp_vfs_dummy, }; struct mount *mp, *ump, *mmp; mp = vp->v_mount; if (mp == NULL) return; MNT_ILOCK(mp); if (TAILQ_EMPTY(&mp->mnt_uppers)) goto unlock; MNT_IUNLOCK(mp); mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO); mmp->mnt_op = &vgonel_vfsops; mmp->mnt_kern_flag |= MNTK_MARKER; MNT_ILOCK(mp); mp->mnt_kern_flag |= MNTK_VGONE_UPPER; for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) { if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) { ump = TAILQ_NEXT(ump, mnt_upper_link); continue; } TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link); MNT_IUNLOCK(mp); switch (event) { case VFS_NOTIFY_UPPER_RECLAIM: VFS_RECLAIM_LOWERVP(ump, vp); break; case VFS_NOTIFY_UPPER_UNLINK: VFS_UNLINK_LOWERVP(ump, vp); break; default: KASSERT(0, ("invalid event %d", event)); break; } MNT_ILOCK(mp); ump = TAILQ_NEXT(mmp, mnt_upper_link); TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link); } free(mmp, M_TEMP); mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER; if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) { mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER; wakeup(&mp->mnt_uppers); } unlock: MNT_IUNLOCK(mp); } /* * vgone, with the vp interlock held. */ static void vgonel(struct vnode *vp) { struct thread *td; int oweinact; int active; struct mount *mp; ASSERT_VOP_ELOCKED(vp, "vgonel"); ASSERT_VI_LOCKED(vp, "vgonel"); VNASSERT(vp->v_holdcnt, vp, ("vgonel: vp %p has no reference.", vp)); CTR2(KTR_VFS, "%s: vp %p", __func__, vp); td = curthread; /* * Don't vgonel if we're already doomed. */ if (vp->v_iflag & VI_DOOMED) return; vp->v_iflag |= VI_DOOMED; /* * Check to see if the vnode is in use. If so, we have to call * VOP_CLOSE() and VOP_INACTIVE(). */ active = vp->v_usecount; oweinact = (vp->v_iflag & VI_OWEINACT); VI_UNLOCK(vp); vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); /* * If purging an active vnode, it must be closed and * deactivated before being reclaimed. */ if (active) VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); if (oweinact || active) { VI_LOCK(vp); if ((vp->v_iflag & VI_DOINGINACT) == 0) vinactive(vp, td); VI_UNLOCK(vp); } if (vp->v_type == VSOCK) vfs_unp_reclaim(vp); /* * Clean out any buffers associated with the vnode. * If the flush fails, just toss the buffers. */ mp = NULL; if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) (void) vn_start_secondary_write(vp, &mp, V_WAIT); if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { while (vinvalbuf(vp, 0, 0, 0) != 0) ; } BO_LOCK(&vp->v_bufobj); KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && vp->v_bufobj.bo_dirty.bv_cnt == 0 && TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && vp->v_bufobj.bo_clean.bv_cnt == 0, ("vp %p bufobj not invalidated", vp)); /* * For VMIO bufobj, BO_DEAD is set in vm_object_terminate() * after the object's page queue is flushed. */ if (vp->v_bufobj.bo_object == NULL) vp->v_bufobj.bo_flag |= BO_DEAD; BO_UNLOCK(&vp->v_bufobj); /* * Reclaim the vnode. */ if (VOP_RECLAIM(vp, td)) panic("vgone: cannot reclaim"); if (mp != NULL) vn_finished_secondary_write(mp); VNASSERT(vp->v_object == NULL, vp, ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag)); /* * Clear the advisory locks and wake up waiting threads. */ (void)VOP_ADVLOCKPURGE(vp); vp->v_lockf = NULL; /* * Delete from old mount point vnode list. */ delmntque(vp); cache_purge(vp); /* * Done with purge, reset to the standard lock and invalidate * the vnode. 
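 * (From this point on, every VOP on the vnode resolves through
 * dead_vnodeops and the vnode is typed VBAD until it is reused.)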
*/ VI_LOCK(vp); vp->v_vnlock = &vp->v_lock; vp->v_op = &dead_vnodeops; vp->v_tag = "none"; vp->v_type = VBAD; } /* * Calculate the total number of references to a special device. */ int vcount(struct vnode *vp) { int count; dev_lock(); count = vp->v_rdev->si_usecount; dev_unlock(); return (count); } /* * Same as above, but using the struct cdev * as argument */ int count_dev(struct cdev *dev) { int count; dev_lock(); count = dev->si_usecount; dev_unlock(); return(count); } /* * Print out a description of a vnode. */ static char *typename[] = {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", "VMARKER"}; void vn_printf(struct vnode *vp, const char *fmt, ...) { va_list ap; char buf[256], buf2[16]; u_long flags; va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); printf("%p: ", (void *)vp); printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]); printf(" usecount %d, writecount %d, refcount %d mountedhere %p\n", vp->v_usecount, vp->v_writecount, vp->v_holdcnt, vp->v_mountedhere); buf[0] = '\0'; buf[1] = '\0'; if (vp->v_vflag & VV_ROOT) strlcat(buf, "|VV_ROOT", sizeof(buf)); if (vp->v_vflag & VV_ISTTY) strlcat(buf, "|VV_ISTTY", sizeof(buf)); if (vp->v_vflag & VV_NOSYNC) strlcat(buf, "|VV_NOSYNC", sizeof(buf)); if (vp->v_vflag & VV_ETERNALDEV) strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); if (vp->v_vflag & VV_CACHEDLABEL) strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); if (vp->v_vflag & VV_TEXT) strlcat(buf, "|VV_TEXT", sizeof(buf)); if (vp->v_vflag & VV_COPYONWRITE) strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); if (vp->v_vflag & VV_SYSTEM) strlcat(buf, "|VV_SYSTEM", sizeof(buf)); if (vp->v_vflag & VV_PROCDEP) strlcat(buf, "|VV_PROCDEP", sizeof(buf)); if (vp->v_vflag & VV_NOKNOTE) strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); if (vp->v_vflag & VV_DELETED) strlcat(buf, "|VV_DELETED", sizeof(buf)); if (vp->v_vflag & VV_MD) strlcat(buf, "|VV_MD", sizeof(buf)); if (vp->v_vflag & VV_FORCEINSMQ) strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | VV_CACHEDLABEL | VV_TEXT | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP | VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ); if (flags != 0) { snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); strlcat(buf, buf2, sizeof(buf)); } if (vp->v_iflag & VI_MOUNT) strlcat(buf, "|VI_MOUNT", sizeof(buf)); if (vp->v_iflag & VI_DOOMED) strlcat(buf, "|VI_DOOMED", sizeof(buf)); if (vp->v_iflag & VI_FREE) strlcat(buf, "|VI_FREE", sizeof(buf)); if (vp->v_iflag & VI_ACTIVE) strlcat(buf, "|VI_ACTIVE", sizeof(buf)); if (vp->v_iflag & VI_DOINGINACT) strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); if (vp->v_iflag & VI_OWEINACT) strlcat(buf, "|VI_OWEINACT", sizeof(buf)); flags = vp->v_iflag & ~(VI_MOUNT | VI_DOOMED | VI_FREE | VI_ACTIVE | VI_DOINGINACT | VI_OWEINACT); if (flags != 0) { snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); strlcat(buf, buf2, sizeof(buf)); } printf(" flags (%s)\n", buf + 1); if (mtx_owned(VI_MTX(vp))) printf(" VI_LOCKed"); if (vp->v_object != NULL) printf(" v_object %p ref %d pages %d " "cleanbuf %d dirtybuf %d\n", vp->v_object, vp->v_object->ref_count, vp->v_object->resident_page_count, vp->v_bufobj.bo_clean.bv_cnt, vp->v_bufobj.bo_dirty.bv_cnt); printf(" "); lockmgr_printinfo(vp->v_vnlock); if (vp->v_data != NULL) VOP_PRINT(vp); } #ifdef DDB /* * List all of the locked vnodes in the system. * Called when debugging the kernel.
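 * Usage from the ddb(4) prompt:
 *
 *	db> show lockedvnods
 *
 * Note the command name really is spelled without the final 'e'.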
*/ DB_SHOW_COMMAND(lockedvnods, lockedvnodes) { struct mount *mp; struct vnode *vp; /* * Note: because this is DDB, we can't obey the locking semantics * for these structures, which means we could catch an inconsistent * state and dereference a nasty pointer. Not much to be done * about that. */ db_printf("Locked vnodes\n"); TAILQ_FOREACH(mp, &mountlist, mnt_list) { TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) vn_printf(vp, "vnode "); } } } /* * Show details about the given vnode. */ DB_SHOW_COMMAND(vnode, db_show_vnode) { struct vnode *vp; if (!have_addr) return; vp = (struct vnode *)addr; vn_printf(vp, "vnode "); } /* * Show details about the given mount point. */ DB_SHOW_COMMAND(mount, db_show_mount) { struct mount *mp; struct vfsopt *opt; struct statfs *sp; struct vnode *vp; char buf[512]; uint64_t mflags; u_int flags; if (!have_addr) { /* No address given, print short info about all mount points. */ TAILQ_FOREACH(mp, &mountlist, mnt_list) { db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); if (db_pager_quit) break; } db_printf("\nMore info: show mount <addr>\n"); return; } mp = (struct mount *)addr; db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); buf[0] = '\0'; mflags = mp->mnt_flag; #define MNT_FLAG(flag) do { \ if (mflags & (flag)) { \ if (buf[0] != '\0') \ strlcat(buf, ", ", sizeof(buf)); \ strlcat(buf, (#flag) + 4, sizeof(buf)); \ mflags &= ~(flag); \ } \ } while (0) MNT_FLAG(MNT_RDONLY); MNT_FLAG(MNT_SYNCHRONOUS); MNT_FLAG(MNT_NOEXEC); MNT_FLAG(MNT_NOSUID); MNT_FLAG(MNT_NFS4ACLS); MNT_FLAG(MNT_UNION); MNT_FLAG(MNT_ASYNC); MNT_FLAG(MNT_SUIDDIR); MNT_FLAG(MNT_SOFTDEP); MNT_FLAG(MNT_NOSYMFOLLOW); MNT_FLAG(MNT_GJOURNAL); MNT_FLAG(MNT_MULTILABEL); MNT_FLAG(MNT_ACLS); MNT_FLAG(MNT_NOATIME); MNT_FLAG(MNT_NOCLUSTERR); MNT_FLAG(MNT_NOCLUSTERW); MNT_FLAG(MNT_SUJ); MNT_FLAG(MNT_EXRDONLY); MNT_FLAG(MNT_EXPORTED); MNT_FLAG(MNT_DEFEXPORTED); MNT_FLAG(MNT_EXPORTANON); MNT_FLAG(MNT_EXKERB); MNT_FLAG(MNT_EXPUBLIC); MNT_FLAG(MNT_LOCAL); MNT_FLAG(MNT_QUOTA); MNT_FLAG(MNT_ROOTFS); MNT_FLAG(MNT_USER); MNT_FLAG(MNT_IGNORE); MNT_FLAG(MNT_UPDATE); MNT_FLAG(MNT_DELEXPORT); MNT_FLAG(MNT_RELOAD); MNT_FLAG(MNT_FORCE); MNT_FLAG(MNT_SNAPSHOT); MNT_FLAG(MNT_BYFSID); #undef MNT_FLAG if (mflags != 0) { if (buf[0] != '\0') strlcat(buf, ", ", sizeof(buf)); snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "0x%016jx", mflags); } db_printf(" mnt_flag = %s\n", buf); buf[0] = '\0'; flags = mp->mnt_kern_flag; #define MNT_KERN_FLAG(flag) do { \ if (flags & (flag)) { \ if (buf[0] != '\0') \ strlcat(buf, ", ", sizeof(buf)); \ strlcat(buf, (#flag) + 5, sizeof(buf)); \ flags &= ~(flag); \ } \ } while (0) MNT_KERN_FLAG(MNTK_UNMOUNTF); MNT_KERN_FLAG(MNTK_ASYNC); MNT_KERN_FLAG(MNTK_SOFTDEP); MNT_KERN_FLAG(MNTK_NOINSMNTQ); MNT_KERN_FLAG(MNTK_DRAINING); MNT_KERN_FLAG(MNTK_REFEXPIRE); MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); MNT_KERN_FLAG(MNTK_SHARED_WRITES); MNT_KERN_FLAG(MNTK_NO_IOPF); MNT_KERN_FLAG(MNTK_VGONE_UPPER); MNT_KERN_FLAG(MNTK_VGONE_WAITER); MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); MNT_KERN_FLAG(MNTK_MARKER); MNT_KERN_FLAG(MNTK_USES_BCACHE); MNT_KERN_FLAG(MNTK_NOASYNC); MNT_KERN_FLAG(MNTK_UNMOUNT); MNT_KERN_FLAG(MNTK_MWAIT); MNT_KERN_FLAG(MNTK_SUSPEND); MNT_KERN_FLAG(MNTK_SUSPEND2); MNT_KERN_FLAG(MNTK_SUSPENDED); MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); MNT_KERN_FLAG(MNTK_NOKNOTE); #undef MNT_KERN_FLAG if (flags != 0) { if (buf[0] !=
'\0') strlcat(buf, ", ", sizeof(buf)); snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "0x%08x", flags); } db_printf(" mnt_kern_flag = %s\n", buf); db_printf(" mnt_opt = "); opt = TAILQ_FIRST(mp->mnt_opt); if (opt != NULL) { db_printf("%s", opt->name); opt = TAILQ_NEXT(opt, link); while (opt != NULL) { db_printf(", %s", opt->name); opt = TAILQ_NEXT(opt, link); } } db_printf("\n"); sp = &mp->mnt_stat; db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); db_printf(" mnt_cred = { uid=%u ruid=%u", (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); if (jailed(mp->mnt_cred)) db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); db_printf(" }\n"); db_printf(" mnt_ref = %d\n", mp->mnt_ref); db_printf(" mnt_gen = %d\n", mp->mnt_gen); db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); db_printf(" mnt_activevnodelistsize = %d\n", mp->mnt_activevnodelistsize); db_printf(" mnt_writeopcount = %d\n", mp->mnt_writeopcount); db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen); db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); db_printf(" mnt_lockref = %d\n", mp->mnt_lockref); db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); db_printf(" mnt_secondary_accwrites = %d\n", mp->mnt_secondary_accwrites); db_printf(" mnt_gjprovider = %s\n", mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); db_printf("\n\nList of active vnodes\n"); TAILQ_FOREACH(vp, &mp->mnt_activevnodelist, v_actfreelist) { if (vp->v_type != VMARKER) { vn_printf(vp, "vnode "); if (db_pager_quit) break; } } db_printf("\n\nList of inactive vnodes\n"); TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { if (vp->v_type != VMARKER && (vp->v_iflag & VI_ACTIVE) == 0) { vn_printf(vp, "vnode "); if (db_pager_quit) break; } } } #endif /* DDB */ /* * Fill in a struct xvfsconf based on a struct vfsconf. */ static int vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) { struct xvfsconf xvfsp; bzero(&xvfsp, sizeof(xvfsp)); strcpy(xvfsp.vfc_name, vfsp->vfc_name); xvfsp.vfc_typenum = vfsp->vfc_typenum; xvfsp.vfc_refcount = vfsp->vfc_refcount; xvfsp.vfc_flags = vfsp->vfc_flags; /* * These are unused in userland, we keep them * to not break binary compatibility. 
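 * (Userland retrieves these records through the vfs.conflist sysctl
 * defined below; getvfsbyname(3) is the usual consumer.)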
*/ xvfsp.vfc_vfsops = NULL; xvfsp.vfc_next = NULL; return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); } #ifdef COMPAT_FREEBSD32 struct xvfsconf32 { uint32_t vfc_vfsops; char vfc_name[MFSNAMELEN]; int32_t vfc_typenum; int32_t vfc_refcount; int32_t vfc_flags; uint32_t vfc_next; }; static int vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) { struct xvfsconf32 xvfsp; bzero(&xvfsp, sizeof(xvfsp)); strcpy(xvfsp.vfc_name, vfsp->vfc_name); xvfsp.vfc_typenum = vfsp->vfc_typenum; xvfsp.vfc_refcount = vfsp->vfc_refcount; xvfsp.vfc_flags = vfsp->vfc_flags; return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); } #endif /* * Top level filesystem related information gathering. */ static int sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) { struct vfsconf *vfsp; int error; error = 0; vfsconf_slock(); TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { #ifdef COMPAT_FREEBSD32 if (req->flags & SCTL_MASK32) error = vfsconf2x32(req, vfsp); else #endif error = vfsconf2x(req, vfsp); if (error) break; } vfsconf_sunlock(); return (error); } SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, "S,xvfsconf", "List of all configured filesystems"); #ifndef BURN_BRIDGES static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); static int vfs_sysctl(SYSCTL_HANDLER_ARGS) { int *name = (int *)arg1 - 1; /* XXX */ u_int namelen = arg2 + 1; /* XXX */ struct vfsconf *vfsp; log(LOG_WARNING, "userland calling deprecated sysctl, " "please rebuild world\n"); #if 1 || defined(COMPAT_PRELITE2) /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */ if (namelen == 1) return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); #endif switch (name[1]) { case VFS_MAXTYPENUM: if (namelen != 2) return (ENOTDIR); return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); case VFS_CONF: if (namelen != 3) return (ENOTDIR); /* overloaded */ vfsconf_slock(); TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { if (vfsp->vfc_typenum == name[2]) break; } vfsconf_sunlock(); if (vfsp == NULL) return (EOPNOTSUPP); #ifdef COMPAT_FREEBSD32 if (req->flags & SCTL_MASK32) return (vfsconf2x32(req, vfsp)); else #endif return (vfsconf2x(req, vfsp)); } return (EOPNOTSUPP); } static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, vfs_sysctl, "Generic filesystem"); #if 1 || defined(COMPAT_PRELITE2) static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) { int error; struct vfsconf *vfsp; struct ovfsconf ovfs; vfsconf_slock(); TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { bzero(&ovfs, sizeof(ovfs)); ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ strcpy(ovfs.vfc_name, vfsp->vfc_name); ovfs.vfc_index = vfsp->vfc_typenum; ovfs.vfc_refcount = vfsp->vfc_refcount; ovfs.vfc_flags = vfsp->vfc_flags; error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); if (error != 0) { vfsconf_sunlock(); return (error); } } vfsconf_sunlock(); return (0); } #endif /* 1 || COMPAT_PRELITE2 */ #endif /* !BURN_BRIDGES */ #define KINFO_VNODESLOP 10 #ifdef notyet /* * Dump vnode list (via sysctl). */ /* ARGSUSED */ static int sysctl_vnode(SYSCTL_HANDLER_ARGS) { struct xvnode *xvn; struct mount *mp; struct vnode *vp; int error, len, n; /* * Stale numvnodes access is not fatal here. 
*/ req->lock = 0; len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; if (!req->oldptr) /* Make an estimate */ return (SYSCTL_OUT(req, 0, len)); error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); n = 0; mtx_lock(&mountlist_mtx); TAILQ_FOREACH(mp, &mountlist, mnt_list) { if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) continue; MNT_ILOCK(mp); TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { if (n == len) break; vref(vp); xvn[n].xv_size = sizeof *xvn; xvn[n].xv_vnode = vp; xvn[n].xv_id = 0; /* XXX compat */ #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field XV_COPY(usecount); XV_COPY(writecount); XV_COPY(holdcnt); XV_COPY(mount); XV_COPY(numoutput); XV_COPY(type); #undef XV_COPY xvn[n].xv_flag = vp->v_vflag; switch (vp->v_type) { case VREG: case VDIR: case VLNK: break; case VBLK: case VCHR: if (vp->v_rdev == NULL) { vrele(vp); continue; } xvn[n].xv_dev = dev2udev(vp->v_rdev); break; case VSOCK: xvn[n].xv_socket = vp->v_socket; break; case VFIFO: xvn[n].xv_fifo = vp->v_fifoinfo; break; case VNON: case VBAD: default: /* shouldn't happen? */ vrele(vp); continue; } vrele(vp); ++n; } MNT_IUNLOCK(mp); mtx_lock(&mountlist_mtx); vfs_unbusy(mp); if (n == len) break; } mtx_unlock(&mountlist_mtx); error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); free(xvn, M_TEMP); return (error); } SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", ""); #endif static void unmount_or_warn(struct mount *mp) { int error; error = dounmount(mp, MNT_FORCE, curthread); if (error != 0) { printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); if (error == EBUSY) printf("BUSY)\n"); else printf("%d)\n", error); } } /* * Unmount all filesystems. The list is traversed in reverse order * of mounting to avoid dependencies. */ void vfs_unmountall(void) { struct mount *mp, *tmp; CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); /* * Since this only runs when rebooting, it is not interlocked. */ TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { vfs_ref(mp); /* * Forcibly unmounting "/dev" before "/" would prevent clean * unmount of the latter. */ if (mp == rootdevmp) continue; unmount_or_warn(mp); } if (rootdevmp != NULL) unmount_or_warn(rootdevmp); } /* * perform msync on all vnodes under a mount point * the mount point must be locked. */ void vfs_msync(struct mount *mp, int flags) { struct vnode *vp, *mvp; struct vm_object *obj; CTR2(KTR_VFS, "%s: mp %p", __func__, mp); vnlru_return_batch(mp); MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) { obj = vp->v_object; if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0 && (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) { if (!vget(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, curthread)) { if (vp->v_vflag & VV_NOSYNC) { /* unlinked */ vput(vp); continue; } obj = vp->v_object; if (obj != NULL) { VM_OBJECT_WLOCK(obj); vm_object_page_clean(obj, 0, 0, flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC); VM_OBJECT_WUNLOCK(obj); } vput(vp); } } else VI_UNLOCK(vp); } } static void destroy_vpollinfo_free(struct vpollinfo *vi) { knlist_destroy(&vi->vpi_selinfo.si_note); mtx_destroy(&vi->vpi_lock); uma_zfree(vnodepoll_zone, vi); } static void destroy_vpollinfo(struct vpollinfo *vi) { knlist_clear(&vi->vpi_selinfo.si_note, 1); seldrain(&vi->vpi_selinfo); destroy_vpollinfo_free(vi); } /* * Initialize per-vnode helper structure to hold poll-related state. 
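 * The allocation below is optimistic: v_addpollinfo() sets up a new
 * vpollinfo before taking the vnode interlock, then re-checks
 * v_pollinfo and frees the fresh copy if another thread won the race.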
*/ void v_addpollinfo(struct vnode *vp) { struct vpollinfo *vi; if (vp->v_pollinfo != NULL) return; vi = uma_zalloc(vnodepoll_zone, M_WAITOK | M_ZERO); mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked); VI_LOCK(vp); if (vp->v_pollinfo != NULL) { VI_UNLOCK(vp); destroy_vpollinfo_free(vi); return; } vp->v_pollinfo = vi; VI_UNLOCK(vp); } /* * Record a process's interest in events which might happen to * a vnode. Because poll uses the historic select-style interface * internally, this routine serves as both the ``check for any * pending events'' and the ``record my interest in future events'' * functions. (These are done together, while the lock is held, * to avoid race conditions.) */ int vn_pollrecord(struct vnode *vp, struct thread *td, int events) { v_addpollinfo(vp); mtx_lock(&vp->v_pollinfo->vpi_lock); if (vp->v_pollinfo->vpi_revents & events) { /* * This leaves events we are not interested * in available for the other process which * presumably had requested them * (otherwise they would never have been * recorded). */ events &= vp->v_pollinfo->vpi_revents; vp->v_pollinfo->vpi_revents &= ~events; mtx_unlock(&vp->v_pollinfo->vpi_lock); return (events); } vp->v_pollinfo->vpi_events |= events; selrecord(td, &vp->v_pollinfo->vpi_selinfo); mtx_unlock(&vp->v_pollinfo->vpi_lock); return (0); } /* * Routine to create and manage a filesystem syncer vnode. */ #define sync_close ((int (*)(struct vop_close_args *))nullop) static int sync_fsync(struct vop_fsync_args *); static int sync_inactive(struct vop_inactive_args *); static int sync_reclaim(struct vop_reclaim_args *); static struct vop_vector sync_vnodeops = { .vop_bypass = VOP_EOPNOTSUPP, .vop_close = sync_close, /* close */ .vop_fsync = sync_fsync, /* fsync */ .vop_inactive = sync_inactive, /* inactive */ .vop_reclaim = sync_reclaim, /* reclaim */ .vop_lock1 = vop_stdlock, /* lock */ .vop_unlock = vop_stdunlock, /* unlock */ .vop_islocked = vop_stdislocked, /* islocked */ }; /* * Create a new filesystem syncer vnode for the specified mount point. */ void vfs_allocate_syncvnode(struct mount *mp) { struct vnode *vp; struct bufobj *bo; static long start, incr, next; int error; /* Allocate a new vnode */ error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); if (error != 0) panic("vfs_allocate_syncvnode: getnewvnode() failed"); vp->v_type = VNON; vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); vp->v_vflag |= VV_FORCEINSMQ; error = insmntque(vp, mp); if (error != 0) panic("vfs_allocate_syncvnode: insmntque() failed"); vp->v_vflag &= ~VV_FORCEINSMQ; VOP_UNLOCK(vp, 0); /* * Place the vnode onto the syncer worklist. We attempt to * scatter them about on the list so that they will go off * at evenly distributed times even if all the filesystems * are mounted at once. */ next += incr; if (next == 0 || next > syncer_maxdelay) { start /= 2; incr /= 2; if (start == 0) { start = syncer_maxdelay / 2; incr = syncer_maxdelay; } next = start; } bo = &vp->v_bufobj; BO_LOCK(bo); vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx.
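 * The start/incr sequence above subdivides the delay range in a
 * binary fashion: successive syncer vnodes land at 1/2, then 1/4 and
 * 3/4, then 1/8, 3/8, 5/8, 7/8, ... of syncer_maxdelay, so
 * filesystems mounted at the same time still get well-spread sync
 * slots.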
*/ mtx_lock(&sync_mtx); sync_vnode_count++; if (mp->mnt_syncer == NULL) { mp->mnt_syncer = vp; vp = NULL; } mtx_unlock(&sync_mtx); BO_UNLOCK(bo); if (vp != NULL) { vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); vgone(vp); vput(vp); } } void vfs_deallocate_syncvnode(struct mount *mp) { struct vnode *vp; mtx_lock(&sync_mtx); vp = mp->mnt_syncer; if (vp != NULL) mp->mnt_syncer = NULL; mtx_unlock(&sync_mtx); if (vp != NULL) vrele(vp); } /* * Do a lazy sync of the filesystem. */ static int sync_fsync(struct vop_fsync_args *ap) { struct vnode *syncvp = ap->a_vp; struct mount *mp = syncvp->v_mount; int error, save; struct bufobj *bo; /* * We only need to do something if this is a lazy evaluation. */ if (ap->a_waitfor != MNT_LAZY) return (0); /* * Move ourselves to the back of the sync list. */ bo = &syncvp->v_bufobj; BO_LOCK(bo); vn_syncer_add_to_worklist(bo, syncdelay); BO_UNLOCK(bo); /* * Walk the list of vnodes pushing all that are dirty and * not already on the sync list. */ if (vfs_busy(mp, MBF_NOWAIT) != 0) return (0); if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { vfs_unbusy(mp); return (0); } save = curthread_pflags_set(TDP_SYNCIO); vfs_msync(mp, MNT_NOWAIT); error = VFS_SYNC(mp, MNT_LAZY); curthread_pflags_restore(save); vn_finished_write(mp); vfs_unbusy(mp); return (error); } /* * The syncer vnode is no longer referenced. */ static int sync_inactive(struct vop_inactive_args *ap) { vgone(ap->a_vp); return (0); } /* * The syncer vnode is no longer needed and is being decommissioned. * * Modifications to the worklist must be protected by sync_mtx. */ static int sync_reclaim(struct vop_reclaim_args *ap) { struct vnode *vp = ap->a_vp; struct bufobj *bo; bo = &vp->v_bufobj; BO_LOCK(bo); mtx_lock(&sync_mtx); if (vp->v_mount->mnt_syncer == vp) vp->v_mount->mnt_syncer = NULL; if (bo->bo_flag & BO_ONWORKLST) { LIST_REMOVE(bo, bo_synclist); syncer_worklist_len--; sync_vnode_count--; bo->bo_flag &= ~BO_ONWORKLST; } mtx_unlock(&sync_mtx); BO_UNLOCK(bo); return (0); } /* * Check if vnode represents a disk device */ int vn_isdisk(struct vnode *vp, int *errp) { int error; if (vp->v_type != VCHR) { error = ENOTBLK; goto out; } error = 0; dev_lock(); if (vp->v_rdev == NULL) error = ENXIO; else if (vp->v_rdev->si_devsw == NULL) error = ENXIO; else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) error = ENOTBLK; dev_unlock(); out: if (errp != NULL) *errp = error; return (error == 0); } /* * Common filesystem object access control check routine. Accepts a * vnode's type, "mode", uid and gid, requested access mode, credentials, * and optional call-by-reference privused argument allowing vaccess() * to indicate to the caller whether privilege was used to satisfy the * request (obsoleted). Returns 0 on success, or an errno on failure. */ int vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, accmode_t accmode, struct ucred *cred, int *privused) { accmode_t dac_granted; accmode_t priv_granted; KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, ("invalid bit in accmode")); KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), ("VAPPEND without VWRITE")); /* * Look for a normal, non-privileged way to access the file/directory * as requested. If it exists, go with that. */ if (privused != NULL) *privused = 0; dac_granted = 0; /* Check the owner.
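 * Worked example: for a file with mode 0640 owned by the caller and
 * an accmode of VREAD | VWRITE, the owner branch below grants
 * VADMIN | VREAD | VWRITE | VAPPEND; the request is a subset of
 * that, so vaccess() returns 0 without consulting any privileges.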
*/ if (cred->cr_uid == file_uid) { dac_granted |= VADMIN; if (file_mode & S_IXUSR) dac_granted |= VEXEC; if (file_mode & S_IRUSR) dac_granted |= VREAD; if (file_mode & S_IWUSR) dac_granted |= (VWRITE | VAPPEND); if ((accmode & dac_granted) == accmode) return (0); goto privcheck; } /* Otherwise, check the groups (first match) */ if (groupmember(file_gid, cred)) { if (file_mode & S_IXGRP) dac_granted |= VEXEC; if (file_mode & S_IRGRP) dac_granted |= VREAD; if (file_mode & S_IWGRP) dac_granted |= (VWRITE | VAPPEND); if ((accmode & dac_granted) == accmode) return (0); goto privcheck; } /* Otherwise, check everyone else. */ if (file_mode & S_IXOTH) dac_granted |= VEXEC; if (file_mode & S_IROTH) dac_granted |= VREAD; if (file_mode & S_IWOTH) dac_granted |= (VWRITE | VAPPEND); if ((accmode & dac_granted) == accmode) return (0); privcheck: /* * Build a privilege mask to determine if the set of privileges * satisfies the requirements when combined with the granted mask * from above. For each privilege, if the privilege is required, * bitwise or the request type onto the priv_granted mask. */ priv_granted = 0; if (type == VDIR) { /* * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC * requests, instead of PRIV_VFS_EXEC. */ if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && !priv_check_cred(cred, PRIV_VFS_LOOKUP, 0)) priv_granted |= VEXEC; } else { /* * Ensure that at least one execute bit is on. Otherwise, * a privileged user will always succeed, and we don't want * this to happen unless the file really is executable. */ if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && !priv_check_cred(cred, PRIV_VFS_EXEC, 0)) priv_granted |= VEXEC; } if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && !priv_check_cred(cred, PRIV_VFS_READ, 0)) priv_granted |= VREAD; if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && !priv_check_cred(cred, PRIV_VFS_WRITE, 0)) priv_granted |= (VWRITE | VAPPEND); if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && !priv_check_cred(cred, PRIV_VFS_ADMIN, 0)) priv_granted |= VADMIN; if ((accmode & (priv_granted | dac_granted)) == accmode) { /* XXX audit: privilege used */ if (privused != NULL) *privused = 1; return (0); } return ((accmode & VADMIN) ? EPERM : EACCES); } /* * Credential check based on process requesting service, and per-attribute * permissions. */ int extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, struct thread *td, accmode_t accmode) { /* * Kernel-invoked always succeeds. */ if (cred == NOCRED) return (0); /* * Do not allow privileged processes in jail to directly manipulate * system attributes. */ switch (attrnamespace) { case EXTATTR_NAMESPACE_SYSTEM: /* Potentially should be: return (EPERM); */ return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM, 0)); case EXTATTR_NAMESPACE_USER: return (VOP_ACCESS(vp, accmode, cred, td)); default: return (EPERM); } } #ifdef DEBUG_VFS_LOCKS /* * This only exists to suppress warnings from unlocked specfs accesses. It is * no longer ok to have an unlocked VFS. */ #define IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL || \ (vp)->v_type == VCHR || (vp)->v_type == VBAD) int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, "Drop into debugger on lock violation"); int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. 
*/ SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 0, "Check for interlock across VOPs"); int vfs_badlock_print = 1; /* Print lock violations. */ SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 0, "Print lock violations"); int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 0, "Print vnode details on lock violations"); #ifdef KDB int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); #endif static void vfs_badlock(const char *msg, const char *str, struct vnode *vp) { #ifdef KDB if (vfs_badlock_backtrace) kdb_backtrace(); #endif if (vfs_badlock_vnode) vn_printf(vp, "vnode "); if (vfs_badlock_print) printf("%s: %p %s\n", str, (void *)vp, msg); if (vfs_badlock_ddb) kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); } void assert_vi_locked(struct vnode *vp, const char *str) { if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) vfs_badlock("interlock is not locked but should be", str, vp); } void assert_vi_unlocked(struct vnode *vp, const char *str) { if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) vfs_badlock("interlock is locked but should not be", str, vp); } void assert_vop_locked(struct vnode *vp, const char *str) { int locked; if (!IGNORE_LOCK(vp)) { locked = VOP_ISLOCKED(vp); if (locked == 0 || locked == LK_EXCLOTHER) vfs_badlock("is not locked but should be", str, vp); } } void assert_vop_unlocked(struct vnode *vp, const char *str) { if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) vfs_badlock("is locked but should not be", str, vp); } void assert_vop_elocked(struct vnode *vp, const char *str) { if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) vfs_badlock("is not exclusive locked but should be", str, vp); } #endif /* DEBUG_VFS_LOCKS */ void vop_rename_fail(struct vop_rename_args *ap) { if (ap->a_tvp != NULL) vput(ap->a_tvp); if (ap->a_tdvp == ap->a_tvp) vrele(ap->a_tdvp); else vput(ap->a_tdvp); vrele(ap->a_fdvp); vrele(ap->a_fvp); } void vop_rename_pre(void *ap) { struct vop_rename_args *a = ap; #ifdef DEBUG_VFS_LOCKS if (a->a_tvp) ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); /* Check the source (from). */ if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); /* Check the target. */ if (a->a_tvp) ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); #endif if (a->a_tdvp != a->a_fdvp) vhold(a->a_fdvp); if (a->a_tvp != a->a_fvp) vhold(a->a_fvp); vhold(a->a_tdvp); if (a->a_tvp) vhold(a->a_tvp); } #ifdef DEBUG_VFS_LOCKS void vop_strategy_pre(void *ap) { struct vop_strategy_args *a; struct buf *bp; a = ap; bp = a->a_bp; /* * Cluster ops lock their component buffers but not the IO container. 
*/ if ((bp->b_flags & B_CLUSTER) != 0) return; if (panicstr == NULL && !BUF_ISLOCKED(bp)) { if (vfs_badlock_print) printf( "VOP_STRATEGY: bp is not locked but should be\n"); if (vfs_badlock_ddb) kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); } } void vop_lock_pre(void *ap) { struct vop_lock1_args *a = ap; if ((a->a_flags & LK_INTERLOCK) == 0) ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); else ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); } void vop_lock_post(void *ap, int rc) { struct vop_lock1_args *a = ap; ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); } void vop_unlock_pre(void *ap) { struct vop_unlock_args *a = ap; if (a->a_flags & LK_INTERLOCK) ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK"); ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); } void vop_unlock_post(void *ap, int rc) { struct vop_unlock_args *a = ap; if (a->a_flags & LK_INTERLOCK) ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK"); } #endif void vop_create_post(void *ap, int rc) { struct vop_create_args *a = ap; if (!rc) VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); } void vop_deleteextattr_post(void *ap, int rc) { struct vop_deleteextattr_args *a = ap; if (!rc) VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); } void vop_link_post(void *ap, int rc) { struct vop_link_args *a = ap; if (!rc) { VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK); VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE); } } void vop_mkdir_post(void *ap, int rc) { struct vop_mkdir_args *a = ap; if (!rc) VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); } void vop_mknod_post(void *ap, int rc) { struct vop_mknod_args *a = ap; if (!rc) VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); } void vop_reclaim_post(void *ap, int rc) { struct vop_reclaim_args *a = ap; if (!rc) VFS_KNOTE_LOCKED(a->a_vp, NOTE_REVOKE); } void vop_remove_post(void *ap, int rc) { struct vop_remove_args *a = ap; if (!rc) { VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); } } void vop_rename_post(void *ap, int rc) { struct vop_rename_args *a = ap; long hint; if (!rc) { hint = NOTE_WRITE; if (a->a_fdvp == a->a_tdvp) { if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) hint |= NOTE_LINK; VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); } else { hint |= NOTE_EXTEND; if (a->a_fvp->v_type == VDIR) hint |= NOTE_LINK; VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && a->a_tvp->v_type == VDIR) hint &= ~NOTE_LINK; VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); } VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); if (a->a_tvp) VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); } if (a->a_tdvp != a->a_fdvp) vdrop(a->a_fdvp); if (a->a_tvp != a->a_fvp) vdrop(a->a_fvp); vdrop(a->a_tdvp); if (a->a_tvp) vdrop(a->a_tvp); } void vop_rmdir_post(void *ap, int rc) { struct vop_rmdir_args *a = ap; if (!rc) { VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); } } void vop_setattr_post(void *ap, int rc) { struct vop_setattr_args *a = ap; if (!rc) VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); } void vop_setextattr_post(void *ap, int rc) { struct vop_setextattr_args *a = ap; if (!rc) VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); } void vop_symlink_post(void *ap, int rc) { struct vop_symlink_args *a = ap; if (!rc) VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); } void vop_open_post(void *ap, int rc) { struct vop_open_args *a = ap; if (!rc) VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); } void vop_close_post(void *ap, int rc) { struct vop_close_args *a = ap; if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ (a->a_vp->v_iflag 
& VI_DOOMED) == 0)) { VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? NOTE_CLOSE_WRITE : NOTE_CLOSE); } } void vop_read_post(void *ap, int rc) { struct vop_read_args *a = ap; if (!rc) VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); } void vop_readdir_post(void *ap, int rc) { struct vop_readdir_args *a = ap; if (!rc) VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); } static struct knlist fs_knlist; static void vfs_event_init(void *arg) { knlist_init_mtx(&fs_knlist, NULL); } /* XXX - correct order? */ SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); void vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) { KNOTE_UNLOCKED(&fs_knlist, event); } static int filt_fsattach(struct knote *kn); static void filt_fsdetach(struct knote *kn); static int filt_fsevent(struct knote *kn, long hint); struct filterops fs_filtops = { .f_isfd = 0, .f_attach = filt_fsattach, .f_detach = filt_fsdetach, .f_event = filt_fsevent }; static int filt_fsattach(struct knote *kn) { kn->kn_flags |= EV_CLEAR; knlist_add(&fs_knlist, kn, 0); return (0); } static void filt_fsdetach(struct knote *kn) { knlist_remove(&fs_knlist, kn, 0); } static int filt_fsevent(struct knote *kn, long hint) { kn->kn_fflags |= hint; return (kn->kn_fflags != 0); } static int sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) { struct vfsidctl vc; int error; struct mount *mp; error = SYSCTL_IN(req, &vc, sizeof(vc)); if (error) return (error); if (vc.vc_vers != VFS_CTL_VERS1) return (EINVAL); mp = vfs_getvfs(&vc.vc_fsid); if (mp == NULL) return (ENOENT); /* ensure that a specific sysctl goes to the right filesystem. */ if (strcmp(vc.vc_fstypename, "*") != 0 && strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { vfs_rel(mp); return (EINVAL); } VCTLTOREQ(&vc, req); error = VFS_SYSCTL(mp, vc.vc_op, req); vfs_rel(mp); return (error); } SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_WR, NULL, 0, sysctl_vfs_ctl, "", "Sysctl by fsid"); /* * Function to initialize a va_filerev field sensibly. * XXX: Wouldn't a random number make a lot more sense ?? 
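* * (Summary of the function below: the boot-relative seconds from getbinuptime() are packed into the upper 32 bits and the top half of the 64-bit binary fraction into the lower 32 bits, so successive calls yield a monotonically increasing revision.)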
*/ u_quad_t init_va_filerev(void) { struct bintime bt; getbinuptime(&bt); return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); } static int filt_vfsread(struct knote *kn, long hint); static int filt_vfswrite(struct knote *kn, long hint); static int filt_vfsvnode(struct knote *kn, long hint); static void filt_vfsdetach(struct knote *kn); static struct filterops vfsread_filtops = { .f_isfd = 1, .f_detach = filt_vfsdetach, .f_event = filt_vfsread }; static struct filterops vfswrite_filtops = { .f_isfd = 1, .f_detach = filt_vfsdetach, .f_event = filt_vfswrite }; static struct filterops vfsvnode_filtops = { .f_isfd = 1, .f_detach = filt_vfsdetach, .f_event = filt_vfsvnode }; static void vfs_knllock(void *arg) { struct vnode *vp = arg; vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); } static void vfs_knlunlock(void *arg) { struct vnode *vp = arg; VOP_UNLOCK(vp, 0); } static void vfs_knl_assert_locked(void *arg) { #ifdef DEBUG_VFS_LOCKS struct vnode *vp = arg; ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); #endif } static void vfs_knl_assert_unlocked(void *arg) { #ifdef DEBUG_VFS_LOCKS struct vnode *vp = arg; ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); #endif } int vfs_kqfilter(struct vop_kqfilter_args *ap) { struct vnode *vp = ap->a_vp; struct knote *kn = ap->a_kn; struct knlist *knl; switch (kn->kn_filter) { case EVFILT_READ: kn->kn_fop = &vfsread_filtops; break; case EVFILT_WRITE: kn->kn_fop = &vfswrite_filtops; break; case EVFILT_VNODE: kn->kn_fop = &vfsvnode_filtops; break; default: return (EINVAL); } kn->kn_hook = (caddr_t)vp; v_addpollinfo(vp); if (vp->v_pollinfo == NULL) return (ENOMEM); knl = &vp->v_pollinfo->vpi_selinfo.si_note; vhold(vp); knlist_add(knl, kn, 0); return (0); } /* * Detach knote from vnode */ static void filt_vfsdetach(struct knote *kn) { struct vnode *vp = (struct vnode *)kn->kn_hook; KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); vdrop(vp); } /*ARGSUSED*/ static int filt_vfsread(struct knote *kn, long hint) { struct vnode *vp = (struct vnode *)kn->kn_hook; struct vattr va; int res; /* * filesystem is gone, so set the EOF flag and schedule * the knote for deletion. */ if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { VI_LOCK(vp); kn->kn_flags |= (EV_EOF | EV_ONESHOT); VI_UNLOCK(vp); return (1); } if (VOP_GETATTR(vp, &va, curthread->td_ucred)) return (0); VI_LOCK(vp); kn->kn_data = va.va_size - kn->kn_fp->f_offset; res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; VI_UNLOCK(vp); return (res); } /*ARGSUSED*/ static int filt_vfswrite(struct knote *kn, long hint) { struct vnode *vp = (struct vnode *)kn->kn_hook; VI_LOCK(vp); /* * filesystem is gone, so set the EOF flag and schedule * the knote for deletion. 
*/ if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) kn->kn_flags |= (EV_EOF | EV_ONESHOT); kn->kn_data = 0; VI_UNLOCK(vp); return (1); } static int filt_vfsvnode(struct knote *kn, long hint) { struct vnode *vp = (struct vnode *)kn->kn_hook; int res; VI_LOCK(vp); if (kn->kn_sfflags & hint) kn->kn_fflags |= hint; if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { kn->kn_flags |= EV_EOF; VI_UNLOCK(vp); return (1); } res = (kn->kn_fflags != 0); VI_UNLOCK(vp); return (res); } int vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) { int error; if (dp->d_reclen > ap->a_uio->uio_resid) return (ENAMETOOLONG); error = uiomove(dp, dp->d_reclen, ap->a_uio); if (error) { if (ap->a_ncookies != NULL) { if (ap->a_cookies != NULL) free(ap->a_cookies, M_TEMP); ap->a_cookies = NULL; *ap->a_ncookies = 0; } return (error); } if (ap->a_ncookies == NULL) return (0); KASSERT(ap->a_cookies, ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); *ap->a_cookies = realloc(*ap->a_cookies, (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO); (*ap->a_cookies)[*ap->a_ncookies] = off; *ap->a_ncookies += 1; return (0); } /* * Mark for update the access time of the file if the filesystem * supports VOP_MARKATIME. This functionality is used by execve and * mmap, so we want to avoid the I/O implied by directly setting * va_atime for the sake of efficiency. */ void vfs_mark_atime(struct vnode *vp, struct ucred *cred) { struct mount *mp; mp = vp->v_mount; ASSERT_VOP_LOCKED(vp, "vfs_mark_atime"); if (mp != NULL && (mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) (void)VOP_MARKATIME(vp); } /* * The purpose of this routine is to remove granularity from accmode_t, * reducing it to the standard unix access bits - VEXEC, VREAD, VWRITE, * VADMIN and VAPPEND. * * If it returns 0, the caller is supposed to continue with the usual * access checks using 'accmode' as modified by this routine. If it * returns a nonzero value, the caller is supposed to return that value * as errno. * * Note that after this routine runs, accmode may be zero. */ int vfs_unixify_accmode(accmode_t *accmode) { /* * There is no way to specify an explicit "deny" rule using * file mode or POSIX.1e ACLs. */ if (*accmode & VEXPLICIT_DENY) { *accmode = 0; return (0); } /* * None of these can be translated into usual access bits. * Also, the common case for NFSv4 ACLs is to not contain * either of these bits. The caller should check for VWRITE * on the containing directory instead. */ if (*accmode & (VDELETE_CHILD | VDELETE)) return (EPERM); if (*accmode & VADMIN_PERMS) { *accmode &= ~VADMIN_PERMS; *accmode |= VADMIN; } /* * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL * or VSYNCHRONIZE using file mode or POSIX.1e ACLs. */ *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); return (0); } /* * These are helper functions for filesystems to traverse all * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. * * This interface replaces MNT_VNODE_FOREACH.
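* * A minimal sketch of the intended calling pattern (illustrative only; the iterator macros live in sys/mount.h): * * struct vnode *vp, *mvp; * * MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { * ... work on vp; its interlock is held on entry ... * VI_UNLOCK(vp); * } * * A body that exits the loop early must call MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp) so that the marker vnode is removed and freed.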
*/ MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); struct vnode * __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) { struct vnode *vp; if (should_yield()) kern_yield(PRI_USER); MNT_ILOCK(mp); KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); vp = TAILQ_NEXT(*mvp, v_nmntvnodes); while (vp != NULL && (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0)) vp = TAILQ_NEXT(vp, v_nmntvnodes); /* Check if we are done */ if (vp == NULL) { __mnt_vnode_markerfree_all(mvp, mp); /* MNT_IUNLOCK(mp); -- done in above function */ mtx_assert(MNT_MTX(mp), MA_NOTOWNED); return (NULL); } TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); VI_LOCK(vp); MNT_IUNLOCK(mp); return (vp); } struct vnode * __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) { struct vnode *vp; *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); MNT_ILOCK(mp); MNT_REF(mp); (*mvp)->v_type = VMARKER; vp = TAILQ_FIRST(&mp->mnt_nvnodelist); while (vp != NULL && (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0)) vp = TAILQ_NEXT(vp, v_nmntvnodes); /* Check if we are done */ if (vp == NULL) { MNT_REL(mp); MNT_IUNLOCK(mp); free(*mvp, M_VNODE_MARKER); *mvp = NULL; return (NULL); } (*mvp)->v_mount = mp; TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); VI_LOCK(vp); MNT_IUNLOCK(mp); return (vp); } void __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) { if (*mvp == NULL) { MNT_IUNLOCK(mp); return; } mtx_assert(MNT_MTX(mp), MA_OWNED); KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); MNT_REL(mp); MNT_IUNLOCK(mp); free(*mvp, M_VNODE_MARKER); *mvp = NULL; } /* * These are helper functions for filesystems to traverse their * active vnodes. See MNT_VNODE_FOREACH_ACTIVE() in sys/mount.h */ static void mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) { KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); MNT_ILOCK(mp); MNT_REL(mp); MNT_IUNLOCK(mp); free(*mvp, M_VNODE_MARKER); *mvp = NULL; } /* * Relock the mp mount vnode list lock with the vp vnode interlock in the * conventional lock order during mnt_vnode_next_active iteration. * * On entry, the mount vnode list lock is held and the vnode interlock is not. * The list lock is dropped and reacquired. On success, both locks are held. * On failure, the mount vnode list lock is held but the vnode interlock is * not, and the procedure may have yielded. */ static bool mnt_vnode_next_active_relock(struct vnode *mvp, struct mount *mp, struct vnode *vp) { const struct vnode *tmp; bool held, ret; VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && TAILQ_NEXT(mvp, v_actfreelist) != NULL, mvp, ("%s: bad marker", __func__)); VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, ("%s: inappropriate vnode", __func__)); ASSERT_VI_UNLOCKED(vp, __func__); mtx_assert(&mp->mnt_listmtx, MA_OWNED); ret = false; TAILQ_REMOVE(&mp->mnt_activevnodelist, mvp, v_actfreelist); TAILQ_INSERT_BEFORE(vp, mvp, v_actfreelist); /* * Use a hold to prevent vp from disappearing while the mount vnode * list lock is dropped and reacquired. Normally a hold would be * acquired with vhold(), but that might try to acquire the vnode * interlock, which would be a LOR with the mount vnode list lock. 
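* * In outline, the relocking dance below is: take a bare hold on vp via its refcount, drop mnt_listmtx, take the vnode interlock, drop the bare hold, and retake mnt_listmtx -- after which both locks are held in the conventional order.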
*/ held = vfs_refcount_acquire_if_not_zero(&vp->v_holdcnt); mtx_unlock(&mp->mnt_listmtx); if (!held) goto abort; VI_LOCK(vp); if (!vfs_refcount_release_if_not_last(&vp->v_holdcnt)) { vdropl(vp); goto abort; } mtx_lock(&mp->mnt_listmtx); /* * Determine whether the vnode is still the next one after the marker, * excepting any other markers. If the vnode has not been doomed by * vgone() then the hold should have ensured that it remained on the * active list. If it has been doomed but is still on the active list, * don't abort, but rather skip over it (avoid spinning on doomed * vnodes). */ tmp = mvp; do { tmp = TAILQ_NEXT(tmp, v_actfreelist); } while (tmp != NULL && tmp->v_type == VMARKER); if (tmp != vp) { mtx_unlock(&mp->mnt_listmtx); VI_UNLOCK(vp); goto abort; } ret = true; goto out; abort: maybe_yield(); mtx_lock(&mp->mnt_listmtx); out: if (ret) ASSERT_VI_LOCKED(vp, __func__); else ASSERT_VI_UNLOCKED(vp, __func__); mtx_assert(&mp->mnt_listmtx, MA_OWNED); return (ret); } static struct vnode * mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) { struct vnode *vp, *nvp; mtx_assert(&mp->mnt_listmtx, MA_OWNED); KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); restart: vp = TAILQ_NEXT(*mvp, v_actfreelist); while (vp != NULL) { if (vp->v_type == VMARKER) { vp = TAILQ_NEXT(vp, v_actfreelist); continue; } /* * Try-lock because this is the wrong lock order. If that does * not succeed, drop the mount vnode list lock and try to * reacquire it and the vnode interlock in the right order. */ if (!VI_TRYLOCK(vp) && !mnt_vnode_next_active_relock(*mvp, mp, vp)) goto restart; KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); KASSERT(vp->v_mount == mp || vp->v_mount == NULL, ("alien vnode on the active list %p %p", vp, mp)); if (vp->v_mount == mp && (vp->v_iflag & VI_DOOMED) == 0) break; nvp = TAILQ_NEXT(vp, v_actfreelist); VI_UNLOCK(vp); vp = nvp; } TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); /* Check if we are done */ if (vp == NULL) { mtx_unlock(&mp->mnt_listmtx); mnt_vnode_markerfree_active(mvp, mp); return (NULL); } TAILQ_INSERT_AFTER(&mp->mnt_activevnodelist, vp, *mvp, v_actfreelist); mtx_unlock(&mp->mnt_listmtx); ASSERT_VI_LOCKED(vp, "active iter"); KASSERT((vp->v_iflag & VI_ACTIVE) != 0, ("Non-active vp %p", vp)); return (vp); } struct vnode * __mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) { if (should_yield()) kern_yield(PRI_USER); mtx_lock(&mp->mnt_listmtx); return (mnt_vnode_next_active(mvp, mp)); } struct vnode * __mnt_vnode_first_active(struct vnode **mvp, struct mount *mp) { struct vnode *vp; *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); MNT_ILOCK(mp); MNT_REF(mp); MNT_IUNLOCK(mp); (*mvp)->v_type = VMARKER; (*mvp)->v_mount = mp; mtx_lock(&mp->mnt_listmtx); vp = TAILQ_FIRST(&mp->mnt_activevnodelist); if (vp == NULL) { mtx_unlock(&mp->mnt_listmtx); mnt_vnode_markerfree_active(mvp, mp); return (NULL); } TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist); return (mnt_vnode_next_active(mvp, mp)); } void __mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) { if (*mvp == NULL) return; mtx_lock(&mp->mnt_listmtx); TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); mtx_unlock(&mp->mnt_listmtx); mnt_vnode_markerfree_active(mvp, mp); } Index: projects/clang500-import/sys/kern/vnode_if.src =================================================================== --- projects/clang500-import/sys/kern/vnode_if.src (revision 319548) +++ projects/clang500-import/sys/kern/vnode_if.src (revision 319549) @@ 
-1,752 +1,752 @@ #- # Copyright (c) 1992, 1993 # The Regents of the University of California. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the University nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # # @(#)vnode_if.src 8.12 (Berkeley) 5/14/95 # $FreeBSD$ # # # Above each of the vop descriptors in lines starting with %% # is a specification of the locking protocol used by each vop call. # The first column is the name of the variable; the remaining three # columns are in, out, and error, respectively. The "in" column defines # the lock state on input, the "out" column defines the state on successful # return, and the "error" column defines the locking state on error exit. # # The locking value can take the following values: # L: locked; not converted to type of lock. # E: locked with exclusive lock for this process. # U: unlocked. # -: not applicable. vnode does not yet (or no longer) exist. # =: the same on input and output, may be either L or U. # # The parameter named "vpp" is assumed to be always used with double # indirection (**vpp) and that name is hard-coded in vnode_if.awk! # # Lines starting with %! specify a pre- or post-condition function # to call before/after the vop call. # # If other such parameters are introduced, they have to be added to # the AWK script at the head of the definition of "add_debug_code()". # vop_islocked { IN struct vnode *vp; }; %% lookup dvp L L L %% lookup vpp - L - # XXX - the lookup locking protocol defies simple description and depends # on the flags and operation fields in the (cnp) structure. Note # especially that *vpp may equal dvp and both may be locked. vop_lookup { IN struct vnode *dvp; INOUT struct vnode **vpp; IN struct componentname *cnp; }; %% cachedlookup dvp L L L %% cachedlookup vpp - L - # This must be an exact copy of lookup. See kern/vfs_cache.c for details. vop_cachedlookup { IN struct vnode *dvp; INOUT struct vnode **vpp; IN struct componentname *cnp; }; %% create dvp E E E %% create vpp - L - %!
create post vop_create_post vop_create { IN struct vnode *dvp; OUT struct vnode **vpp; IN struct componentname *cnp; IN struct vattr *vap; }; %% whiteout dvp E E E vop_whiteout { IN struct vnode *dvp; IN struct componentname *cnp; IN int flags; }; %% mknod dvp E E E %% mknod vpp - L - %! mknod post vop_mknod_post vop_mknod { IN struct vnode *dvp; OUT struct vnode **vpp; IN struct componentname *cnp; IN struct vattr *vap; }; %% open vp L L L %! open post vop_open_post vop_open { IN struct vnode *vp; IN int mode; IN struct ucred *cred; IN struct thread *td; IN struct file *fp; }; %% close vp L L L %! close post vop_close_post vop_close { IN struct vnode *vp; IN int fflag; IN struct ucred *cred; IN struct thread *td; }; %% access vp L L L vop_access { IN struct vnode *vp; IN accmode_t accmode; IN struct ucred *cred; IN struct thread *td; }; %% accessx vp L L L vop_accessx { IN struct vnode *vp; IN accmode_t accmode; IN struct ucred *cred; IN struct thread *td; }; %% getattr vp L L L vop_getattr { IN struct vnode *vp; OUT struct vattr *vap; IN struct ucred *cred; }; %% setattr vp E E E %! setattr post vop_setattr_post vop_setattr { IN struct vnode *vp; IN struct vattr *vap; IN struct ucred *cred; }; %% markatime vp L L L vop_markatime { IN struct vnode *vp; }; %% read vp L L L %! read post vop_read_post vop_read { IN struct vnode *vp; INOUT struct uio *uio; IN int ioflag; IN struct ucred *cred; }; %% write vp L L L %! write pre VOP_WRITE_PRE %! write post VOP_WRITE_POST vop_write { IN struct vnode *vp; INOUT struct uio *uio; IN int ioflag; IN struct ucred *cred; }; %% ioctl vp U U U vop_ioctl { IN struct vnode *vp; IN u_long command; IN void *data; IN int fflag; IN struct ucred *cred; IN struct thread *td; }; %% poll vp U U U vop_poll { IN struct vnode *vp; IN int events; IN struct ucred *cred; IN struct thread *td; }; %% kqfilter vp U U U vop_kqfilter { IN struct vnode *vp; IN struct knote *kn; }; %% revoke vp L L L vop_revoke { IN struct vnode *vp; IN int flags; }; %% fsync vp L L L vop_fsync { IN struct vnode *vp; IN int waitfor; IN struct thread *td; }; %% remove dvp E E E %% remove vp E E E %! remove post vop_remove_post vop_remove { IN struct vnode *dvp; IN struct vnode *vp; IN struct componentname *cnp; }; %% link tdvp E E E %% link vp E E E %! link post vop_link_post vop_link { IN struct vnode *tdvp; IN struct vnode *vp; IN struct componentname *cnp; }; %! rename pre vop_rename_pre %! rename post vop_rename_post vop_rename { IN WILLRELE struct vnode *fdvp; IN WILLRELE struct vnode *fvp; IN struct componentname *fcnp; IN WILLRELE struct vnode *tdvp; IN WILLRELE struct vnode *tvp; IN struct componentname *tcnp; }; %% mkdir dvp E E E %% mkdir vpp - E - %! mkdir post vop_mkdir_post vop_mkdir { IN struct vnode *dvp; OUT struct vnode **vpp; IN struct componentname *cnp; IN struct vattr *vap; }; %% rmdir dvp E E E %% rmdir vp E E E %! rmdir post vop_rmdir_post vop_rmdir { IN struct vnode *dvp; IN struct vnode *vp; IN struct componentname *cnp; }; %% symlink dvp E E E %% symlink vpp - E - %! symlink post vop_symlink_post vop_symlink { IN struct vnode *dvp; OUT struct vnode **vpp; IN struct componentname *cnp; IN struct vattr *vap; IN char *target; }; %% readdir vp L L L %! 
readdir post vop_readdir_post vop_readdir { IN struct vnode *vp; INOUT struct uio *uio; IN struct ucred *cred; INOUT int *eofflag; OUT int *ncookies; INOUT u_long **cookies; }; %% readlink vp L L L vop_readlink { IN struct vnode *vp; INOUT struct uio *uio; IN struct ucred *cred; }; %% inactive vp E E E vop_inactive { IN struct vnode *vp; IN struct thread *td; }; %% reclaim vp E E E %! reclaim post vop_reclaim_post vop_reclaim { IN struct vnode *vp; IN struct thread *td; }; %! lock1 pre vop_lock_pre %! lock1 post vop_lock_post vop_lock1 { IN struct vnode *vp; IN int flags; IN char *file; IN int line; }; %! unlock pre vop_unlock_pre %! unlock post vop_unlock_post vop_unlock { IN struct vnode *vp; IN int flags; }; %% bmap vp L L L vop_bmap { IN struct vnode *vp; IN daddr_t bn; OUT struct bufobj **bop; IN daddr_t *bnp; OUT int *runp; OUT int *runb; }; %% strategy vp L L L %! strategy pre vop_strategy_pre vop_strategy { IN struct vnode *vp; IN struct buf *bp; }; %% getwritemount vp = = = vop_getwritemount { IN struct vnode *vp; OUT struct mount **mpp; }; %% print vp - - - vop_print { IN struct vnode *vp; }; %% pathconf vp L L L vop_pathconf { IN struct vnode *vp; IN int name; OUT register_t *retval; }; %% advlock vp U U U vop_advlock { IN struct vnode *vp; IN void *id; IN int op; IN struct flock *fl; IN int flags; }; %% advlockasync vp U U U vop_advlockasync { IN struct vnode *vp; IN void *id; IN int op; IN struct flock *fl; IN int flags; IN struct task *task; INOUT void **cookiep; }; %% advlockpurge vp E E E vop_advlockpurge { IN struct vnode *vp; }; %% reallocblks vp E E E vop_reallocblks { IN struct vnode *vp; IN struct cluster_save *buflist; }; %% getpages vp L L L vop_getpages { IN struct vnode *vp; IN vm_page_t *m; IN int count; IN int *rbehind; IN int *rahead; }; %% getpages_async vp L L L vop_getpages_async { IN struct vnode *vp; IN vm_page_t *m; IN int count; IN int *rbehind; IN int *rahead; IN vop_getpages_iodone_t *iodone; IN void *arg; }; %% putpages vp L L L vop_putpages { IN struct vnode *vp; IN vm_page_t *m; IN int count; IN int sync; IN int *rtvals; }; %% getacl vp L L L vop_getacl { IN struct vnode *vp; IN acl_type_t type; OUT struct acl *aclp; IN struct ucred *cred; IN struct thread *td; }; %% setacl vp E E E vop_setacl { IN struct vnode *vp; IN acl_type_t type; IN struct acl *aclp; IN struct ucred *cred; IN struct thread *td; }; %% aclcheck vp = = = vop_aclcheck { IN struct vnode *vp; IN acl_type_t type; IN struct acl *aclp; IN struct ucred *cred; IN struct thread *td; }; %% closeextattr vp L L L vop_closeextattr { IN struct vnode *vp; IN int commit; IN struct ucred *cred; IN struct thread *td; }; %% getextattr vp L L L vop_getextattr { IN struct vnode *vp; IN int attrnamespace; IN const char *name; INOUT struct uio *uio; OUT size_t *size; IN struct ucred *cred; IN struct thread *td; }; %% listextattr vp L L L vop_listextattr { IN struct vnode *vp; IN int attrnamespace; INOUT struct uio *uio; OUT size_t *size; IN struct ucred *cred; IN struct thread *td; }; %% openextattr vp L L L vop_openextattr { IN struct vnode *vp; IN struct ucred *cred; IN struct thread *td; }; %% deleteextattr vp E E E %! deleteextattr post vop_deleteextattr_post vop_deleteextattr { IN struct vnode *vp; IN int attrnamespace; IN const char *name; IN struct ucred *cred; IN struct thread *td; }; %% setextattr vp E E E %! 
setextattr post vop_setextattr_post vop_setextattr { IN struct vnode *vp; IN int attrnamespace; IN const char *name; INOUT struct uio *uio; IN struct ucred *cred; IN struct thread *td; }; %% setlabel vp E E E vop_setlabel { IN struct vnode *vp; IN struct label *label; IN struct ucred *cred; IN struct thread *td; }; %% vptofh vp = = = vop_vptofh { IN struct vnode *vp; IN struct fid *fhp; }; %% vptocnp vp L L L %% vptocnp vpp - U - vop_vptocnp { IN struct vnode *vp; OUT struct vnode **vpp; IN struct ucred *cred; INOUT char *buf; INOUT int *buflen; }; %% allocate vp E E E vop_allocate { IN struct vnode *vp; INOUT off_t *offset; INOUT off_t *len; }; %% advise vp U U U vop_advise { IN struct vnode *vp; IN off_t start; IN off_t end; IN int advice; }; %% unp_bind vp E E E vop_unp_bind { IN struct vnode *vp; - IN struct socket *socket; + IN struct unpcb *unpcb; }; %% unp_connect vp L L L vop_unp_connect { IN struct vnode *vp; - OUT struct socket **socket; + OUT struct unpcb **unpcb; }; %% unp_detach vp = = = vop_unp_detach { IN struct vnode *vp; }; %% is_text vp L L L vop_is_text { IN struct vnode *vp; }; %% set_text vp E E E vop_set_text { IN struct vnode *vp; }; %% vop_unset_text vp E E E vop_unset_text { IN struct vnode *vp; }; %% get_writecount vp L L L vop_get_writecount { IN struct vnode *vp; OUT int *writecount; }; %% add_writecount vp E E E vop_add_writecount { IN struct vnode *vp; IN int inc; }; %% fdatasync vp L L L vop_fdatasync { IN struct vnode *vp; IN struct thread *td; }; # The VOPs below are spares at the end of the table to allow new VOPs to be # added in stable branches without breaking the KBI. New VOPs in HEAD should # be added above these spares. When merging a new VOP to a stable branch, # the new VOP should replace one of the spares. vop_spare1 { IN struct vnode *vp; }; vop_spare2 { IN struct vnode *vp; }; vop_spare3 { IN struct vnode *vp; }; vop_spare4 { IN struct vnode *vp; }; vop_spare5 { IN struct vnode *vp; }; Index: projects/clang500-import/sys/sys/sockopt.h =================================================================== --- projects/clang500-import/sys/sys/sockopt.h (revision 319548) +++ projects/clang500-import/sys/sys/sockopt.h (revision 319549) @@ -1,72 +1,72 @@ /*- * Copyright (c) 1982, 1986, 1990, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)socketvar.h 8.3 (Berkeley) 2/19/95 * * $FreeBSD$ */ #ifndef _SYS_SOCKOPT_H_ #define _SYS_SOCKOPT_H_ #ifndef _KERNEL #error "no user-serviceable parts inside" #endif struct thread; struct socket; /* * Argument structure for sosetopt et seq. This is in the KERNEL * section because it will never be visible to user code. */ enum sopt_dir { SOPT_GET, SOPT_SET }; struct sockopt { enum sopt_dir sopt_dir; /* is this a get or a set? */ int sopt_level; /* second arg of [gs]etsockopt */ int sopt_name; /* third arg of [gs]etsockopt */ void *sopt_val; /* fourth arg of [gs]etsockopt */ size_t sopt_valsize; /* (almost) fifth arg of [gs]etsockopt */ struct thread *sopt_td; /* calling thread or null if kernel */ }; int sosetopt(struct socket *so, struct sockopt *sopt); int sogetopt(struct socket *so, struct sockopt *sopt); int sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen); int sooptcopyout(struct sockopt *sopt, const void *buf, size_t len); /* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */ int soopt_getm(struct sockopt *sopt, struct mbuf **mp); int soopt_mcopyin(struct sockopt *sopt, struct mbuf *m); int soopt_mcopyout(struct sockopt *sopt, struct mbuf *m); -int do_getopt_accept_filter(struct socket *so, struct sockopt *sopt); -int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt); +int accept_filt_getopt(struct socket *, struct sockopt *); +int accept_filt_setopt(struct socket *, struct sockopt *); int so_setsockopt(struct socket *so, int level, int optname, void *optval, size_t optlen); #endif /* _SYS_SOCKOPT_H_ */ Index: projects/clang500-import/sys/sys/unpcb.h =================================================================== --- projects/clang500-import/sys/sys/unpcb.h (revision 319548) +++ projects/clang500-import/sys/sys/unpcb.h (revision 319549) @@ -1,153 +1,147 @@ /*- * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)unpcb.h 8.1 (Berkeley) 6/2/93 * $FreeBSD$ */ #ifndef _SYS_UNPCB_H_ #define _SYS_UNPCB_H_ #include <sys/queue.h> #include <sys/ucred.h> /* * Protocol control block for an active * instance of a UNIX internal protocol. * * A socket may be associated with a vnode in the * filesystem. If so, the unp_vnode pointer holds * a reference count to this vnode, which should be irele'd * when the socket goes away. * * A socket may be connected to another socket, in which * case the control block of the socket to which it is connected * is given by unp_conn. * * A socket may be referenced by a number of sockets (e.g., several * sockets may be connected to a datagram socket). These sockets * are in a linked list starting with unp_refs, linked through * unp_nextref and null-terminated. Note that a socket may be referenced * by a number of other sockets and may also reference a socket (not * necessarily one which is referencing it). This generates * the need for unp_refs and unp_nextref to be separate fields. * * Stream sockets keep copies of receive sockbuf sb_cc and sb_mbcnt * so that changes in the sockbuf may be computed to modify * back pressure on the sender accordingly. */ typedef u_quad_t unp_gen_t; LIST_HEAD(unp_head, unpcb); struct unpcb { LIST_ENTRY(unpcb) unp_link; /* glue on list of all PCBs */ struct socket *unp_socket; /* pointer back to socket */ struct file *unp_file; /* back-pointer to file for gc. */ struct vnode *unp_vnode; /* if associated with file */ ino_t unp_ino; /* fake inode number */ struct unpcb *unp_conn; /* control block of connected socket */ struct unp_head unp_refs; /* referencing socket linked list */ LIST_ENTRY(unpcb) unp_reflink; /* link in unp_refs list */ struct sockaddr_un *unp_addr; /* bound address of socket */ int reserved1; int reserved2; unp_gen_t unp_gencnt; /* generation count of this instance */ short unp_flags; /* flags */ short unp_gcflag; /* Garbage collector flags. */ struct xucred unp_peercred; /* peer credentials, if applicable */ u_int unp_refcount; u_int unp_msgcount; /* references from message queue */ struct mtx unp_mtx; /* mutex */ }; /* * Flags in unp_flags. * * UNP_HAVEPC - indicates that the unp_peercred member is filled in * and is really the credentials of the connected peer. This is used * to determine whether the contents should be sent to the user or * not. - * - * UNP_HAVEPCCACHED - indicates that the unp_peercred member is filled - * in, but does *not* contain the credentials of the connected peer - * (there may not even be a peer). This is set in unp_listen() when - * it fills in unp_peercred for later consumption by unp_connect(). */ #define UNP_HAVEPC 0x001 -#define UNP_HAVEPCCACHED 0x002 #define UNP_WANTCRED 0x004 /* credentials wanted */ #define UNP_CONNWAIT 0x008 /* connect blocks until accepted */ /* * These flags are used to handle non-atomicity in connect() and bind() * operations on a socket: in particular, to avoid races between multiple * threads or processes operating simultaneously on the same socket.
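* * A sketch of the guard these flags enable (assumed caller pattern, not text from this header): * * if (unp->unp_flags & (UNP_CONNECTING | UNP_BINDING)) * ... fail, or wait for the owning thread ... * unp->unp_flags |= UNP_BINDING; * ... drop the pcb lock for the sleepable vnode work, then retake it ... * unp->unp_flags &= ~UNP_BINDING;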
*/ #define UNP_CONNECTING 0x010 /* Currently connecting. */ #define UNP_BINDING 0x020 /* Currently binding. */ #define UNP_NASCENT 0x040 /* Newborn child socket. */ /* * Flags in unp_gcflag. */ #define UNPGC_REF 0x1 /* unpcb has external ref. */ #define UNPGC_DEAD 0x2 /* unpcb might be dead. */ #define UNPGC_SCANNED 0x4 /* Has been scanned. */ #define UNPGC_IGNORE_RIGHTS 0x8 /* Attached rights are freed */ #define sotounpcb(so) ((struct unpcb *)((so)->so_pcb)) /* Hack alert -- this structure depends on <sys/socketvar.h>. */ #ifdef _SYS_SOCKETVAR_H_ struct xunpcb { size_t xu_len; /* length of this structure */ struct unpcb *xu_unpp; /* to help netstat, fstat */ struct unpcb xu_unp; /* our information */ union { struct sockaddr_un xuu_addr; /* our bound address */ char xu_dummy1[256]; } xu_au; #define xu_addr xu_au.xuu_addr union { struct sockaddr_un xuu_caddr; /* their bound address */ char xu_dummy2[256]; } xu_cau; #define xu_caddr xu_cau.xuu_caddr struct xsocket xu_socket; u_quad_t xu_alignment_hack; }; struct xunpgen { size_t xug_len; u_int xug_count; unp_gen_t xug_gen; so_gen_t xug_sogen; }; #endif /* _SYS_SOCKETVAR_H_ */ #endif /* _SYS_UNPCB_H_ */ Index: projects/clang500-import/sys/sys/vnode.h =================================================================== --- projects/clang500-import/sys/sys/vnode.h (revision 319548) +++ projects/clang500-import/sys/sys/vnode.h (revision 319549) @@ -1,892 +1,886 @@ /*- * Copyright (c) 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)vnode.h 8.7 (Berkeley) 2/4/94 * $FreeBSD$ */ #ifndef _SYS_VNODE_H_ #define _SYS_VNODE_H_ #include <sys/bufobj.h> #include <sys/queue.h> #include <sys/lock.h> #include <sys/lockmgr.h> #include <sys/mutex.h> #include <sys/rangelock.h> #include <sys/selinfo.h> #include <sys/uio.h> #include <sys/acl.h> #include <sys/ktr.h> /* * The vnode is the focus of all file activity in UNIX. There is a * unique vnode allocated for each active file, each current directory, * each mounted-on file, text file, and the root. */ /* * Vnode types. VNON means no type. */ enum vtype { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VSOCK, VFIFO, VBAD, VMARKER }; /* * Each underlying filesystem allocates its own private area and hangs * it from v_data.
If non-null, this area is freed in getnewvnode(). */ struct namecache; struct vpollinfo { struct mtx vpi_lock; /* lock to protect below */ struct selinfo vpi_selinfo; /* identity of poller(s) */ short vpi_events; /* what they are looking for */ short vpi_revents; /* what has happened */ }; /* * Reading or writing any of these items requires holding the appropriate lock. * * Lock reference: * c - namecache mutex * i - interlock * l - mp mnt_listmtx or freelist mutex * I - updated with atomics, 0->1 and 1->0 transitions with interlock held * m - mount point interlock * p - pollinfo lock * u - Only a reference to the vnode is needed to read. * v - vnode lock * * Vnodes may be found on many lists. The general way to deal with operating * on a vnode that is on a list is: * 1) Lock the list and find the vnode. * 2) Lock interlock so that the vnode does not go away. * 3) Unlock the list to avoid lock order reversals. * 4) vget with LK_INTERLOCK and check for ENOENT, or * 5) Check for DOOMED if the vnode lock is not required. * 6) Perform your operation, then vput(). */ #if defined(_KERNEL) || defined(_KVM_VNODE) struct vnode { /* * Fields which define the identity of the vnode. These fields are * owned by the filesystem (XXX: and vgone() ?) */ const char *v_tag; /* u type of underlying data */ struct vop_vector *v_op; /* u vnode operations vector */ void *v_data; /* u private data for fs */ /* * Filesystem instance stuff */ struct mount *v_mount; /* u ptr to vfs we are in */ TAILQ_ENTRY(vnode) v_nmntvnodes; /* m vnodes for mount point */ /* * Type specific fields, only one applies to any given vnode. - * See #defines below for renaming to v_* namespace. */ union { - struct mount *vu_mount; /* v ptr to mountpoint (VDIR) */ - struct socket *vu_socket; /* v unix domain net (VSOCK) */ - struct cdev *vu_cdev; /* v device (VCHR, VBLK) */ - struct fifoinfo *vu_fifoinfo; /* v fifo (VFIFO) */ - } v_un; + struct mount *v_mountedhere; /* v ptr to mountpoint (VDIR) */ + struct unpcb *v_unpcb; /* v unix domain net (VSOCK) */ + struct cdev *v_rdev; /* v device (VCHR, VBLK) */ + struct fifoinfo *v_fifoinfo; /* v fifo (VFIFO) */ + }; /* * vfs_hash: (mount + inode) -> vnode hash. The hash value * itself is grouped with other int fields, to avoid padding. */ LIST_ENTRY(vnode) v_hashlist; /* * VFS_namecache stuff */ LIST_HEAD(, namecache) v_cache_src; /* c Cache entries from us */ TAILQ_HEAD(, namecache) v_cache_dst; /* c Cache entries to us */ struct namecache *v_cache_dd; /* c Cache entry for .. vnode */ /* * Locking */ struct lock v_lock; /* u (if fs don't have one) */ struct mtx v_interlock; /* lock for "i" things */ struct lock *v_vnlock; /* u pointer to vnode lock */ /* * The machinery of being a vnode */ TAILQ_ENTRY(vnode) v_actfreelist; /* l vnode active/free lists */ struct bufobj v_bufobj; /* * Buffer cache object */ /* * Hooks for various subsystems and features. */ struct vpollinfo *v_pollinfo; /* i Poll events, p for *v_pi */ struct label *v_label; /* MAC label for vnode */ struct lockf *v_lockf; /* Byte-level advisory lock list */ struct rangelock v_rl; /* Byte-range lock */ /* * clustering stuff */ daddr_t v_cstart; /* v start block of cluster */ daddr_t v_lasta; /* v last allocation */ daddr_t v_lastw; /* v last write */ int v_clen; /* v length of cur. cluster */ u_int v_holdcnt; /* I prevents recycling. 
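(held via vhold()/vdrop(); distinct from v_usecount below, which counts active users acquired with vref()/vrele())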
*/ u_int v_usecount; /* I ref count of users */ u_int v_iflag; /* i vnode flags (see below) */ u_int v_vflag; /* v vnode flags */ u_int v_mflag; /* l mnt-specific vnode flags */ int v_writecount; /* v ref count of writers */ u_int v_hash; enum vtype v_type; /* u vnode type */ }; #endif /* defined(_KERNEL) || defined(_KVM_VNODE) */ -#define v_mountedhere v_un.vu_mount -#define v_socket v_un.vu_socket -#define v_rdev v_un.vu_cdev -#define v_fifoinfo v_un.vu_fifoinfo - #define bo2vnode(bo) __containerof((bo), struct vnode, v_bufobj) /* XXX: These are temporary to avoid a source sweep at this time */ #define v_object v_bufobj.bo_object /* * Userland version of struct vnode, for sysctl. */ struct xvnode { size_t xv_size; /* sizeof(struct xvnode) */ void *xv_vnode; /* address of real vnode */ u_long xv_flag; /* vnode vflags */ int xv_usecount; /* reference count of users */ int xv_writecount; /* reference count of writers */ int xv_holdcnt; /* page & buffer references */ u_long xv_id; /* capability identifier */ void *xv_mount; /* address of parent mount */ long xv_numoutput; /* num of writes in progress */ enum vtype xv_type; /* vnode type */ union { - void *xvu_socket; /* socket, if VSOCK */ + void *xvu_socket; /* unpcb, if VSOCK */ void *xvu_fifo; /* fifo, if VFIFO */ dev_t xvu_rdev; /* maj/min, if VBLK/VCHR */ struct { dev_t xvu_dev; /* device, if VDIR/VREG/VLNK */ ino_t xvu_ino; /* id, if VDIR/VREG/VLNK */ } xv_uns; } xv_un; }; #define xv_socket xv_un.xvu_socket #define xv_fifo xv_un.xvu_fifo #define xv_rdev xv_un.xvu_rdev #define xv_dev xv_un.xv_uns.xvu_dev #define xv_ino xv_un.xv_uns.xvu_ino /* We don't need to lock the knlist */ #define VN_KNLIST_EMPTY(vp) ((vp)->v_pollinfo == NULL || \ KNLIST_EMPTY(&(vp)->v_pollinfo->vpi_selinfo.si_note)) #define VN_KNOTE(vp, b, a) \ do { \ if (!VN_KNLIST_EMPTY(vp)) \ KNOTE(&vp->v_pollinfo->vpi_selinfo.si_note, (b), \ (a) | KNF_NOKQLOCK); \ } while (0) #define VN_KNOTE_LOCKED(vp, b) VN_KNOTE(vp, b, KNF_LISTLOCKED) #define VN_KNOTE_UNLOCKED(vp, b) VN_KNOTE(vp, b, 0) /* * Vnode flags. * VI flags are protected by interlock and live in v_iflag * VV flags are protected by the vnode lock and live in v_vflag * * VI_DOOMED is doubly protected by the interlock and vnode lock. Both * are required for writing but the status may be checked with either. */ #define VI_MOUNT 0x0020 /* Mount in progress */ #define VI_DOOMED 0x0080 /* This vnode is being recycled */ #define VI_FREE 0x0100 /* This vnode is on the freelist */ #define VI_ACTIVE 0x0200 /* This vnode is on the active list */ #define VI_DOINGINACT 0x0800 /* VOP_INACTIVE is in progress */ #define VI_OWEINACT 0x1000 /* Need to call inactive */ #define VV_ROOT 0x0001 /* root of its filesystem */ #define VV_ISTTY 0x0002 /* vnode represents a tty */ #define VV_NOSYNC 0x0004 /* unlinked, stop syncing */ #define VV_ETERNALDEV 0x0008 /* device that is never destroyed */ #define VV_CACHEDLABEL 0x0010 /* Vnode has valid cached MAC label */ #define VV_TEXT 0x0020 /* vnode is a pure text prototype */ #define VV_COPYONWRITE 0x0040 /* vnode is doing copy-on-write */ #define VV_SYSTEM 0x0080 /* vnode being used by kernel */ #define VV_PROCDEP 0x0100 /* vnode is process dependent */ #define VV_NOKNOTE 0x0200 /* don't activate knotes on this vnode */ #define VV_DELETED 0x0400 /* should be removed */ #define VV_MD 0x0800 /* vnode backs the md device */ #define VV_FORCEINSMQ 0x1000 /* force the insmntque to succeed */ #define VMP_TMPMNTFREELIST 0x0001 /* Vnode is on mnt's tmp free list */ /* * Vnode attributes. 
A field value of VNOVAL represents a field whose value * is unavailable (getattr) or which is not to be changed (setattr). */ struct vattr { enum vtype va_type; /* vnode type (for create) */ u_short va_mode; /* file's access mode and type */ u_short va_padding0; uid_t va_uid; /* owner user id */ gid_t va_gid; /* owner group id */ nlink_t va_nlink; /* number of references to file */ dev_t va_fsid; /* filesystem id */ ino_t va_fileid; /* file id */ u_quad_t va_size; /* file size in bytes */ long va_blocksize; /* blocksize preferred for i/o */ struct timespec va_atime; /* time of last access */ struct timespec va_mtime; /* time of last modification */ struct timespec va_ctime; /* time file changed */ struct timespec va_birthtime; /* time file created */ u_long va_gen; /* generation number of file */ u_long va_flags; /* flags defined for file */ dev_t va_rdev; /* device the special file represents */ u_quad_t va_bytes; /* bytes of disk space held by file */ u_quad_t va_filerev; /* file modification number */ u_int va_vaflags; /* operations flags, see below */ long va_spare; /* remain quad aligned */ }; /* * Flags for va_vaflags. */ #define VA_UTIMES_NULL 0x01 /* utimes argument was NULL */ #define VA_EXCLUSIVE 0x02 /* exclusive create request */ #define VA_SYNC 0x04 /* O_SYNC truncation */ /* * Flags for ioflag. (high 16 bits used to ask for read-ahead and * help with write clustering) * NB: IO_NDELAY and IO_DIRECT are linked to fcntl.h */ #define IO_UNIT 0x0001 /* do I/O as atomic unit */ #define IO_APPEND 0x0002 /* append write to end */ #define IO_NDELAY 0x0004 /* FNDELAY flag set in file table */ #define IO_NODELOCKED 0x0008 /* underlying node already locked */ #define IO_ASYNC 0x0010 /* bawrite rather than bdwrite */ #define IO_VMIO 0x0020 /* data already in VMIO space */ #define IO_INVAL 0x0040 /* invalidate after I/O */ #define IO_SYNC 0x0080 /* do I/O synchronously */ #define IO_DIRECT 0x0100 /* attempt to bypass buffer cache */ #define IO_NOREUSE 0x0200 /* VMIO data won't be reused */ #define IO_EXT 0x0400 /* operate on external attributes */ #define IO_NORMAL 0x0800 /* operate on regular data */ #define IO_NOMACCHECK 0x1000 /* MAC checks unnecessary */ #define IO_BUFLOCKED 0x2000 /* ffs flag; indir buf is locked */ #define IO_RANGELOCKED 0x4000 /* range locked */ #define IO_SEQMAX 0x7F /* seq heuristic max value */ #define IO_SEQSHIFT 16 /* seq heuristic in upper 16 bits */ /* * Flags for accmode_t. */ #define VEXEC 000000000100 /* execute/search permission */ #define VWRITE 000000000200 /* write permission */ #define VREAD 000000000400 /* read permission */ #define VADMIN 000000010000 /* being the file owner */ #define VAPPEND 000000040000 /* permission to write/append */ /* * VEXPLICIT_DENY makes VOP_ACCESSX(9) return EPERM or EACCES only * if permission was denied explicitly, by a "deny" rule in an NFSv4 ACL, * and 0 otherwise. This never happens with ordinary unix access rights * or POSIX.1e ACLs. Obviously, VEXPLICIT_DENY must be OR-ed with * some other V* constant.
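* * For instance (hypothetical caller; VOP_ACCESSX takes vp, accmode, cred, td per its declaration in vnode_if.src): * * error = VOP_ACCESSX(vp, VWRITE | VEXPLICIT_DENY, cred, td); * * Here error is 0 unless a "deny" ACL entry explicitly forbids VWRITE.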
*/ #define VEXPLICIT_DENY 000000100000 #define VREAD_NAMED_ATTRS 000000200000 /* not used */ #define VWRITE_NAMED_ATTRS 000000400000 /* not used */ #define VDELETE_CHILD 000001000000 #define VREAD_ATTRIBUTES 000002000000 /* permission to stat(2) */ #define VWRITE_ATTRIBUTES 000004000000 /* change {m,c,a}time */ #define VDELETE 000010000000 #define VREAD_ACL 000020000000 /* read ACL and file mode */ #define VWRITE_ACL 000040000000 /* change ACL and/or file mode */ #define VWRITE_OWNER 000100000000 /* change file owner */ #define VSYNCHRONIZE 000200000000 /* not used */ #define VCREAT 000400000000 /* creating new file */ #define VVERIFY 001000000000 /* verification required */ /* * Permissions that were traditionally granted only to the file owner. */ #define VADMIN_PERMS (VADMIN | VWRITE_ATTRIBUTES | VWRITE_ACL | \ VWRITE_OWNER) /* * Permissions that were traditionally granted to everyone. */ #define VSTAT_PERMS (VREAD_ATTRIBUTES | VREAD_ACL) /* * Permissions that allow changing the state of the file in any way. */ #define VMODIFY_PERMS (VWRITE | VAPPEND | VADMIN_PERMS | VDELETE_CHILD | \ VDELETE) /* * Token indicating no attribute value yet assigned. */ #define VNOVAL (-1) /* * LK_TIMELOCK timeout for vnode locks (used mainly by the pageout daemon) */ #define VLKTIMEOUT (hz / 20 + 1) #ifdef _KERNEL #ifdef MALLOC_DECLARE MALLOC_DECLARE(M_VNODE); #endif extern u_int ncsizefactor; /* * Convert between vnode types and inode formats (since POSIX.1 * defines mode word of stat structure in terms of inode formats). */ extern enum vtype iftovt_tab[]; extern int vttoif_tab[]; #define IFTOVT(mode) (iftovt_tab[((mode) & S_IFMT) >> 12]) #define VTTOIF(indx) (vttoif_tab[(int)(indx)]) #define MAKEIMODE(indx, mode) (int)(VTTOIF(indx) | (mode)) /* * Flags to various vnode functions. */ #define SKIPSYSTEM 0x0001 /* vflush: skip vnodes marked VSYSTEM */ #define FORCECLOSE 0x0002 /* vflush: force file closure */ #define WRITECLOSE 0x0004 /* vflush: only close writable files */ #define EARLYFLUSH 0x0008 /* vflush: early call for ffs_flushfiles */ #define V_SAVE 0x0001 /* vinvalbuf: sync file first */ #define V_ALT 0x0002 /* vinvalbuf: invalidate only alternate bufs */ #define V_NORMAL 0x0004 /* vinvalbuf: invalidate only regular bufs */ #define V_CLEANONLY 0x0008 /* vinvalbuf: invalidate only clean bufs */ #define V_VMIO 0x0010 /* vinvalbuf: called during pageout */ #define REVOKEALL 0x0001 /* vop_revoke: revoke all aliases */ #define V_WAIT 0x0001 /* vn_start_write: sleep for suspend */ #define V_NOWAIT 0x0002 /* vn_start_write: don't sleep for suspend */ #define V_XSLEEP 0x0004 /* vn_start_write: just return after sleep */ #define V_MNTREF 0x0010 /* vn_start_write: mp is already ref-ed */ #define VR_START_WRITE 0x0001 /* vfs_write_resume: start write atomically */ #define VR_NO_SUSPCLR 0x0002 /* vfs_write_resume: do not clear suspension */ #define VS_SKIP_UNMOUNT 0x0001 /* vfs_write_suspend: fail if the filesystem is being unmounted */ #define VREF(vp) vref(vp) #ifdef DIAGNOSTIC #define VATTR_NULL(vap) vattr_null(vap) #else #define VATTR_NULL(vap) (*(vap) = va_null) /* initialize a vattr */ #endif /* DIAGNOSTIC */ #define NULLVP ((struct vnode *)NULL) /* * Global vnode data. */ extern struct vnode *rootvnode; /* root (i.e.
"/") vnode */ extern struct mount *rootdevmp; /* "/dev" mount */ extern int desiredvnodes; /* number of vnodes desired */ extern struct uma_zone *namei_zone; extern struct vattr va_null; /* predefined null vattr structure */ #define VI_LOCK(vp) mtx_lock(&(vp)->v_interlock) #define VI_LOCK_FLAGS(vp, flags) mtx_lock_flags(&(vp)->v_interlock, (flags)) #define VI_TRYLOCK(vp) mtx_trylock(&(vp)->v_interlock) #define VI_UNLOCK(vp) mtx_unlock(&(vp)->v_interlock) #define VI_MTX(vp) (&(vp)->v_interlock) #define VN_LOCK_AREC(vp) lockallowrecurse((vp)->v_vnlock) #define VN_LOCK_ASHARE(vp) lockallowshare((vp)->v_vnlock) #define VN_LOCK_DSHARE(vp) lockdisableshare((vp)->v_vnlock) #endif /* _KERNEL */ /* * Mods for extensibility. */ /* * Flags for vdesc_flags: */ #define VDESC_MAX_VPS 16 /* Low order 16 flag bits are reserved for willrele flags for vp arguments. */ #define VDESC_VP0_WILLRELE 0x0001 #define VDESC_VP1_WILLRELE 0x0002 #define VDESC_VP2_WILLRELE 0x0004 #define VDESC_VP3_WILLRELE 0x0008 #define VDESC_NOMAP_VPP 0x0100 #define VDESC_VPP_WILLRELE 0x0200 /* * A generic structure. * This can be used by bypass routines to identify generic arguments. */ struct vop_generic_args { struct vnodeop_desc *a_desc; /* other random data follows, presumably */ }; typedef int vop_bypass_t(struct vop_generic_args *); /* * VDESC_NO_OFFSET is used to identify the end of the offset list * and in places where no such field exists. */ #define VDESC_NO_OFFSET -1 /* * This structure describes the vnode operation taking place. */ struct vnodeop_desc { char *vdesc_name; /* a readable name for debugging */ int vdesc_flags; /* VDESC_* flags */ vop_bypass_t *vdesc_call; /* Function to call */ /* * These ops are used by bypass routines to map and locate arguments. * Creds and procs are not needed in bypass routines, but sometimes * they are useful to (for example) transport layers. * Nameidata is useful because it has a cred in it. */ int *vdesc_vp_offsets; /* list ended by VDESC_NO_OFFSET */ int vdesc_vpp_offset; /* return vpp location */ int vdesc_cred_offset; /* cred location, if any */ int vdesc_thread_offset; /* thread location, if any */ int vdesc_componentname_offset; /* if any */ }; #ifdef _KERNEL /* * A list of all the operation descs. */ extern struct vnodeop_desc *vnodeop_descs[]; #define VOPARG_OFFSETOF(s_type, field) __offsetof(s_type, field) #define VOPARG_OFFSETTO(s_type, s_offset, struct_p) \ ((s_type)(((char*)(struct_p)) + (s_offset))) #ifdef DEBUG_VFS_LOCKS /* * Support code to aid in debugging VFS locking problems. Not totally * reliable since if the thread sleeps between changing the lock * state and checking it with the assert, some other thread could * change the state. They are good enough for debugging a single * filesystem using a single-threaded test. Note that the unreliability is * limited to false negatives; efforts were made to ensure that false * positives cannot occur. 
*/ void assert_vi_locked(struct vnode *vp, const char *str); void assert_vi_unlocked(struct vnode *vp, const char *str); void assert_vop_elocked(struct vnode *vp, const char *str); void assert_vop_locked(struct vnode *vp, const char *str); void assert_vop_unlocked(struct vnode *vp, const char *str); #define ASSERT_VI_LOCKED(vp, str) assert_vi_locked((vp), (str)) #define ASSERT_VI_UNLOCKED(vp, str) assert_vi_unlocked((vp), (str)) #define ASSERT_VOP_ELOCKED(vp, str) assert_vop_elocked((vp), (str)) #define ASSERT_VOP_LOCKED(vp, str) assert_vop_locked((vp), (str)) #define ASSERT_VOP_UNLOCKED(vp, str) assert_vop_unlocked((vp), (str)) #else /* !DEBUG_VFS_LOCKS */ #define ASSERT_VI_LOCKED(vp, str) ((void)0) #define ASSERT_VI_UNLOCKED(vp, str) ((void)0) #define ASSERT_VOP_ELOCKED(vp, str) ((void)0) #define ASSERT_VOP_LOCKED(vp, str) ((void)0) #define ASSERT_VOP_UNLOCKED(vp, str) ((void)0) #endif /* DEBUG_VFS_LOCKS */ /* * This call works for vnodes in the kernel. */ #define VCALL(c) ((c)->a_desc->vdesc_call(c)) #define DOINGASYNC(vp) \ (((vp)->v_mount->mnt_kern_flag & MNTK_ASYNC) != 0 && \ ((curthread->td_pflags & TDP_SYNCIO) == 0)) /* * VMIO support inline */ extern int vmiodirenable; static __inline int vn_canvmio(struct vnode *vp) { if (vp && (vp->v_type == VREG || (vmiodirenable && vp->v_type == VDIR))) return(TRUE); return(FALSE); } /* * Finally, include the default set of vnode operations. */ typedef void vop_getpages_iodone_t(void *, vm_page_t *, int, int); #include "vnode_if.h" /* vn_open_flags */ #define VN_OPEN_NOAUDIT 0x00000001 #define VN_OPEN_NOCAPCHECK 0x00000002 #define VN_OPEN_NAMECACHE 0x00000004 /* * Public vnode manipulation functions. */ struct componentname; struct file; struct mount; struct nameidata; struct ostat; struct freebsd11_stat; struct thread; struct proc; struct stat; struct nstat; struct ucred; struct uio; struct vattr; struct vfsops; struct vnode; typedef int (*vn_get_ino_t)(struct mount *, void *, int, struct vnode **); int bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn); /* cache_* may belong in namei.h. 
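* * Illustrative flow: after a filesystem lookup resolves cnp in dvp to vp, the mapping can be remembered with cache_enter(dvp, vp, cnp); it is invalidated later with cache_purge(vp).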
*/ void cache_changesize(int newhashsize); #define cache_enter(dvp, vp, cnp) \ cache_enter_time(dvp, vp, cnp, NULL, NULL) void cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, struct timespec *tsp, struct timespec *dtsp); int cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct timespec *tsp, int *ticksp); void cache_purge(struct vnode *vp); void cache_purge_negative(struct vnode *vp); void cache_purgevfs(struct mount *mp, bool force); int change_dir(struct vnode *vp, struct thread *td); void cvtstat(struct stat *st, struct ostat *ost); void freebsd11_cvtnstat(struct stat *sb, struct nstat *nsb); void freebsd11_cvtstat(struct stat *st, struct freebsd11_stat *ost); int getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, struct vnode **vpp); void getnewvnode_reserve(u_int count); void getnewvnode_drop_reserve(void); int insmntque1(struct vnode *vp, struct mount *mp, void (*dtr)(struct vnode *, void *), void *dtr_arg); int insmntque(struct vnode *vp, struct mount *mp); u_quad_t init_va_filerev(void); int speedup_syncer(void); int vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen); int vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf); int vn_fullpath_global(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf); struct vnode * vn_dir_dd_ino(struct vnode *vp); int vn_commname(struct vnode *vn, char *buf, u_int buflen); int vn_path_to_global_path(struct thread *td, struct vnode *vp, char *path, u_int pathlen); int vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, accmode_t accmode, struct ucred *cred, int *privused); int vaccess_acl_nfs4(enum vtype type, uid_t file_uid, gid_t file_gid, struct acl *aclp, accmode_t accmode, struct ucred *cred, int *privused); int vaccess_acl_posix1e(enum vtype type, uid_t file_uid, gid_t file_gid, struct acl *acl, accmode_t accmode, struct ucred *cred, int *privused); void vattr_null(struct vattr *vap); int vcount(struct vnode *vp); #define vdrop(vp) _vdrop((vp), 0) #define vdropl(vp) _vdrop((vp), 1) void _vdrop(struct vnode *, bool); int vflush(struct mount *mp, int rootrefs, int flags, struct thread *td); int vget(struct vnode *vp, int lockflag, struct thread *td); void vgone(struct vnode *vp); #define vhold(vp) _vhold((vp), 0) #define vholdl(vp) _vhold((vp), 1) void _vhold(struct vnode *, bool); void vinactive(struct vnode *, struct thread *); int vinvalbuf(struct vnode *vp, int save, int slpflag, int slptimeo); int vtruncbuf(struct vnode *vp, struct ucred *cred, off_t length, int blksize); void vunref(struct vnode *); void vn_printf(struct vnode *vp, const char *fmt, ...) 
__printflike(2,3); int vrecycle(struct vnode *vp); int vrecyclel(struct vnode *vp); int vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off, struct ucred *cred); int vn_close(struct vnode *vp, int flags, struct ucred *file_cred, struct thread *td); void vn_finished_write(struct mount *mp); void vn_finished_secondary_write(struct mount *mp); int vn_isdisk(struct vnode *vp, int *errp); int _vn_lock(struct vnode *vp, int flags, char *file, int line); #define vn_lock(vp, flags) _vn_lock(vp, flags, __FILE__, __LINE__) int vn_open(struct nameidata *ndp, int *flagp, int cmode, struct file *fp); int vn_open_cred(struct nameidata *ndp, int *flagp, int cmode, u_int vn_open_flags, struct ucred *cred, struct file *fp); int vn_open_vnode(struct vnode *vp, int fmode, struct ucred *cred, struct thread *td, struct file *fp); void vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end); int vn_pollrecord(struct vnode *vp, struct thread *p, int events); int vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset, enum uio_seg segflg, int ioflg, struct ucred *active_cred, struct ucred *file_cred, ssize_t *aresid, struct thread *td); int vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, void *base, size_t len, off_t offset, enum uio_seg segflg, int ioflg, struct ucred *active_cred, struct ucred *file_cred, size_t *aresid, struct thread *td); int vn_rlimit_fsize(const struct vnode *vn, const struct uio *uio, struct thread *td); int vn_stat(struct vnode *vp, struct stat *sb, struct ucred *active_cred, struct ucred *file_cred, struct thread *td); int vn_start_write(struct vnode *vp, struct mount **mpp, int flags); int vn_start_secondary_write(struct vnode *vp, struct mount **mpp, int flags); int vn_writechk(struct vnode *vp); int vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace, const char *attrname, int *buflen, char *buf, struct thread *td); int vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace, const char *attrname, int buflen, char *buf, struct thread *td); int vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace, const char *attrname, struct thread *td); int vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags, struct vnode **rvp); int vn_vget_ino_gen(struct vnode *vp, vn_get_ino_t alloc, void *alloc_arg, int lkflags, struct vnode **rvp); int vn_utimes_perm(struct vnode *vp, struct vattr *vap, struct ucred *cred, struct thread *td); int vn_io_fault_uiomove(char *data, int xfersize, struct uio *uio); int vn_io_fault_pgmove(vm_page_t ma[], vm_offset_t offset, int xfersize, struct uio *uio); #define vn_rangelock_unlock(vp, cookie) \ rangelock_unlock(&(vp)->v_rl, (cookie), VI_MTX(vp)) #define vn_rangelock_unlock_range(vp, cookie, start, end) \ rangelock_unlock_range(&(vp)->v_rl, (cookie), (start), (end), \ VI_MTX(vp)) #define vn_rangelock_rlock(vp, start, end) \ rangelock_rlock(&(vp)->v_rl, (start), (end), VI_MTX(vp)) #define vn_rangelock_wlock(vp, start, end) \ rangelock_wlock(&(vp)->v_rl, (start), (end), VI_MTX(vp)) int vfs_cache_lookup(struct vop_lookup_args *ap); void vfs_timestamp(struct timespec *); void vfs_write_resume(struct mount *mp, int flags); int vfs_write_suspend(struct mount *mp, int flags); int vfs_write_suspend_umnt(struct mount *mp); void vnlru_free(int, struct vfsops *); int vop_stdbmap(struct vop_bmap_args *); int vop_stdfdatasync_buf(struct vop_fdatasync_args *); int vop_stdfsync(struct vop_fsync_args *); int vop_stdgetwritemount(struct vop_getwritemount_args *); int vop_stdgetpages(struct vop_getpages_args *); int 
vop_stdinactive(struct vop_inactive_args *); int vop_stdislocked(struct vop_islocked_args *); int vop_stdkqfilter(struct vop_kqfilter_args *); int vop_stdlock(struct vop_lock1_args *); int vop_stdputpages(struct vop_putpages_args *); int vop_stdunlock(struct vop_unlock_args *); int vop_nopoll(struct vop_poll_args *); int vop_stdaccess(struct vop_access_args *ap); int vop_stdaccessx(struct vop_accessx_args *ap); int vop_stdadvise(struct vop_advise_args *ap); int vop_stdadvlock(struct vop_advlock_args *ap); int vop_stdadvlockasync(struct vop_advlockasync_args *ap); int vop_stdadvlockpurge(struct vop_advlockpurge_args *ap); int vop_stdallocate(struct vop_allocate_args *ap); int vop_stdpathconf(struct vop_pathconf_args *); int vop_stdpoll(struct vop_poll_args *); int vop_stdvptocnp(struct vop_vptocnp_args *ap); int vop_stdvptofh(struct vop_vptofh_args *ap); int vop_stdunp_bind(struct vop_unp_bind_args *ap); int vop_stdunp_connect(struct vop_unp_connect_args *ap); int vop_stdunp_detach(struct vop_unp_detach_args *ap); int vop_eopnotsupp(struct vop_generic_args *ap); int vop_ebadf(struct vop_generic_args *ap); int vop_einval(struct vop_generic_args *ap); int vop_enoent(struct vop_generic_args *ap); int vop_enotty(struct vop_generic_args *ap); int vop_null(struct vop_generic_args *ap); int vop_panic(struct vop_generic_args *ap); int dead_poll(struct vop_poll_args *ap); int dead_read(struct vop_read_args *ap); int dead_write(struct vop_write_args *ap); /* These are called from within the actual VOPS. */ void vop_close_post(void *a, int rc); void vop_create_post(void *a, int rc); void vop_deleteextattr_post(void *a, int rc); void vop_link_post(void *a, int rc); void vop_lookup_post(void *a, int rc); void vop_lookup_pre(void *a); void vop_mkdir_post(void *a, int rc); void vop_mknod_post(void *a, int rc); void vop_open_post(void *a, int rc); void vop_read_post(void *a, int rc); void vop_readdir_post(void *a, int rc); void vop_reclaim_post(void *a, int rc); void vop_remove_post(void *a, int rc); void vop_rename_post(void *a, int rc); void vop_rename_pre(void *a); void vop_rmdir_post(void *a, int rc); void vop_setattr_post(void *a, int rc); void vop_setextattr_post(void *a, int rc); void vop_symlink_post(void *a, int rc); #ifdef DEBUG_VFS_LOCKS void vop_strategy_pre(void *a); void vop_lock_pre(void *a); void vop_lock_post(void *a, int rc); void vop_unlock_post(void *a, int rc); void vop_unlock_pre(void *a); #else #define vop_strategy_pre(x) do { } while (0) #define vop_lock_pre(x) do { } while (0) #define vop_lock_post(x, y) do { } while (0) #define vop_unlock_post(x, y) do { } while (0) #define vop_unlock_pre(x) do { } while (0) #endif void vop_rename_fail(struct vop_rename_args *ap); #define VOP_WRITE_PRE(ap) \ struct vattr va; \ int error; \ off_t osize, ooffset, noffset; \ \ osize = ooffset = noffset = 0; \ if (!VN_KNLIST_EMPTY((ap)->a_vp)) { \ error = VOP_GETATTR((ap)->a_vp, &va, (ap)->a_cred); \ if (error) \ return (error); \ ooffset = (ap)->a_uio->uio_offset; \ osize = (off_t)va.va_size; \ } #define VOP_WRITE_POST(ap, ret) \ noffset = (ap)->a_uio->uio_offset; \ if (noffset > ooffset && !VN_KNLIST_EMPTY((ap)->a_vp)) { \ VFS_KNOTE_LOCKED((ap)->a_vp, NOTE_WRITE \ | (noffset > osize ? 
NOTE_EXTEND : 0)); \ } #define VOP_LOCK(vp, flags) VOP_LOCK1(vp, flags, __FILE__, __LINE__) void vput(struct vnode *vp); void vrele(struct vnode *vp); void vref(struct vnode *vp); void vrefl(struct vnode *vp); void vrefact(struct vnode *vp); int vrefcnt(struct vnode *vp); void v_addpollinfo(struct vnode *vp); int vnode_create_vobject(struct vnode *vp, off_t size, struct thread *td); void vnode_destroy_vobject(struct vnode *vp); extern struct vop_vector fifo_specops; extern struct vop_vector dead_vnodeops; extern struct vop_vector default_vnodeops; #define VOP_PANIC ((void*)(uintptr_t)vop_panic) #define VOP_NULL ((void*)(uintptr_t)vop_null) #define VOP_EBADF ((void*)(uintptr_t)vop_ebadf) #define VOP_ENOTTY ((void*)(uintptr_t)vop_enotty) #define VOP_EINVAL ((void*)(uintptr_t)vop_einval) #define VOP_ENOENT ((void*)(uintptr_t)vop_enoent) #define VOP_EOPNOTSUPP ((void*)(uintptr_t)vop_eopnotsupp) /* fifo_vnops.c */ int fifo_printinfo(struct vnode *); /* vfs_hash.c */ typedef int vfs_hash_cmp_t(struct vnode *vp, void *arg); void vfs_hash_changesize(int newhashsize); int vfs_hash_get(const struct mount *mp, u_int hash, int flags, struct thread *td, struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg); u_int vfs_hash_index(struct vnode *vp); int vfs_hash_insert(struct vnode *vp, u_int hash, int flags, struct thread *td, struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg); void vfs_hash_ref(const struct mount *mp, u_int hash, struct thread *td, struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg); void vfs_hash_rehash(struct vnode *vp, u_int hash); void vfs_hash_remove(struct vnode *vp); int vfs_kqfilter(struct vop_kqfilter_args *); void vfs_mark_atime(struct vnode *vp, struct ucred *cred); struct dirent; int vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off); int vfs_unixify_accmode(accmode_t *accmode); void vfs_unp_reclaim(struct vnode *vp); int setfmode(struct thread *td, struct ucred *cred, struct vnode *vp, int mode); int setfown(struct thread *td, struct ucred *cred, struct vnode *vp, uid_t uid, gid_t gid); int vn_chmod(struct file *fp, mode_t mode, struct ucred *active_cred, struct thread *td); int vn_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred, struct thread *td); void vn_fsid(struct vnode *vp, struct vattr *va); #endif /* _KERNEL */ #endif /* !_SYS_VNODE_H_ */ Index: projects/clang500-import/sys/ufs/ffs/ffs_softdep.c =================================================================== --- projects/clang500-import/sys/ufs/ffs/ffs_softdep.c (revision 319548) +++ projects/clang500-import/sys/ufs/ffs/ffs_softdep.c (revision 319549) @@ -1,14412 +1,14459 @@ /*- * Copyright 1998, 2000 Marshall Kirk McKusick. * Copyright 2009, 2010 Jeffrey W. Roberson * All rights reserved. * * The soft updates code is derived from the appendix of a University * of Michigan technical report (Gregory R. Ganger and Yale N. Patt, * "Soft Updates: A Solution to the Metadata Update Problem in File * Systems", CSE-TR-254-95, August 1995). * * Further information about soft updates can be obtained from: * * Marshall Kirk McKusick http://www.mckusick.com/softdep/ * 1614 Oxford Street mckusick@mckusick.com * Berkeley, CA 94709-1608 +1-510-843-9542 * USA * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * from: @(#)ffs_softdep.c 9.59 (McKusick) 6/21/00 */ #include __FBSDID("$FreeBSD$"); #include "opt_ffs.h" #include "opt_quota.h" #include "opt_ddb.h" /* * For now we want the safety net that the DEBUG flag provides. */ #ifndef DEBUG #define DEBUG #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define KTR_SUJ 0 /* Define to KTR_SPARE. */ #ifndef SOFTUPDATES int softdep_flushfiles(oldmnt, flags, td) struct mount *oldmnt; int flags; struct thread *td; { panic("softdep_flushfiles called"); } int softdep_mount(devvp, mp, fs, cred) struct vnode *devvp; struct mount *mp; struct fs *fs; struct ucred *cred; { return (0); } void softdep_initialize() { return; } void softdep_uninitialize() { return; } void softdep_unmount(mp) struct mount *mp; { panic("softdep_unmount called"); } void softdep_setup_sbupdate(ump, fs, bp) struct ufsmount *ump; struct fs *fs; struct buf *bp; { panic("softdep_setup_sbupdate called"); } void softdep_setup_inomapdep(bp, ip, newinum, mode) struct buf *bp; struct inode *ip; ino_t newinum; int mode; { panic("softdep_setup_inomapdep called"); } void softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags) struct buf *bp; struct mount *mp; ufs2_daddr_t newblkno; int frags; int oldfrags; { panic("softdep_setup_blkmapdep called"); } void softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp) struct inode *ip; ufs_lbn_t lbn; ufs2_daddr_t newblkno; ufs2_daddr_t oldblkno; long newsize; long oldsize; struct buf *bp; { panic("softdep_setup_allocdirect called"); } void softdep_setup_allocext(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp) struct inode *ip; ufs_lbn_t lbn; ufs2_daddr_t newblkno; ufs2_daddr_t oldblkno; long newsize; long oldsize; struct buf *bp; { panic("softdep_setup_allocext called"); } void softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp) struct inode *ip; ufs_lbn_t lbn; struct buf *bp; int ptrno; ufs2_daddr_t newblkno; ufs2_daddr_t oldblkno; struct buf *nbp; { panic("softdep_setup_allocindir_page called"); } void softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno) struct buf *nbp; struct inode *ip; struct buf *bp; int ptrno; ufs2_daddr_t newblkno; { panic("softdep_setup_allocindir_meta called"); } void softdep_journal_freeblocks(ip, cred, length, flags) struct inode *ip; struct ucred *cred; off_t 
length; int flags; { panic("softdep_journal_freeblocks called"); } void softdep_journal_fsync(ip) struct inode *ip; { panic("softdep_journal_fsync called"); } void softdep_setup_freeblocks(ip, length, flags) struct inode *ip; off_t length; int flags; { panic("softdep_setup_freeblocks called"); } void softdep_freefile(pvp, ino, mode) struct vnode *pvp; ino_t ino; int mode; { panic("softdep_freefile called"); } int softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk) struct buf *bp; struct inode *dp; off_t diroffset; ino_t newinum; struct buf *newdirbp; int isnewblk; { panic("softdep_setup_directory_add called"); } void softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize) struct buf *bp; struct inode *dp; caddr_t base; caddr_t oldloc; caddr_t newloc; int entrysize; { panic("softdep_change_directoryentry_offset called"); } void softdep_setup_remove(bp, dp, ip, isrmdir) struct buf *bp; struct inode *dp; struct inode *ip; int isrmdir; { panic("softdep_setup_remove called"); } void softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir) struct buf *bp; struct inode *dp; struct inode *ip; ino_t newinum; int isrmdir; { panic("softdep_setup_directory_change called"); } void softdep_setup_blkfree(mp, bp, blkno, frags, wkhd) struct mount *mp; struct buf *bp; ufs2_daddr_t blkno; int frags; struct workhead *wkhd; { panic("%s called", __FUNCTION__); } void softdep_setup_inofree(mp, bp, ino, wkhd) struct mount *mp; struct buf *bp; ino_t ino; struct workhead *wkhd; { panic("%s called", __FUNCTION__); } void softdep_setup_unlink(dp, ip) struct inode *dp; struct inode *ip; { panic("%s called", __FUNCTION__); } void softdep_setup_link(dp, ip) struct inode *dp; struct inode *ip; { panic("%s called", __FUNCTION__); } void softdep_revert_link(dp, ip) struct inode *dp; struct inode *ip; { panic("%s called", __FUNCTION__); } void softdep_setup_rmdir(dp, ip) struct inode *dp; struct inode *ip; { panic("%s called", __FUNCTION__); } void softdep_revert_rmdir(dp, ip) struct inode *dp; struct inode *ip; { panic("%s called", __FUNCTION__); } void softdep_setup_create(dp, ip) struct inode *dp; struct inode *ip; { panic("%s called", __FUNCTION__); } void softdep_revert_create(dp, ip) struct inode *dp; struct inode *ip; { panic("%s called", __FUNCTION__); } void softdep_setup_mkdir(dp, ip) struct inode *dp; struct inode *ip; { panic("%s called", __FUNCTION__); } void softdep_revert_mkdir(dp, ip) struct inode *dp; struct inode *ip; { panic("%s called", __FUNCTION__); } void softdep_setup_dotdot_link(dp, ip) struct inode *dp; struct inode *ip; { panic("%s called", __FUNCTION__); } int softdep_prealloc(vp, waitok) struct vnode *vp; int waitok; { panic("%s called", __FUNCTION__); } int softdep_journal_lookup(mp, vpp) struct mount *mp; struct vnode **vpp; { return (ENOENT); } void softdep_change_linkcnt(ip) struct inode *ip; { panic("softdep_change_linkcnt called"); } void softdep_load_inodeblock(ip) struct inode *ip; { panic("softdep_load_inodeblock called"); } void softdep_update_inodeblock(ip, bp, waitfor) struct inode *ip; struct buf *bp; int waitfor; { panic("softdep_update_inodeblock called"); } int softdep_fsync(vp) struct vnode *vp; /* the "in_core" copy of the inode */ { return (0); } void softdep_fsync_mountdev(vp) struct vnode *vp; { return; } int softdep_flushworklist(oldmnt, countp, td) struct mount *oldmnt; int *countp; struct thread *td; { *countp = 0; return (0); } int softdep_sync_metadata(struct vnode *vp) { panic("softdep_sync_metadata called"); } int 
softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor) { panic("softdep_sync_buf called"); } int softdep_slowdown(vp) struct vnode *vp; { panic("softdep_slowdown called"); } int softdep_request_cleanup(fs, vp, cred, resource) struct fs *fs; struct vnode *vp; struct ucred *cred; int resource; { return (0); } int softdep_check_suspend(struct mount *mp, struct vnode *devvp, int softdep_depcnt, int softdep_accdepcnt, int secondary_writes, int secondary_accwrites) { struct bufobj *bo; int error; (void) softdep_depcnt, (void) softdep_accdepcnt; bo = &devvp->v_bufobj; ASSERT_BO_WLOCKED(bo); MNT_ILOCK(mp); while (mp->mnt_secondary_writes != 0) { BO_UNLOCK(bo); msleep(&mp->mnt_secondary_writes, MNT_MTX(mp), (PUSER - 1) | PDROP, "secwr", 0); BO_LOCK(bo); MNT_ILOCK(mp); } /* * Reasons for needing more work before suspend: * - Dirty buffers on devvp. * - Secondary writes occurred after start of vnode sync loop */ error = 0; if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0 || secondary_writes != 0 || mp->mnt_secondary_writes != 0 || secondary_accwrites != mp->mnt_secondary_accwrites) error = EAGAIN; BO_UNLOCK(bo); return (error); } void softdep_get_depcounts(struct mount *mp, int *softdepactivep, int *softdepactiveaccp) { (void) mp; *softdepactivep = 0; *softdepactiveaccp = 0; } void softdep_buf_append(bp, wkhd) struct buf *bp; struct workhead *wkhd; { panic("softdep_buf_appendwork called"); } void softdep_inode_append(ip, cred, wkhd) struct inode *ip; struct ucred *cred; struct workhead *wkhd; { panic("softdep_inode_appendwork called"); } void softdep_freework(wkhd) struct workhead *wkhd; { panic("softdep_freework called"); } #else FEATURE(softupdates, "FFS soft-updates support"); static SYSCTL_NODE(_debug, OID_AUTO, softdep, CTLFLAG_RW, 0, "soft updates stats"); static SYSCTL_NODE(_debug_softdep, OID_AUTO, total, CTLFLAG_RW, 0, "total dependencies allocated"); static SYSCTL_NODE(_debug_softdep, OID_AUTO, highuse, CTLFLAG_RW, 0, "high use dependencies allocated"); static SYSCTL_NODE(_debug_softdep, OID_AUTO, current, CTLFLAG_RW, 0, "current dependencies allocated"); static SYSCTL_NODE(_debug_softdep, OID_AUTO, write, CTLFLAG_RW, 0, "current dependencies written"); unsigned long dep_current[D_LAST + 1]; unsigned long dep_highuse[D_LAST + 1]; unsigned long dep_total[D_LAST + 1]; unsigned long dep_write[D_LAST + 1]; #define SOFTDEP_TYPE(type, str, long) \ static MALLOC_DEFINE(M_ ## type, #str, long); \ SYSCTL_ULONG(_debug_softdep_total, OID_AUTO, str, CTLFLAG_RD, \ &dep_total[D_ ## type], 0, ""); \ SYSCTL_ULONG(_debug_softdep_current, OID_AUTO, str, CTLFLAG_RD, \ &dep_current[D_ ## type], 0, ""); \ SYSCTL_ULONG(_debug_softdep_highuse, OID_AUTO, str, CTLFLAG_RD, \ &dep_highuse[D_ ## type], 0, ""); \ SYSCTL_ULONG(_debug_softdep_write, OID_AUTO, str, CTLFLAG_RD, \ &dep_write[D_ ## type], 0, ""); SOFTDEP_TYPE(PAGEDEP, pagedep, "File page dependencies"); SOFTDEP_TYPE(INODEDEP, inodedep, "Inode dependencies"); SOFTDEP_TYPE(BMSAFEMAP, bmsafemap, "Block or frag allocated from cyl group map"); SOFTDEP_TYPE(NEWBLK, newblk, "New block or frag allocation dependency"); SOFTDEP_TYPE(ALLOCDIRECT, allocdirect, "Block or frag dependency for an inode"); SOFTDEP_TYPE(INDIRDEP, indirdep, "Indirect block dependencies"); SOFTDEP_TYPE(ALLOCINDIR, allocindir, "Block dependency for an indirect block"); SOFTDEP_TYPE(FREEFRAG, freefrag, "Previously used frag for an inode"); SOFTDEP_TYPE(FREEBLKS, freeblks, "Blocks freed from an inode"); SOFTDEP_TYPE(FREEFILE, freefile, "Inode deallocated"); SOFTDEP_TYPE(DIRADD, diradd, 
"New directory entry"); SOFTDEP_TYPE(MKDIR, mkdir, "New directory"); SOFTDEP_TYPE(DIRREM, dirrem, "Directory entry deleted"); SOFTDEP_TYPE(NEWDIRBLK, newdirblk, "Unclaimed new directory block"); SOFTDEP_TYPE(FREEWORK, freework, "free an inode block"); SOFTDEP_TYPE(FREEDEP, freedep, "track a block free"); SOFTDEP_TYPE(JADDREF, jaddref, "Journal inode ref add"); SOFTDEP_TYPE(JREMREF, jremref, "Journal inode ref remove"); SOFTDEP_TYPE(JMVREF, jmvref, "Journal inode ref move"); SOFTDEP_TYPE(JNEWBLK, jnewblk, "Journal new block"); SOFTDEP_TYPE(JFREEBLK, jfreeblk, "Journal free block"); SOFTDEP_TYPE(JFREEFRAG, jfreefrag, "Journal free frag"); SOFTDEP_TYPE(JSEG, jseg, "Journal segment"); SOFTDEP_TYPE(JSEGDEP, jsegdep, "Journal segment complete"); SOFTDEP_TYPE(SBDEP, sbdep, "Superblock write dependency"); SOFTDEP_TYPE(JTRUNC, jtrunc, "Journal inode truncation"); SOFTDEP_TYPE(JFSYNC, jfsync, "Journal fsync complete"); static MALLOC_DEFINE(M_SENTINEL, "sentinel", "Worklist sentinel"); static MALLOC_DEFINE(M_SAVEDINO, "savedino", "Saved inodes"); static MALLOC_DEFINE(M_JBLOCKS, "jblocks", "Journal block locations"); static MALLOC_DEFINE(M_MOUNTDATA, "softdep", "Softdep per-mount data"); #define M_SOFTDEP_FLAGS (M_WAITOK) /* * translate from workitem type to memory type * MUST match the defines above, such that memtype[D_XXX] == M_XXX */ static struct malloc_type *memtype[] = { M_PAGEDEP, M_INODEDEP, M_BMSAFEMAP, M_NEWBLK, M_ALLOCDIRECT, M_INDIRDEP, M_ALLOCINDIR, M_FREEFRAG, M_FREEBLKS, M_FREEFILE, M_DIRADD, M_MKDIR, M_DIRREM, M_NEWDIRBLK, M_FREEWORK, M_FREEDEP, M_JADDREF, M_JREMREF, M_JMVREF, M_JNEWBLK, M_JFREEBLK, M_JFREEFRAG, M_JSEG, M_JSEGDEP, M_SBDEP, M_JTRUNC, M_JFSYNC, M_SENTINEL }; #define DtoM(type) (memtype[type]) /* * Names of malloc types. */ #define TYPENAME(type) \ ((unsigned)(type) <= D_LAST ? memtype[type]->ks_shortdesc : "???") /* * End system adaptation definitions. */ #define DOTDOT_OFFSET offsetof(struct dirtemplate, dotdot_ino) #define DOT_OFFSET offsetof(struct dirtemplate, dot_ino) /* * Internal function prototypes. 
*/ static void check_clear_deps(struct mount *); static void softdep_error(char *, int); static int softdep_process_worklist(struct mount *, int); static int softdep_waitidle(struct mount *, int); static void drain_output(struct vnode *); static struct buf *getdirtybuf(struct buf *, struct rwlock *, int); static int check_inodedep_free(struct inodedep *); static void clear_remove(struct mount *); static void clear_inodedeps(struct mount *); static void unlinked_inodedep(struct mount *, struct inodedep *); static void clear_unlinked_inodedep(struct inodedep *); static struct inodedep *first_unlinked_inodedep(struct ufsmount *); static int flush_pagedep_deps(struct vnode *, struct mount *, struct diraddhd *); static int free_pagedep(struct pagedep *); static int flush_newblk_dep(struct vnode *, struct mount *, ufs_lbn_t); static int flush_inodedep_deps(struct vnode *, struct mount *, ino_t); static int flush_deplist(struct allocdirectlst *, int, int *); static int sync_cgs(struct mount *, int); static int handle_written_filepage(struct pagedep *, struct buf *, int); static int handle_written_sbdep(struct sbdep *, struct buf *); static void initiate_write_sbdep(struct sbdep *); static void diradd_inode_written(struct diradd *, struct inodedep *); static int handle_written_indirdep(struct indirdep *, struct buf *, struct buf**, int); static int handle_written_inodeblock(struct inodedep *, struct buf *, int); static int jnewblk_rollforward(struct jnewblk *, struct fs *, struct cg *, uint8_t *); static int handle_written_bmsafemap(struct bmsafemap *, struct buf *, int); static void handle_written_jaddref(struct jaddref *); static void handle_written_jremref(struct jremref *); static void handle_written_jseg(struct jseg *, struct buf *); static void handle_written_jnewblk(struct jnewblk *); static void handle_written_jblkdep(struct jblkdep *); static void handle_written_jfreefrag(struct jfreefrag *); static void complete_jseg(struct jseg *); static void complete_jsegs(struct jseg *); static void jseg_write(struct ufsmount *ump, struct jseg *, uint8_t *); static void jaddref_write(struct jaddref *, struct jseg *, uint8_t *); static void jremref_write(struct jremref *, struct jseg *, uint8_t *); static void jmvref_write(struct jmvref *, struct jseg *, uint8_t *); static void jtrunc_write(struct jtrunc *, struct jseg *, uint8_t *); static void jfsync_write(struct jfsync *, struct jseg *, uint8_t *data); static void jnewblk_write(struct jnewblk *, struct jseg *, uint8_t *); static void jfreeblk_write(struct jfreeblk *, struct jseg *, uint8_t *); static void jfreefrag_write(struct jfreefrag *, struct jseg *, uint8_t *); static inline void inoref_write(struct inoref *, struct jseg *, struct jrefrec *); static void handle_allocdirect_partdone(struct allocdirect *, struct workhead *); static struct jnewblk *cancel_newblk(struct newblk *, struct worklist *, struct workhead *); static void indirdep_complete(struct indirdep *); static int indirblk_lookup(struct mount *, ufs2_daddr_t); static void indirblk_insert(struct freework *); static void indirblk_remove(struct freework *); static void handle_allocindir_partdone(struct allocindir *); static void initiate_write_filepage(struct pagedep *, struct buf *); static void initiate_write_indirdep(struct indirdep*, struct buf *); static void handle_written_mkdir(struct mkdir *, int); static int jnewblk_rollback(struct jnewblk *, struct fs *, struct cg *, uint8_t *); static void initiate_write_bmsafemap(struct bmsafemap *, struct buf *); static void 
initiate_write_inodeblock_ufs1(struct inodedep *, struct buf *); static void initiate_write_inodeblock_ufs2(struct inodedep *, struct buf *); static void handle_workitem_freefile(struct freefile *); static int handle_workitem_remove(struct dirrem *, int); static struct dirrem *newdirrem(struct buf *, struct inode *, struct inode *, int, struct dirrem **); static struct indirdep *indirdep_lookup(struct mount *, struct inode *, struct buf *); static void cancel_indirdep(struct indirdep *, struct buf *, struct freeblks *); static void free_indirdep(struct indirdep *); static void free_diradd(struct diradd *, struct workhead *); static void merge_diradd(struct inodedep *, struct diradd *); static void complete_diradd(struct diradd *); static struct diradd *diradd_lookup(struct pagedep *, int); static struct jremref *cancel_diradd_dotdot(struct inode *, struct dirrem *, struct jremref *); static struct jremref *cancel_mkdir_dotdot(struct inode *, struct dirrem *, struct jremref *); static void cancel_diradd(struct diradd *, struct dirrem *, struct jremref *, struct jremref *, struct jremref *); static void dirrem_journal(struct dirrem *, struct jremref *, struct jremref *, struct jremref *); static void cancel_allocindir(struct allocindir *, struct buf *bp, struct freeblks *, int); static int setup_trunc_indir(struct freeblks *, struct inode *, ufs_lbn_t, ufs_lbn_t, ufs2_daddr_t); static void complete_trunc_indir(struct freework *); static void trunc_indirdep(struct indirdep *, struct freeblks *, struct buf *, int); static void complete_mkdir(struct mkdir *); static void free_newdirblk(struct newdirblk *); static void free_jremref(struct jremref *); static void free_jaddref(struct jaddref *); static void free_jsegdep(struct jsegdep *); static void free_jsegs(struct jblocks *); static void rele_jseg(struct jseg *); static void free_jseg(struct jseg *, struct jblocks *); static void free_jnewblk(struct jnewblk *); static void free_jblkdep(struct jblkdep *); static void free_jfreefrag(struct jfreefrag *); static void free_freedep(struct freedep *); static void journal_jremref(struct dirrem *, struct jremref *, struct inodedep *); static void cancel_jnewblk(struct jnewblk *, struct workhead *); static int cancel_jaddref(struct jaddref *, struct inodedep *, struct workhead *); static void cancel_jfreefrag(struct jfreefrag *); static inline void setup_freedirect(struct freeblks *, struct inode *, int, int); static inline void setup_freeext(struct freeblks *, struct inode *, int, int); static inline void setup_freeindir(struct freeblks *, struct inode *, int, ufs_lbn_t, int); static inline struct freeblks *newfreeblks(struct mount *, struct inode *); static void freeblks_free(struct ufsmount *, struct freeblks *, int); static void indir_trunc(struct freework *, ufs2_daddr_t, ufs_lbn_t); static ufs2_daddr_t blkcount(struct fs *, ufs2_daddr_t, off_t); static int trunc_check_buf(struct buf *, int *, ufs_lbn_t, int, int); static void trunc_dependencies(struct inode *, struct freeblks *, ufs_lbn_t, int, int); static void trunc_pages(struct inode *, off_t, ufs2_daddr_t, int); static int cancel_pagedep(struct pagedep *, struct freeblks *, int); static int deallocate_dependencies(struct buf *, struct freeblks *, int); static void newblk_freefrag(struct newblk*); static void free_newblk(struct newblk *); static void cancel_allocdirect(struct allocdirectlst *, struct allocdirect *, struct freeblks *); static int check_inode_unwritten(struct inodedep *); static int free_inodedep(struct inodedep *); static void 
freework_freeblock(struct freework *); static void freework_enqueue(struct freework *); static int handle_workitem_freeblocks(struct freeblks *, int); static int handle_complete_freeblocks(struct freeblks *, int); static void handle_workitem_indirblk(struct freework *); static void handle_written_freework(struct freework *); static void merge_inode_lists(struct allocdirectlst *,struct allocdirectlst *); static struct worklist *jnewblk_merge(struct worklist *, struct worklist *, struct workhead *); static struct freefrag *setup_allocindir_phase2(struct buf *, struct inode *, struct inodedep *, struct allocindir *, ufs_lbn_t); static struct allocindir *newallocindir(struct inode *, int, ufs2_daddr_t, ufs2_daddr_t, ufs_lbn_t); static void handle_workitem_freefrag(struct freefrag *); static struct freefrag *newfreefrag(struct inode *, ufs2_daddr_t, long, ufs_lbn_t); static void allocdirect_merge(struct allocdirectlst *, struct allocdirect *, struct allocdirect *); static struct freefrag *allocindir_merge(struct allocindir *, struct allocindir *); static int bmsafemap_find(struct bmsafemap_hashhead *, int, struct bmsafemap **); static struct bmsafemap *bmsafemap_lookup(struct mount *, struct buf *, int cg, struct bmsafemap *); static int newblk_find(struct newblk_hashhead *, ufs2_daddr_t, int, struct newblk **); static int newblk_lookup(struct mount *, ufs2_daddr_t, int, struct newblk **); static int inodedep_find(struct inodedep_hashhead *, ino_t, struct inodedep **); static int inodedep_lookup(struct mount *, ino_t, int, struct inodedep **); static int pagedep_lookup(struct mount *, struct buf *bp, ino_t, ufs_lbn_t, int, struct pagedep **); static int pagedep_find(struct pagedep_hashhead *, ino_t, ufs_lbn_t, struct pagedep **); static void pause_timer(void *); static int request_cleanup(struct mount *, int); +static int softdep_request_cleanup_flush(struct mount *, struct ufsmount *); static void schedule_cleanup(struct mount *); static void softdep_ast_cleanup_proc(struct thread *); static int process_worklist_item(struct mount *, int, int); static void process_removes(struct vnode *); static void process_truncates(struct vnode *); static void jwork_move(struct workhead *, struct workhead *); static void jwork_insert(struct workhead *, struct jsegdep *); static void add_to_worklist(struct worklist *, int); static void wake_worklist(struct worklist *); static void wait_worklist(struct worklist *, char *); static void remove_from_worklist(struct worklist *); static void softdep_flush(void *); static void softdep_flushjournal(struct mount *); static int softdep_speedup(struct ufsmount *); static void worklist_speedup(struct mount *); static int journal_mount(struct mount *, struct fs *, struct ucred *); static void journal_unmount(struct ufsmount *); static int journal_space(struct ufsmount *, int); static void journal_suspend(struct ufsmount *); static int journal_unsuspend(struct ufsmount *ump); static void softdep_prelink(struct vnode *, struct vnode *); static void add_to_journal(struct worklist *); static void remove_from_journal(struct worklist *); static bool softdep_excess_items(struct ufsmount *, int); static void softdep_process_journal(struct mount *, struct worklist *, int); static struct jremref *newjremref(struct dirrem *, struct inode *, struct inode *ip, off_t, nlink_t); static struct jaddref *newjaddref(struct inode *, ino_t, off_t, int16_t, uint16_t); static inline void newinoref(struct inoref *, ino_t, ino_t, off_t, nlink_t, uint16_t); static inline struct jsegdep 
*inoref_jseg(struct inoref *); static struct jmvref *newjmvref(struct inode *, ino_t, off_t, off_t); static struct jfreeblk *newjfreeblk(struct freeblks *, ufs_lbn_t, ufs2_daddr_t, int); static void adjust_newfreework(struct freeblks *, int); static struct jtrunc *newjtrunc(struct freeblks *, off_t, int); static void move_newblock_dep(struct jaddref *, struct inodedep *); static void cancel_jfreeblk(struct freeblks *, ufs2_daddr_t); static struct jfreefrag *newjfreefrag(struct freefrag *, struct inode *, ufs2_daddr_t, long, ufs_lbn_t); static struct freework *newfreework(struct ufsmount *, struct freeblks *, struct freework *, ufs_lbn_t, ufs2_daddr_t, int, int, int); static int jwait(struct worklist *, int); static struct inodedep *inodedep_lookup_ip(struct inode *); static int bmsafemap_backgroundwrite(struct bmsafemap *, struct buf *); static struct freefile *handle_bufwait(struct inodedep *, struct workhead *); static void handle_jwork(struct workhead *); static struct mkdir *setup_newdir(struct diradd *, ino_t, ino_t, struct buf *, struct mkdir **); static struct jblocks *jblocks_create(void); static ufs2_daddr_t jblocks_alloc(struct jblocks *, int, int *); static void jblocks_free(struct jblocks *, struct mount *, int); static void jblocks_destroy(struct jblocks *); static void jblocks_add(struct jblocks *, ufs2_daddr_t, int); /* * Exported softdep operations. */ static void softdep_disk_io_initiation(struct buf *); static void softdep_disk_write_complete(struct buf *); static void softdep_deallocate_dependencies(struct buf *); static int softdep_count_dependencies(struct buf *bp, int); /* * Global lock over all of soft updates. */ static struct mtx lk; MTX_SYSINIT(softdep_lock, &lk, "Global Softdep Lock", MTX_DEF); #define ACQUIRE_GBLLOCK(lk) mtx_lock(lk) #define FREE_GBLLOCK(lk) mtx_unlock(lk) #define GBLLOCK_OWNED(lk) mtx_assert((lk), MA_OWNED) /* * Per-filesystem soft-updates locking. */ #define LOCK_PTR(ump) (&(ump)->um_softdep->sd_fslock) #define TRY_ACQUIRE_LOCK(ump) rw_try_wlock(&(ump)->um_softdep->sd_fslock) #define ACQUIRE_LOCK(ump) rw_wlock(&(ump)->um_softdep->sd_fslock) #define FREE_LOCK(ump) rw_wunlock(&(ump)->um_softdep->sd_fslock) #define LOCK_OWNED(ump) rw_assert(&(ump)->um_softdep->sd_fslock, \ RA_WLOCKED) #define BUF_AREC(bp) lockallowrecurse(&(bp)->b_lock) #define BUF_NOREC(bp) lockdisablerecurse(&(bp)->b_lock) /* * Worklist queue management. * These routines require that the lock be held. 
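 * A minimal usage sketch (illustrative only; "ump" and "wk" are
 * hypothetical locals, with the per-filesystem lock macros defined above):
 *
 *	ACQUIRE_LOCK(ump);
 *	WORKLIST_INSERT(&ump->softdep_workitem_pending, wk);
 *	...
 *	WORKLIST_REMOVE(wk);
 *	FREE_LOCK(ump);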
*/ #ifndef /* NOT */ DEBUG #define WORKLIST_INSERT(head, item) do { \ (item)->wk_state |= ONWORKLIST; \ LIST_INSERT_HEAD(head, item, wk_list); \ } while (0) #define WORKLIST_REMOVE(item) do { \ (item)->wk_state &= ~ONWORKLIST; \ LIST_REMOVE(item, wk_list); \ } while (0) #define WORKLIST_INSERT_UNLOCKED WORKLIST_INSERT #define WORKLIST_REMOVE_UNLOCKED WORKLIST_REMOVE #else /* DEBUG */ static void worklist_insert(struct workhead *, struct worklist *, int); static void worklist_remove(struct worklist *, int); #define WORKLIST_INSERT(head, item) worklist_insert(head, item, 1) #define WORKLIST_INSERT_UNLOCKED(head, item) worklist_insert(head, item, 0) #define WORKLIST_REMOVE(item) worklist_remove(item, 1) #define WORKLIST_REMOVE_UNLOCKED(item) worklist_remove(item, 0) static void worklist_insert(head, item, locked) struct workhead *head; struct worklist *item; int locked; { if (locked) LOCK_OWNED(VFSTOUFS(item->wk_mp)); if (item->wk_state & ONWORKLIST) panic("worklist_insert: %p %s(0x%X) already on list", item, TYPENAME(item->wk_type), item->wk_state); item->wk_state |= ONWORKLIST; LIST_INSERT_HEAD(head, item, wk_list); } static void worklist_remove(item, locked) struct worklist *item; int locked; { if (locked) LOCK_OWNED(VFSTOUFS(item->wk_mp)); if ((item->wk_state & ONWORKLIST) == 0) panic("worklist_remove: %p %s(0x%X) not on list", item, TYPENAME(item->wk_type), item->wk_state); item->wk_state &= ~ONWORKLIST; LIST_REMOVE(item, wk_list); } #endif /* DEBUG */ /* * Merge two jsegdeps, keeping only the oldest one, as newer references * cannot be discarded until after older ones. */ static inline struct jsegdep * jsegdep_merge(struct jsegdep *one, struct jsegdep *two) { struct jsegdep *swp; if (two == NULL) return (one); if (one->jd_seg->js_seq > two->jd_seg->js_seq) { swp = one; one = two; two = swp; } WORKLIST_REMOVE(&two->jd_list); free_jsegdep(two); return (one); } /* * If two freedeps are compatible, free one to reduce list size. */ static inline struct freedep * freedep_merge(struct freedep *one, struct freedep *two) { if (two == NULL) return (one); if (one->fd_freework == two->fd_freework) { WORKLIST_REMOVE(&two->fd_list); free_freedep(two); } return (one); } /* * Move journal work from one list to another. Duplicate freedeps and * jsegdeps are coalesced to keep the lists as small as possible.
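 * For example, when both lists carry a jsegdep, only the reference to the
 * oldest journal segment (the lowest js_seq) survives; see jsegdep_merge()
 * above.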
*/ static void jwork_move(dst, src) struct workhead *dst; struct workhead *src; { struct freedep *freedep; struct jsegdep *jsegdep; struct worklist *wkn; struct worklist *wk; KASSERT(dst != src, ("jwork_move: dst == src")); freedep = NULL; jsegdep = NULL; LIST_FOREACH_SAFE(wk, dst, wk_list, wkn) { if (wk->wk_type == D_JSEGDEP) jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep); else if (wk->wk_type == D_FREEDEP) freedep = freedep_merge(WK_FREEDEP(wk), freedep); } while ((wk = LIST_FIRST(src)) != NULL) { WORKLIST_REMOVE(wk); WORKLIST_INSERT(dst, wk); if (wk->wk_type == D_JSEGDEP) { jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep); continue; } if (wk->wk_type == D_FREEDEP) freedep = freedep_merge(WK_FREEDEP(wk), freedep); } } static void jwork_insert(dst, jsegdep) struct workhead *dst; struct jsegdep *jsegdep; { struct jsegdep *jsegdepn; struct worklist *wk; LIST_FOREACH(wk, dst, wk_list) if (wk->wk_type == D_JSEGDEP) break; if (wk == NULL) { WORKLIST_INSERT(dst, &jsegdep->jd_list); return; } jsegdepn = WK_JSEGDEP(wk); if (jsegdep->jd_seg->js_seq < jsegdepn->jd_seg->js_seq) { WORKLIST_REMOVE(wk); free_jsegdep(jsegdepn); WORKLIST_INSERT(dst, &jsegdep->jd_list); } else free_jsegdep(jsegdep); } /* * Routines for tracking and managing workitems. */ static void workitem_free(struct worklist *, int); static void workitem_alloc(struct worklist *, int, struct mount *); static void workitem_reassign(struct worklist *, int); #define WORKITEM_FREE(item, type) \ workitem_free((struct worklist *)(item), (type)) #define WORKITEM_REASSIGN(item, type) \ workitem_reassign((struct worklist *)(item), (type)) static void workitem_free(item, type) struct worklist *item; int type; { struct ufsmount *ump; #ifdef DEBUG if (item->wk_state & ONWORKLIST) panic("workitem_free: %s(0x%X) still on list", TYPENAME(item->wk_type), item->wk_state); if (item->wk_type != type && type != D_NEWBLK) panic("workitem_free: type mismatch %s != %s", TYPENAME(item->wk_type), TYPENAME(type)); #endif if (item->wk_state & IOWAITING) wakeup(item); ump = VFSTOUFS(item->wk_mp); LOCK_OWNED(ump); KASSERT(ump->softdep_deps > 0, ("workitem_free: %s: softdep_deps going negative", ump->um_fs->fs_fsmnt)); if (--ump->softdep_deps == 0 && ump->softdep_req) wakeup(&ump->softdep_deps); KASSERT(dep_current[item->wk_type] > 0, ("workitem_free: %s: dep_current[%s] going negative", ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type))); KASSERT(ump->softdep_curdeps[item->wk_type] > 0, ("workitem_free: %s: softdep_curdeps[%s] going negative", ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type))); atomic_subtract_long(&dep_current[item->wk_type], 1); ump->softdep_curdeps[item->wk_type] -= 1; free(item, DtoM(type)); } static void workitem_alloc(item, type, mp) struct worklist *item; int type; struct mount *mp; { struct ufsmount *ump; item->wk_type = type; item->wk_mp = mp; item->wk_state = 0; ump = VFSTOUFS(mp); ACQUIRE_GBLLOCK(&lk); dep_current[type]++; if (dep_current[type] > dep_highuse[type]) dep_highuse[type] = dep_current[type]; dep_total[type]++; FREE_GBLLOCK(&lk); ACQUIRE_LOCK(ump); ump->softdep_curdeps[type] += 1; ump->softdep_deps++; ump->softdep_accdeps++; FREE_LOCK(ump); } static void workitem_reassign(item, newtype) struct worklist *item; int newtype; { struct ufsmount *ump; ump = VFSTOUFS(item->wk_mp); LOCK_OWNED(ump); KASSERT(ump->softdep_curdeps[item->wk_type] > 0, ("workitem_reassign: %s: softdep_curdeps[%s] going negative", VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type))); ump->softdep_curdeps[item->wk_type] -= 1; 
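	/*
	 * Credit the new type's per-mount count to balance the decrement
	 * above; the global dep_current[] counters are moved below under
	 * the global lock.
	 */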
ump->softdep_curdeps[newtype] += 1; KASSERT(dep_current[item->wk_type] > 0, ("workitem_reassign: %s: dep_current[%s] going negative", VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type))); ACQUIRE_GBLLOCK(&lk); dep_current[newtype]++; dep_current[item->wk_type]--; if (dep_current[newtype] > dep_highuse[newtype]) dep_highuse[newtype] = dep_current[newtype]; dep_total[newtype]++; FREE_GBLLOCK(&lk); item->wk_type = newtype; } /* * Workitem queue management */ static int max_softdeps; /* maximum number of structs before slowdown */ static int tickdelay = 2; /* number of ticks to pause during slowdown */ static int proc_waiting; /* tracks whether we have a timeout posted */ static int *stat_countp; /* statistic to count in proc_waiting timeout */ static struct callout softdep_callout; static int req_clear_inodedeps; /* syncer process flush some inodedeps */ static int req_clear_remove; /* syncer process flush some freeblks */ static int softdep_flushcache = 0; /* Should we do BIO_FLUSH? */ /* * runtime statistics */ static int stat_flush_threads; /* number of softdep flushing threads */ static int stat_worklist_push; /* number of worklist cleanups */ static int stat_blk_limit_push; /* number of times block limit neared */ static int stat_ino_limit_push; /* number of times inode limit neared */ static int stat_blk_limit_hit; /* number of times block slowdown imposed */ static int stat_ino_limit_hit; /* number of times inode slowdown imposed */ static int stat_sync_limit_hit; /* number of synchronous slowdowns imposed */ static int stat_indir_blk_ptrs; /* bufs redirtied as indir ptrs not written */ static int stat_inode_bitmap; /* bufs redirtied as inode bitmap not written */ static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */ static int stat_dir_entry; /* bufs redirtied as dir entry cannot write */ static int stat_jaddref; /* bufs redirtied as ino bitmap can not write */ static int stat_jnewblk; /* bufs redirtied as blk bitmap can not write */ static int stat_journal_min; /* Times hit journal min threshold */ static int stat_journal_low; /* Times hit journal low threshold */ static int stat_journal_wait; /* Times blocked in jwait(). */ static int stat_jwait_filepage; /* Times blocked in jwait() for filepage. */ static int stat_jwait_freeblks; /* Times blocked in jwait() for freeblks. */ static int stat_jwait_inode; /* Times blocked in jwait() for inodes. */ static int stat_jwait_newblk; /* Times blocked in jwait() for newblks. 
*/ static int stat_cleanup_high_delay; /* Maximum cleanup delay (in ticks) */ static int stat_cleanup_blkrequests; /* Number of block cleanup requests */ static int stat_cleanup_inorequests; /* Number of inode cleanup requests */ static int stat_cleanup_retries; /* Number of cleanups that needed to flush */ static int stat_cleanup_failures; /* Number of cleanup requests that failed */ static int stat_emptyjblocks; /* Number of potentially empty journal blocks */ SYSCTL_INT(_debug_softdep, OID_AUTO, max_softdeps, CTLFLAG_RW, &max_softdeps, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, tickdelay, CTLFLAG_RW, &tickdelay, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, flush_threads, CTLFLAG_RD, &stat_flush_threads, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, worklist_push, CTLFLAG_RW, &stat_worklist_push, 0,""); SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_push, CTLFLAG_RW, &stat_blk_limit_push, 0,""); SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_push, CTLFLAG_RW, &stat_ino_limit_push, 0,""); SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_hit, CTLFLAG_RW, &stat_blk_limit_hit, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_hit, CTLFLAG_RW, &stat_ino_limit_hit, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, sync_limit_hit, CTLFLAG_RW, &stat_sync_limit_hit, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW, &stat_indir_blk_ptrs, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, inode_bitmap, CTLFLAG_RW, &stat_inode_bitmap, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW, &stat_direct_blk_ptrs, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, dir_entry, CTLFLAG_RW, &stat_dir_entry, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, jaddref_rollback, CTLFLAG_RW, &stat_jaddref, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, jnewblk_rollback, CTLFLAG_RW, &stat_jnewblk, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, journal_low, CTLFLAG_RW, &stat_journal_low, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, journal_min, CTLFLAG_RW, &stat_journal_min, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, journal_wait, CTLFLAG_RW, &stat_journal_wait, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_filepage, CTLFLAG_RW, &stat_jwait_filepage, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_freeblks, CTLFLAG_RW, &stat_jwait_freeblks, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_inode, CTLFLAG_RW, &stat_jwait_inode, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_newblk, CTLFLAG_RW, &stat_jwait_newblk, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_blkrequests, CTLFLAG_RW, &stat_cleanup_blkrequests, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_inorequests, CTLFLAG_RW, &stat_cleanup_inorequests, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_high_delay, CTLFLAG_RW, &stat_cleanup_high_delay, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_retries, CTLFLAG_RW, &stat_cleanup_retries, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_failures, CTLFLAG_RW, &stat_cleanup_failures, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, flushcache, CTLFLAG_RW, &softdep_flushcache, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, emptyjblocks, CTLFLAG_RD, &stat_emptyjblocks, 0, ""); SYSCTL_DECL(_vfs_ffs); /* Whether to recompute the summary at mount time */ static int compute_summary_at_mount = 0; SYSCTL_INT(_vfs_ffs, OID_AUTO, compute_summary_at_mount, CTLFLAG_RW, &compute_summary_at_mount, 0, "Recompute summary at mount"); static int print_threads = 0; SYSCTL_INT(_debug_softdep, OID_AUTO, print_threads, CTLFLAG_RW, &print_threads, 0, "Notify flusher thread 
start/stop"); /* List of all filesystems mounted with soft updates */ static TAILQ_HEAD(, mount_softdeps) softdepmounts; /* * This function cleans the worklist for a filesystem. * Each filesystem running with soft dependencies gets its own * thread to run in this function. The thread is started up in * softdep_mount and shutdown in softdep_unmount. They show up * as part of the kernel "bufdaemon" process whose process * entry is available in bufdaemonproc. */ static int searchfailed; extern struct proc *bufdaemonproc; static void softdep_flush(addr) void *addr; { struct mount *mp; struct thread *td; struct ufsmount *ump; td = curthread; td->td_pflags |= TDP_NORUNNINGBUF; mp = (struct mount *)addr; ump = VFSTOUFS(mp); atomic_add_int(&stat_flush_threads, 1); ACQUIRE_LOCK(ump); ump->softdep_flags &= ~FLUSH_STARTING; wakeup(&ump->softdep_flushtd); FREE_LOCK(ump); if (print_threads) { if (stat_flush_threads == 1) printf("Running %s at pid %d\n", bufdaemonproc->p_comm, bufdaemonproc->p_pid); printf("Start thread %s\n", td->td_name); } for (;;) { while (softdep_process_worklist(mp, 0) > 0 || (MOUNTEDSUJ(mp) && VFSTOUFS(mp)->softdep_jblocks->jb_suspended)) kthread_suspend_check(); ACQUIRE_LOCK(ump); if ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0) msleep(&ump->softdep_flushtd, LOCK_PTR(ump), PVM, "sdflush", hz / 2); ump->softdep_flags &= ~FLUSH_CLEANUP; /* * Check to see if we are done and need to exit. */ if ((ump->softdep_flags & FLUSH_EXIT) == 0) { FREE_LOCK(ump); continue; } ump->softdep_flags &= ~FLUSH_EXIT; FREE_LOCK(ump); wakeup(&ump->softdep_flags); if (print_threads) printf("Stop thread %s: searchfailed %d, did cleanups %d\n", td->td_name, searchfailed, ump->um_softdep->sd_cleanups); atomic_subtract_int(&stat_flush_threads, 1); kthread_exit(); panic("kthread_exit failed\n"); } } static void worklist_speedup(mp) struct mount *mp; { struct ufsmount *ump; ump = VFSTOUFS(mp); LOCK_OWNED(ump); if ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0) ump->softdep_flags |= FLUSH_CLEANUP; wakeup(&ump->softdep_flushtd); } static int softdep_speedup(ump) struct ufsmount *ump; { struct ufsmount *altump; struct mount_softdeps *sdp; LOCK_OWNED(ump); worklist_speedup(ump->um_mountp); bd_speedup(); /* * If we have global shortages, then we need other * filesystems to help with the cleanup. Here we wakeup a * flusher thread for a filesystem that is over its fair * share of resources. */ if (req_clear_inodedeps || req_clear_remove) { ACQUIRE_GBLLOCK(&lk); TAILQ_FOREACH(sdp, &softdepmounts, sd_next) { if ((altump = sdp->sd_ump) == ump) continue; if (((req_clear_inodedeps && altump->softdep_curdeps[D_INODEDEP] > max_softdeps / stat_flush_threads) || (req_clear_remove && altump->softdep_curdeps[D_DIRREM] > (max_softdeps / 2) / stat_flush_threads)) && TRY_ACQUIRE_LOCK(altump)) break; } if (sdp == NULL) { searchfailed++; FREE_GBLLOCK(&lk); } else { /* * Move to the end of the list so we pick a * different one on out next try. */ TAILQ_REMOVE(&softdepmounts, sdp, sd_next); TAILQ_INSERT_TAIL(&softdepmounts, sdp, sd_next); FREE_GBLLOCK(&lk); if ((altump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0) altump->softdep_flags |= FLUSH_CLEANUP; altump->um_softdep->sd_cleanups++; wakeup(&altump->softdep_flushtd); FREE_LOCK(altump); } } return (speedup_syncer()); } /* * Add an item to the end of the work queue. * This routine requires that the lock be held. * This is the only routine that adds items to the list. 
* The following routine is the only one that removes items * and does so in order from first to last. */ #define WK_HEAD 0x0001 /* Add to HEAD. */ #define WK_NODELAY 0x0002 /* Process immediately. */ static void add_to_worklist(wk, flags) struct worklist *wk; int flags; { struct ufsmount *ump; ump = VFSTOUFS(wk->wk_mp); LOCK_OWNED(ump); if (wk->wk_state & ONWORKLIST) panic("add_to_worklist: %s(0x%X) already on list", TYPENAME(wk->wk_type), wk->wk_state); wk->wk_state |= ONWORKLIST; if (ump->softdep_on_worklist == 0) { LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list); ump->softdep_worklist_tail = wk; } else if (flags & WK_HEAD) { LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list); } else { LIST_INSERT_AFTER(ump->softdep_worklist_tail, wk, wk_list); ump->softdep_worklist_tail = wk; } ump->softdep_on_worklist += 1; if (flags & WK_NODELAY) worklist_speedup(wk->wk_mp); } /* * Remove the item to be processed. If we are removing the last * item on the list, we need to recalculate the tail pointer. */ static void remove_from_worklist(wk) struct worklist *wk; { struct ufsmount *ump; ump = VFSTOUFS(wk->wk_mp); WORKLIST_REMOVE(wk); if (ump->softdep_worklist_tail == wk) ump->softdep_worklist_tail = (struct worklist *)wk->wk_list.le_prev; ump->softdep_on_worklist -= 1; } static void wake_worklist(wk) struct worklist *wk; { if (wk->wk_state & IOWAITING) { wk->wk_state &= ~IOWAITING; wakeup(wk); } } static void wait_worklist(wk, wmesg) struct worklist *wk; char *wmesg; { struct ufsmount *ump; ump = VFSTOUFS(wk->wk_mp); wk->wk_state |= IOWAITING; msleep(wk, LOCK_PTR(ump), PVM, wmesg, 0); } /* * Process that runs once per second to handle items in the background queue. * * Note that we ensure that all items are processed in the order in which * they appear in the queue. The code below depends on this property to * ensure that blocks of a file are freed before the inode itself is freed. * This ordering ensures that no new <vfsid, inum, lbn> triples will be * generated until all the old ones have been purged from the dependency * lists. */ static int softdep_process_worklist(mp, full) struct mount *mp; int full; { int cnt, matchcnt; struct ufsmount *ump; long starttime; KASSERT(mp != NULL, ("softdep_process_worklist: NULL mp")); if (MOUNTEDSOFTDEP(mp) == 0) return (0); matchcnt = 0; ump = VFSTOUFS(mp); ACQUIRE_LOCK(ump); starttime = time_second; softdep_process_journal(mp, NULL, full ? MNT_WAIT : 0); check_clear_deps(mp); while (ump->softdep_on_worklist > 0) { if ((cnt = process_worklist_item(mp, 10, LK_NOWAIT)) == 0) break; else matchcnt += cnt; check_clear_deps(mp); /* * We do not generally want to stop for buffer space, but if * we are really being a buffer hog, we will stop and wait. */ if (should_yield()) { FREE_LOCK(ump); kern_yield(PRI_USER); bwillwrite(); ACQUIRE_LOCK(ump); } /* * Never allow processing to run for more than one * second. This gives the syncer thread the opportunity * to pause if appropriate. */ if (!full && starttime != time_second) break; } if (full == 0) journal_unsuspend(ump); FREE_LOCK(ump); return (matchcnt); } /* * Process all removes associated with a vnode if we are running out of * journal space. Any other process which attempts to flush these will * be unable to do so because we hold the vnodes locked.
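 * (Called with the per-filesystem softdep lock held; see the LOCK_OWNED()
 * assertion at the top of the function.)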
*/ static void process_removes(vp) struct vnode *vp; { struct inodedep *inodedep; struct dirrem *dirrem; struct ufsmount *ump; struct mount *mp; ino_t inum; mp = vp->v_mount; ump = VFSTOUFS(mp); LOCK_OWNED(ump); inum = VTOI(vp)->i_number; for (;;) { top: if (inodedep_lookup(mp, inum, 0, &inodedep) == 0) return; LIST_FOREACH(dirrem, &inodedep->id_dirremhd, dm_inonext) { /* * If another thread is trying to lock this vnode * it will fail but we must wait for it to do so * before we can proceed. */ if (dirrem->dm_state & INPROGRESS) { wait_worklist(&dirrem->dm_list, "pwrwait"); goto top; } if ((dirrem->dm_state & (COMPLETE | ONWORKLIST)) == (COMPLETE | ONWORKLIST)) break; } if (dirrem == NULL) return; remove_from_worklist(&dirrem->dm_list); FREE_LOCK(ump); if (vn_start_secondary_write(NULL, &mp, V_NOWAIT)) panic("process_removes: suspended filesystem"); handle_workitem_remove(dirrem, 0); vn_finished_secondary_write(mp); ACQUIRE_LOCK(ump); } } /* * Process all truncations associated with a vnode if we are running out * of journal space. This is called when the vnode lock is already held * and no other process can clear the truncation. */ static void process_truncates(vp) struct vnode *vp; { struct inodedep *inodedep; struct freeblks *freeblks; struct ufsmount *ump; struct mount *mp; ino_t inum; int cgwait; mp = vp->v_mount; ump = VFSTOUFS(mp); LOCK_OWNED(ump); inum = VTOI(vp)->i_number; for (;;) { if (inodedep_lookup(mp, inum, 0, &inodedep) == 0) return; cgwait = 0; TAILQ_FOREACH(freeblks, &inodedep->id_freeblklst, fb_next) { /* Journal entries not yet written. */ if (!LIST_EMPTY(&freeblks->fb_jblkdephd)) { jwait(&LIST_FIRST( &freeblks->fb_jblkdephd)->jb_list, MNT_WAIT); break; } /* Another thread is executing this item. */ if (freeblks->fb_state & INPROGRESS) { wait_worklist(&freeblks->fb_list, "ptrwait"); break; } /* Freeblks is waiting on an inode write. */ if ((freeblks->fb_state & COMPLETE) == 0) { FREE_LOCK(ump); ffs_update(vp, 1); ACQUIRE_LOCK(ump); break; } if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST)) == (ALLCOMPLETE | ONWORKLIST)) { remove_from_worklist(&freeblks->fb_list); freeblks->fb_state |= INPROGRESS; FREE_LOCK(ump); if (vn_start_secondary_write(NULL, &mp, V_NOWAIT)) panic("process_truncates: " "suspended filesystem"); handle_workitem_freeblocks(freeblks, 0); vn_finished_secondary_write(mp); ACQUIRE_LOCK(ump); break; } if (freeblks->fb_cgwait) cgwait++; } if (cgwait) { FREE_LOCK(ump); sync_cgs(mp, MNT_WAIT); ffs_sync_snap(mp, MNT_WAIT); ACQUIRE_LOCK(ump); continue; } if (freeblks == NULL) break; } return; } /* * Process one item on the worklist. */ static int process_worklist_item(mp, target, flags) struct mount *mp; int target; int flags; { struct worklist sentinel; struct worklist *wk; struct ufsmount *ump; int matchcnt; int error; KASSERT(mp != NULL, ("process_worklist_item: NULL mp")); /* * If we are being called because of a process doing a * copy-on-write, then it is not safe to write as we may * recurse into the copy-on-write routine. */ if (curthread->td_pflags & TDP_COWINPROGRESS) return (-1); PHOLD(curproc); /* Don't let the stack go away.
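 * PHOLD() pins the process so its kernel stack cannot be swapped out
 * while we sleep; the matching PRELE() is at the end of this function.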
			 */
	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	matchcnt = 0;
	sentinel.wk_mp = NULL;
	sentinel.wk_type = D_SENTINEL;
	LIST_INSERT_HEAD(&ump->softdep_workitem_pending, &sentinel, wk_list);
	for (wk = LIST_NEXT(&sentinel, wk_list); wk != NULL;
	    wk = LIST_NEXT(&sentinel, wk_list)) {
		if (wk->wk_type == D_SENTINEL) {
			LIST_REMOVE(&sentinel, wk_list);
			LIST_INSERT_AFTER(wk, &sentinel, wk_list);
			continue;
		}
		if (wk->wk_state & INPROGRESS)
			panic("process_worklist_item: %p already in progress.",
			    wk);
		wk->wk_state |= INPROGRESS;
		remove_from_worklist(wk);
		FREE_LOCK(ump);
		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
			panic("process_worklist_item: suspended filesystem");
		switch (wk->wk_type) {
		case D_DIRREM:
			/* removal of a directory entry */
			error = handle_workitem_remove(WK_DIRREM(wk), flags);
			break;

		case D_FREEBLKS:
			/* releasing blocks and/or fragments from a file */
			error = handle_workitem_freeblocks(WK_FREEBLKS(wk),
			    flags);
			break;

		case D_FREEFRAG:
			/* releasing a fragment when replaced as a file grows */
			handle_workitem_freefrag(WK_FREEFRAG(wk));
			error = 0;
			break;

		case D_FREEFILE:
			/* releasing an inode when its link count drops to 0 */
			handle_workitem_freefile(WK_FREEFILE(wk));
			error = 0;
			break;

		default:
			panic("%s_process_worklist: Unknown type %s",
			    "softdep", TYPENAME(wk->wk_type));
			/* NOTREACHED */
		}
		vn_finished_secondary_write(mp);
		ACQUIRE_LOCK(ump);
		if (error == 0) {
			if (++matchcnt == target)
				break;
			continue;
		}
		/*
		 * We have to retry the worklist item later. Wake up any
		 * waiters who may be able to complete it immediately and
		 * add the item back to the head so we don't try to execute
		 * it again.
		 */
		wk->wk_state &= ~INPROGRESS;
		wake_worklist(wk);
		add_to_worklist(wk, WK_HEAD);
	}
	LIST_REMOVE(&sentinel, wk_list);
	/* Sentinel could've become the tail from remove_from_worklist. */
	if (ump->softdep_worklist_tail == &sentinel)
		ump->softdep_worklist_tail =
		    (struct worklist *)sentinel.wk_list.le_prev;
	PRELE(curproc);
	return (matchcnt);
}

/*
 * Move dependencies from one buffer to another.
 */
int
softdep_move_dependencies(oldbp, newbp)
	struct buf *oldbp;
	struct buf *newbp;
{
	struct worklist *wk, *wktail;
	struct ufsmount *ump;
	int dirty;

	if ((wk = LIST_FIRST(&oldbp->b_dep)) == NULL)
		return (0);
	KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
	    ("softdep_move_dependencies called on non-softdep filesystem"));
	dirty = 0;
	wktail = NULL;
	ump = VFSTOUFS(wk->wk_mp);
	ACQUIRE_LOCK(ump);
	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
		LIST_REMOVE(wk, wk_list);
		if (wk->wk_type == D_BMSAFEMAP &&
		    bmsafemap_backgroundwrite(WK_BMSAFEMAP(wk), newbp))
			dirty = 1;
		if (wktail == NULL)
			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
		else
			LIST_INSERT_AFTER(wktail, wk, wk_list);
		wktail = wk;
	}
	FREE_LOCK(ump);
	return (dirty);
}

/*
 * Purge the work list of all items associated with a particular mount point.
 */
int
softdep_flushworklist(oldmnt, countp, td)
	struct mount *oldmnt;
	int *countp;
	struct thread *td;
{
	struct vnode *devvp;
	struct ufsmount *ump;
	int count, error;

	/*
	 * Alternately flush the block device associated with the mount
	 * point and process any dependencies that the flushing
	 * creates. We continue until no more worklist dependencies
	 * are found.
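	 * Each pass processes the worklist and then fsyncs the device
	 * vnode; that write typically queues more dependency work, which
	 * the next pass picks up.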
*/ *countp = 0; error = 0; ump = VFSTOUFS(oldmnt); devvp = ump->um_devvp; while ((count = softdep_process_worklist(oldmnt, 1)) > 0) { *countp += count; vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); error = VOP_FSYNC(devvp, MNT_WAIT, td); VOP_UNLOCK(devvp, 0); if (error != 0) break; } return (error); } #define SU_WAITIDLE_RETRIES 20 static int softdep_waitidle(struct mount *mp, int flags __unused) { struct ufsmount *ump; struct vnode *devvp; struct thread *td; int error, i; ump = VFSTOUFS(mp); devvp = ump->um_devvp; td = curthread; error = 0; ACQUIRE_LOCK(ump); for (i = 0; i < SU_WAITIDLE_RETRIES && ump->softdep_deps != 0; i++) { ump->softdep_req = 1; KASSERT((flags & FORCECLOSE) == 0 || ump->softdep_on_worklist == 0, ("softdep_waitidle: work added after flush")); msleep(&ump->softdep_deps, LOCK_PTR(ump), PVM | PDROP, "softdeps", 10 * hz); vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); error = VOP_FSYNC(devvp, MNT_WAIT, td); VOP_UNLOCK(devvp, 0); ACQUIRE_LOCK(ump); if (error != 0) break; } ump->softdep_req = 0; if (i == SU_WAITIDLE_RETRIES && error == 0 && ump->softdep_deps != 0) { error = EBUSY; printf("softdep_waitidle: Failed to flush worklist for %p\n", mp); } FREE_LOCK(ump); return (error); } /* * Flush all vnodes and worklist items associated with a specified mount point. */ int softdep_flushfiles(oldmnt, flags, td) struct mount *oldmnt; int flags; struct thread *td; { #ifdef QUOTA struct ufsmount *ump; int i; #endif int error, early, depcount, loopcnt, retry_flush_count, retry; int morework; KASSERT(MOUNTEDSOFTDEP(oldmnt) != 0, ("softdep_flushfiles called on non-softdep filesystem")); loopcnt = 10; retry_flush_count = 3; retry_flush: error = 0; /* * Alternately flush the vnodes associated with the mount * point and process any dependencies that the flushing * creates. In theory, this loop can happen at most twice, * but we give it a few extra just to be sure. */ for (; loopcnt > 0; loopcnt--) { /* * Do another flush in case any vnodes were brought in * as part of the cleanup operations. */ early = retry_flush_count == 1 || (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) == 0 ? 0 : EARLYFLUSH; if ((error = ffs_flushfiles(oldmnt, flags | early, td)) != 0) break; if ((error = softdep_flushworklist(oldmnt, &depcount, td)) != 0 || depcount == 0) break; } /* * If we are unmounting then it is an error to fail. If we * are simply trying to downgrade to read-only, then filesystem * activity can keep us busy forever, so we just fail with EBUSY. */ if (loopcnt == 0) { if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) panic("softdep_flushfiles: looping"); error = EBUSY; } if (!error) error = softdep_waitidle(oldmnt, flags); if (!error) { if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) { retry = 0; MNT_ILOCK(oldmnt); KASSERT((oldmnt->mnt_kern_flag & MNTK_NOINSMNTQ) != 0, ("softdep_flushfiles: !MNTK_NOINSMNTQ")); morework = oldmnt->mnt_nvnodelistsize > 0; #ifdef QUOTA ump = VFSTOUFS(oldmnt); UFS_LOCK(ump); for (i = 0; i < MAXQUOTAS; i++) { if (ump->um_quotas[i] != NULLVP) morework = 1; } UFS_UNLOCK(ump); #endif if (morework) { if (--retry_flush_count > 0) { retry = 1; loopcnt = 3; } else error = EBUSY; } MNT_IUNLOCK(oldmnt); if (retry) goto retry_flush; } } return (error); } /* * Structure hashing. * * There are four types of structures that can be looked up: * 1) pagedep structures identified by mount point, inode number, * and logical block. * 2) inodedep structures identified by mount point and inode number. * 3) newblk structures identified by mount point and * physical block number. 
* 4) bmsafemap structures identified by mount point and * cylinder group number. * * The "pagedep" and "inodedep" dependency structures are hashed * separately from the file blocks and inodes to which they correspond. * This separation helps when the in-memory copy of an inode or * file block must be replaced. It also obviates the need to access * an inode or file page when simply updating (or de-allocating) * dependency structures. Lookup of newblk structures is needed to * find newly allocated blocks when trying to associate them with * their allocdirect or allocindir structure. * * The lookup routines optionally create and hash a new instance when * an existing entry is not found. The bmsafemap lookup routine always * allocates a new structure if an existing one is not found. */ #define DEPALLOC 0x0001 /* allocate structure if lookup fails */ /* * Structures and routines associated with pagedep caching. */ #define PAGEDEP_HASH(ump, inum, lbn) \ (&(ump)->pagedep_hashtbl[((inum) + (lbn)) & (ump)->pagedep_hash_size]) static int pagedep_find(pagedephd, ino, lbn, pagedeppp) struct pagedep_hashhead *pagedephd; ino_t ino; ufs_lbn_t lbn; struct pagedep **pagedeppp; { struct pagedep *pagedep; LIST_FOREACH(pagedep, pagedephd, pd_hash) { if (ino == pagedep->pd_ino && lbn == pagedep->pd_lbn) { *pagedeppp = pagedep; return (1); } } *pagedeppp = NULL; return (0); } /* * Look up a pagedep. Return 1 if found, 0 otherwise. * If not found, allocate if DEPALLOC flag is passed. * Found or allocated entry is returned in pagedeppp. * This routine must be called with splbio interrupts blocked. */ static int pagedep_lookup(mp, bp, ino, lbn, flags, pagedeppp) struct mount *mp; struct buf *bp; ino_t ino; ufs_lbn_t lbn; int flags; struct pagedep **pagedeppp; { struct pagedep *pagedep; struct pagedep_hashhead *pagedephd; struct worklist *wk; struct ufsmount *ump; int ret; int i; ump = VFSTOUFS(mp); LOCK_OWNED(ump); if (bp) { LIST_FOREACH(wk, &bp->b_dep, wk_list) { if (wk->wk_type == D_PAGEDEP) { *pagedeppp = WK_PAGEDEP(wk); return (1); } } } pagedephd = PAGEDEP_HASH(ump, ino, lbn); ret = pagedep_find(pagedephd, ino, lbn, pagedeppp); if (ret) { if (((*pagedeppp)->pd_state & ONWORKLIST) == 0 && bp) WORKLIST_INSERT(&bp->b_dep, &(*pagedeppp)->pd_list); return (1); } if ((flags & DEPALLOC) == 0) return (0); FREE_LOCK(ump); pagedep = malloc(sizeof(struct pagedep), M_PAGEDEP, M_SOFTDEP_FLAGS|M_ZERO); workitem_alloc(&pagedep->pd_list, D_PAGEDEP, mp); ACQUIRE_LOCK(ump); ret = pagedep_find(pagedephd, ino, lbn, pagedeppp); if (*pagedeppp) { /* * This should never happen since we only create pagedeps * with the vnode lock held. Could be an assert. */ WORKITEM_FREE(pagedep, D_PAGEDEP); return (ret); } pagedep->pd_ino = ino; pagedep->pd_lbn = lbn; LIST_INIT(&pagedep->pd_dirremhd); LIST_INIT(&pagedep->pd_pendinghd); for (i = 0; i < DAHASHSZ; i++) LIST_INIT(&pagedep->pd_diraddhd[i]); LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash); WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list); *pagedeppp = pagedep; return (0); } /* * Structures and routines associated with inodedep caching. */ #define INODEDEP_HASH(ump, inum) \ (&(ump)->inodedep_hashtbl[(inum) & (ump)->inodedep_hash_size]) static int inodedep_find(inodedephd, inum, inodedeppp) struct inodedep_hashhead *inodedephd; ino_t inum; struct inodedep **inodedeppp; { struct inodedep *inodedep; LIST_FOREACH(inodedep, inodedephd, id_hash) if (inum == inodedep->id_ino) break; if (inodedep) { *inodedeppp = inodedep; return (1); } *inodedeppp = NULL; return (0); } /* * Look up an inodedep. 
Return 1 if found, 0 if not found. * If not found, allocate if DEPALLOC flag is passed. * Found or allocated entry is returned in inodedeppp. * This routine must be called with splbio interrupts blocked. */ static int inodedep_lookup(mp, inum, flags, inodedeppp) struct mount *mp; ino_t inum; int flags; struct inodedep **inodedeppp; { struct inodedep *inodedep; struct inodedep_hashhead *inodedephd; struct ufsmount *ump; struct fs *fs; ump = VFSTOUFS(mp); LOCK_OWNED(ump); fs = ump->um_fs; inodedephd = INODEDEP_HASH(ump, inum); if (inodedep_find(inodedephd, inum, inodedeppp)) return (1); if ((flags & DEPALLOC) == 0) return (0); /* * If the system is over its limit and our filesystem is * responsible for more than our share of that usage and * we are not in a rush, request some inodedep cleanup. */ if (softdep_excess_items(ump, D_INODEDEP)) schedule_cleanup(mp); else FREE_LOCK(ump); inodedep = malloc(sizeof(struct inodedep), M_INODEDEP, M_SOFTDEP_FLAGS); workitem_alloc(&inodedep->id_list, D_INODEDEP, mp); ACQUIRE_LOCK(ump); if (inodedep_find(inodedephd, inum, inodedeppp)) { WORKITEM_FREE(inodedep, D_INODEDEP); return (1); } inodedep->id_fs = fs; inodedep->id_ino = inum; inodedep->id_state = ALLCOMPLETE; inodedep->id_nlinkdelta = 0; inodedep->id_savedino1 = NULL; inodedep->id_savedsize = -1; inodedep->id_savedextsize = -1; inodedep->id_savednlink = -1; inodedep->id_bmsafemap = NULL; inodedep->id_mkdiradd = NULL; LIST_INIT(&inodedep->id_dirremhd); LIST_INIT(&inodedep->id_pendinghd); LIST_INIT(&inodedep->id_inowait); LIST_INIT(&inodedep->id_bufwait); TAILQ_INIT(&inodedep->id_inoreflst); TAILQ_INIT(&inodedep->id_inoupdt); TAILQ_INIT(&inodedep->id_newinoupdt); TAILQ_INIT(&inodedep->id_extupdt); TAILQ_INIT(&inodedep->id_newextupdt); TAILQ_INIT(&inodedep->id_freeblklst); LIST_INSERT_HEAD(inodedephd, inodedep, id_hash); *inodedeppp = inodedep; return (0); } /* * Structures and routines associated with newblk caching. */ #define NEWBLK_HASH(ump, inum) \ (&(ump)->newblk_hashtbl[(inum) & (ump)->newblk_hash_size]) static int newblk_find(newblkhd, newblkno, flags, newblkpp) struct newblk_hashhead *newblkhd; ufs2_daddr_t newblkno; int flags; struct newblk **newblkpp; { struct newblk *newblk; LIST_FOREACH(newblk, newblkhd, nb_hash) { if (newblkno != newblk->nb_newblkno) continue; /* * If we're creating a new dependency don't match those that * have already been converted to allocdirects. This is for * a frag extend. */ if ((flags & DEPALLOC) && newblk->nb_list.wk_type != D_NEWBLK) continue; break; } if (newblk) { *newblkpp = newblk; return (1); } *newblkpp = NULL; return (0); } /* * Look up a newblk. Return 1 if found, 0 if not found. * If not found, allocate if DEPALLOC flag is passed. * Found or allocated entry is returned in newblkpp. 
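 * Note that when DEPALLOC forces an allocation, the per-mount softdep
 * lock is dropped and reacquired around the malloc(), so the hash is
 * re-searched before the new entry is linked in.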
*/ static int newblk_lookup(mp, newblkno, flags, newblkpp) struct mount *mp; ufs2_daddr_t newblkno; int flags; struct newblk **newblkpp; { struct newblk *newblk; struct newblk_hashhead *newblkhd; struct ufsmount *ump; ump = VFSTOUFS(mp); LOCK_OWNED(ump); newblkhd = NEWBLK_HASH(ump, newblkno); if (newblk_find(newblkhd, newblkno, flags, newblkpp)) return (1); if ((flags & DEPALLOC) == 0) return (0); if (softdep_excess_items(ump, D_NEWBLK) || softdep_excess_items(ump, D_ALLOCDIRECT) || softdep_excess_items(ump, D_ALLOCINDIR)) schedule_cleanup(mp); else FREE_LOCK(ump); newblk = malloc(sizeof(union allblk), M_NEWBLK, M_SOFTDEP_FLAGS | M_ZERO); workitem_alloc(&newblk->nb_list, D_NEWBLK, mp); ACQUIRE_LOCK(ump); if (newblk_find(newblkhd, newblkno, flags, newblkpp)) { WORKITEM_FREE(newblk, D_NEWBLK); return (1); } newblk->nb_freefrag = NULL; LIST_INIT(&newblk->nb_indirdeps); LIST_INIT(&newblk->nb_newdirblk); LIST_INIT(&newblk->nb_jwork); newblk->nb_state = ATTACHED; newblk->nb_newblkno = newblkno; LIST_INSERT_HEAD(newblkhd, newblk, nb_hash); *newblkpp = newblk; return (0); } /* * Structures and routines associated with freed indirect block caching. */ #define INDIR_HASH(ump, blkno) \ (&(ump)->indir_hashtbl[(blkno) & (ump)->indir_hash_size]) /* * Lookup an indirect block in the indir hash table. The freework is * removed and potentially freed. The caller must do a blocking journal * write before writing to the blkno. */ static int indirblk_lookup(mp, blkno) struct mount *mp; ufs2_daddr_t blkno; { struct freework *freework; struct indir_hashhead *wkhd; struct ufsmount *ump; ump = VFSTOUFS(mp); wkhd = INDIR_HASH(ump, blkno); TAILQ_FOREACH(freework, wkhd, fw_next) { if (freework->fw_blkno != blkno) continue; indirblk_remove(freework); return (1); } return (0); } /* * Insert an indirect block represented by freework into the indirblk * hash table so that it may prevent the block from being re-used prior * to the journal being written. */ static void indirblk_insert(freework) struct freework *freework; { struct jblocks *jblocks; struct jseg *jseg; struct ufsmount *ump; ump = VFSTOUFS(freework->fw_list.wk_mp); jblocks = ump->softdep_jblocks; jseg = TAILQ_LAST(&jblocks->jb_segs, jseglst); if (jseg == NULL) return; LIST_INSERT_HEAD(&jseg->js_indirs, freework, fw_segs); TAILQ_INSERT_HEAD(INDIR_HASH(ump, freework->fw_blkno), freework, fw_next); freework->fw_state &= ~DEPCOMPLETE; } static void indirblk_remove(freework) struct freework *freework; { struct ufsmount *ump; ump = VFSTOUFS(freework->fw_list.wk_mp); LIST_REMOVE(freework, fw_segs); TAILQ_REMOVE(INDIR_HASH(ump, freework->fw_blkno), freework, fw_next); freework->fw_state |= DEPCOMPLETE; if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE) WORKITEM_FREE(freework, D_FREEWORK); } /* * Executed during filesystem system initialization before * mounting any filesystems. */ void softdep_initialize() { TAILQ_INIT(&softdepmounts); #ifdef __LP64__ max_softdeps = desiredvnodes * 4; #else max_softdeps = desiredvnodes * 2; #endif /* initialise bioops hack */ bioops.io_start = softdep_disk_io_initiation; bioops.io_complete = softdep_disk_write_complete; bioops.io_deallocate = softdep_deallocate_dependencies; bioops.io_countdeps = softdep_count_dependencies; softdep_ast_cleanup = softdep_ast_cleanup_proc; /* Initialize the callout with an mtx. */ callout_init_mtx(&softdep_callout, &lk, 0); } /* * Executed after all filesystems have been unmounted during * filesystem module unload. 
*/ void softdep_uninitialize() { /* clear bioops hack */ bioops.io_start = NULL; bioops.io_complete = NULL; bioops.io_deallocate = NULL; bioops.io_countdeps = NULL; softdep_ast_cleanup = NULL; callout_drain(&softdep_callout); } /* * Called at mount time to notify the dependency code that a * filesystem wishes to use it. */ int softdep_mount(devvp, mp, fs, cred) struct vnode *devvp; struct mount *mp; struct fs *fs; struct ucred *cred; { struct csum_total cstotal; struct mount_softdeps *sdp; struct ufsmount *ump; struct cg *cgp; struct buf *bp; int i, error, cyl; sdp = malloc(sizeof(struct mount_softdeps), M_MOUNTDATA, M_WAITOK | M_ZERO); MNT_ILOCK(mp); mp->mnt_flag = (mp->mnt_flag & ~MNT_ASYNC) | MNT_SOFTDEP; if ((mp->mnt_kern_flag & MNTK_SOFTDEP) == 0) { mp->mnt_kern_flag = (mp->mnt_kern_flag & ~MNTK_ASYNC) | MNTK_SOFTDEP | MNTK_NOASYNC; } ump = VFSTOUFS(mp); ump->um_softdep = sdp; MNT_IUNLOCK(mp); rw_init(LOCK_PTR(ump), "Per-Filesystem Softdep Lock"); sdp->sd_ump = ump; LIST_INIT(&ump->softdep_workitem_pending); LIST_INIT(&ump->softdep_journal_pending); TAILQ_INIT(&ump->softdep_unlinked); LIST_INIT(&ump->softdep_dirtycg); ump->softdep_worklist_tail = NULL; ump->softdep_on_worklist = 0; ump->softdep_deps = 0; LIST_INIT(&ump->softdep_mkdirlisthd); ump->pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP, &ump->pagedep_hash_size); ump->pagedep_nextclean = 0; ump->inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, &ump->inodedep_hash_size); ump->inodedep_nextclean = 0; ump->newblk_hashtbl = hashinit(max_softdeps / 2, M_NEWBLK, &ump->newblk_hash_size); ump->bmsafemap_hashtbl = hashinit(1024, M_BMSAFEMAP, &ump->bmsafemap_hash_size); i = 1 << (ffs(desiredvnodes / 10) - 1); ump->indir_hashtbl = malloc(i * sizeof(struct indir_hashhead), M_FREEWORK, M_WAITOK); ump->indir_hash_size = i - 1; for (i = 0; i <= ump->indir_hash_size; i++) TAILQ_INIT(&ump->indir_hashtbl[i]); ACQUIRE_GBLLOCK(&lk); TAILQ_INSERT_TAIL(&softdepmounts, sdp, sd_next); FREE_GBLLOCK(&lk); if ((fs->fs_flags & FS_SUJ) && (error = journal_mount(mp, fs, cred)) != 0) { printf("Failed to start journal: %d\n", error); softdep_unmount(mp); return (error); } /* * Start our flushing thread in the bufdaemon process. */ ACQUIRE_LOCK(ump); ump->softdep_flags |= FLUSH_STARTING; FREE_LOCK(ump); kproc_kthread_add(&softdep_flush, mp, &bufdaemonproc, &ump->softdep_flushtd, 0, 0, "softdepflush", "%s worker", mp->mnt_stat.f_mntonname); ACQUIRE_LOCK(ump); while ((ump->softdep_flags & FLUSH_STARTING) != 0) { msleep(&ump->softdep_flushtd, LOCK_PTR(ump), PVM, "sdstart", hz / 2); } FREE_LOCK(ump); /* * When doing soft updates, the counters in the * superblock may have gotten out of sync. Recomputation * can take a long time and can be deferred for background * fsck. However, the old behavior of scanning the cylinder * groups and recalculating them at mount time is available * by setting vfs.ffs.compute_summary_at_mount to one. 
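	 * For example, before mounting:
	 *
	 *	sysctl vfs.ffs.compute_summary_at_mount=1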
*/ if (compute_summary_at_mount == 0 || fs->fs_clean != 0) return (0); bzero(&cstotal, sizeof cstotal); for (cyl = 0; cyl < fs->fs_ncg; cyl++) { if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)), fs->fs_cgsize, cred, &bp)) != 0) { brelse(bp); softdep_unmount(mp); return (error); } cgp = (struct cg *)bp->b_data; cstotal.cs_nffree += cgp->cg_cs.cs_nffree; cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree; cstotal.cs_nifree += cgp->cg_cs.cs_nifree; cstotal.cs_ndir += cgp->cg_cs.cs_ndir; fs->fs_cs(fs, cyl) = cgp->cg_cs; brelse(bp); } #ifdef DEBUG if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal)) printf("%s: superblock summary recomputed\n", fs->fs_fsmnt); #endif bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal); return (0); } void softdep_unmount(mp) struct mount *mp; { struct ufsmount *ump; #ifdef INVARIANTS int i; #endif KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_unmount called on non-softdep filesystem")); ump = VFSTOUFS(mp); MNT_ILOCK(mp); mp->mnt_flag &= ~MNT_SOFTDEP; if (MOUNTEDSUJ(mp) == 0) { MNT_IUNLOCK(mp); } else { mp->mnt_flag &= ~MNT_SUJ; MNT_IUNLOCK(mp); journal_unmount(ump); } /* * Shut down our flushing thread. Check for NULL is if * softdep_mount errors out before the thread has been created. */ if (ump->softdep_flushtd != NULL) { ACQUIRE_LOCK(ump); ump->softdep_flags |= FLUSH_EXIT; wakeup(&ump->softdep_flushtd); msleep(&ump->softdep_flags, LOCK_PTR(ump), PVM | PDROP, "sdwait", 0); KASSERT((ump->softdep_flags & FLUSH_EXIT) == 0, ("Thread shutdown failed")); } /* * Free up our resources. */ ACQUIRE_GBLLOCK(&lk); TAILQ_REMOVE(&softdepmounts, ump->um_softdep, sd_next); FREE_GBLLOCK(&lk); rw_destroy(LOCK_PTR(ump)); hashdestroy(ump->pagedep_hashtbl, M_PAGEDEP, ump->pagedep_hash_size); hashdestroy(ump->inodedep_hashtbl, M_INODEDEP, ump->inodedep_hash_size); hashdestroy(ump->newblk_hashtbl, M_NEWBLK, ump->newblk_hash_size); hashdestroy(ump->bmsafemap_hashtbl, M_BMSAFEMAP, ump->bmsafemap_hash_size); free(ump->indir_hashtbl, M_FREEWORK); #ifdef INVARIANTS for (i = 0; i <= D_LAST; i++) KASSERT(ump->softdep_curdeps[i] == 0, ("Unmount %s: Dep type %s != 0 (%ld)", ump->um_fs->fs_fsmnt, TYPENAME(i), ump->softdep_curdeps[i])); #endif free(ump->um_softdep, M_MOUNTDATA); } static struct jblocks * jblocks_create(void) { struct jblocks *jblocks; jblocks = malloc(sizeof(*jblocks), M_JBLOCKS, M_WAITOK | M_ZERO); TAILQ_INIT(&jblocks->jb_segs); jblocks->jb_avail = 10; jblocks->jb_extent = malloc(sizeof(struct jextent) * jblocks->jb_avail, M_JBLOCKS, M_WAITOK | M_ZERO); return (jblocks); } static ufs2_daddr_t jblocks_alloc(jblocks, bytes, actual) struct jblocks *jblocks; int bytes; int *actual; { ufs2_daddr_t daddr; struct jextent *jext; int freecnt; int blocks; blocks = bytes / DEV_BSIZE; jext = &jblocks->jb_extent[jblocks->jb_head]; freecnt = jext->je_blocks - jblocks->jb_off; if (freecnt == 0) { jblocks->jb_off = 0; if (++jblocks->jb_head > jblocks->jb_used) jblocks->jb_head = 0; jext = &jblocks->jb_extent[jblocks->jb_head]; freecnt = jext->je_blocks; } if (freecnt > blocks) freecnt = blocks; *actual = freecnt * DEV_BSIZE; daddr = jext->je_daddr + jblocks->jb_off; jblocks->jb_off += freecnt; jblocks->jb_free -= freecnt; return (daddr); } static void jblocks_free(jblocks, mp, bytes) struct jblocks *jblocks; struct mount *mp; int bytes; { LOCK_OWNED(VFSTOUFS(mp)); jblocks->jb_free += bytes / DEV_BSIZE; if (jblocks->jb_suspended) worklist_speedup(mp); wakeup(jblocks); } static void jblocks_destroy(jblocks) struct jblocks *jblocks; { if (jblocks->jb_extent) free(jblocks->jb_extent, M_JBLOCKS); 
free(jblocks, M_JBLOCKS); } static void jblocks_add(jblocks, daddr, blocks) struct jblocks *jblocks; ufs2_daddr_t daddr; int blocks; { struct jextent *jext; jblocks->jb_blocks += blocks; jblocks->jb_free += blocks; jext = &jblocks->jb_extent[jblocks->jb_used]; /* Adding the first block. */ if (jext->je_daddr == 0) { jext->je_daddr = daddr; jext->je_blocks = blocks; return; } /* Extending the last extent. */ if (jext->je_daddr + jext->je_blocks == daddr) { jext->je_blocks += blocks; return; } /* Adding a new extent. */ if (++jblocks->jb_used == jblocks->jb_avail) { jblocks->jb_avail *= 2; jext = malloc(sizeof(struct jextent) * jblocks->jb_avail, M_JBLOCKS, M_WAITOK | M_ZERO); memcpy(jext, jblocks->jb_extent, sizeof(struct jextent) * jblocks->jb_used); free(jblocks->jb_extent, M_JBLOCKS); jblocks->jb_extent = jext; } jext = &jblocks->jb_extent[jblocks->jb_used]; jext->je_daddr = daddr; jext->je_blocks = blocks; return; } int softdep_journal_lookup(mp, vpp) struct mount *mp; struct vnode **vpp; { struct componentname cnp; struct vnode *dvp; ino_t sujournal; int error; error = VFS_VGET(mp, UFS_ROOTINO, LK_EXCLUSIVE, &dvp); if (error) return (error); bzero(&cnp, sizeof(cnp)); cnp.cn_nameiop = LOOKUP; cnp.cn_flags = ISLASTCN; cnp.cn_thread = curthread; cnp.cn_cred = curthread->td_ucred; cnp.cn_pnbuf = SUJ_FILE; cnp.cn_nameptr = SUJ_FILE; cnp.cn_namelen = strlen(SUJ_FILE); error = ufs_lookup_ino(dvp, NULL, &cnp, &sujournal); vput(dvp); if (error != 0) return (error); error = VFS_VGET(mp, sujournal, LK_EXCLUSIVE, vpp); return (error); } /* * Open and verify the journal file. */ static int journal_mount(mp, fs, cred) struct mount *mp; struct fs *fs; struct ucred *cred; { struct jblocks *jblocks; struct ufsmount *ump; struct vnode *vp; struct inode *ip; ufs2_daddr_t blkno; int bcount; int error; int i; ump = VFSTOUFS(mp); ump->softdep_journal_tail = NULL; ump->softdep_on_journal = 0; ump->softdep_accdeps = 0; ump->softdep_req = 0; ump->softdep_jblocks = NULL; error = softdep_journal_lookup(mp, &vp); if (error != 0) { printf("Failed to find journal. Use tunefs to create one\n"); return (error); } ip = VTOI(vp); if (ip->i_size < SUJ_MIN) { error = ENOSPC; goto out; } bcount = lblkno(fs, ip->i_size); /* Only use whole blocks. */ jblocks = jblocks_create(); for (i = 0; i < bcount; i++) { error = ufs_bmaparray(vp, i, &blkno, NULL, NULL, NULL); if (error) break; jblocks_add(jblocks, blkno, fsbtodb(fs, fs->fs_frag)); } if (error) { jblocks_destroy(jblocks); goto out; } jblocks->jb_low = jblocks->jb_free / 3; /* Reserve 33%. */ jblocks->jb_min = jblocks->jb_free / 10; /* Suspend at 10%. */ ump->softdep_jblocks = jblocks; out: if (error == 0) { MNT_ILOCK(mp); mp->mnt_flag |= MNT_SUJ; mp->mnt_flag &= ~MNT_SOFTDEP; MNT_IUNLOCK(mp); /* * Only validate the journal contents if the * filesystem is clean, otherwise we write the logs * but they'll never be used. If the filesystem was * still dirty when we mounted it the journal is * invalid and a new journal can only be valid if it * starts from a clean mount. */ if (fs->fs_clean) { DIP_SET(ip, i_modrev, fs->fs_mtime); ip->i_flags |= IN_MODIFIED; ffs_update(vp, 1); } } vput(vp); return (error); } static void journal_unmount(ump) struct ufsmount *ump; { if (ump->softdep_jblocks) jblocks_destroy(ump->softdep_jblocks); ump->softdep_jblocks = NULL; } /* * Called when a journal record is ready to be written. Space is allocated * and the journal entry is created when the journal is flushed to stable * store. 
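 * Entries queued here accumulate on softdep_journal_pending until
 * softdep_process_journal() packs them into a jseg and writes them.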
 */
static void
add_to_journal(wk)
	struct worklist *wk;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(wk->wk_mp);
	LOCK_OWNED(ump);
	if (wk->wk_state & ONWORKLIST)
		panic("add_to_journal: %s(0x%X) already on list",
		    TYPENAME(wk->wk_type), wk->wk_state);
	wk->wk_state |= ONWORKLIST | DEPCOMPLETE;
	if (LIST_EMPTY(&ump->softdep_journal_pending)) {
		ump->softdep_jblocks->jb_age = ticks;
		LIST_INSERT_HEAD(&ump->softdep_journal_pending, wk, wk_list);
	} else
		LIST_INSERT_AFTER(ump->softdep_journal_tail, wk, wk_list);
	ump->softdep_journal_tail = wk;
	ump->softdep_on_journal += 1;
}

/*
 * Remove an arbitrary item from the journal worklist, maintaining the
 * tail pointer. This happens when a new operation obviates the need to
 * journal an old operation.
 */
static void
remove_from_journal(wk)
	struct worklist *wk;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(wk->wk_mp);
	LOCK_OWNED(ump);
#ifdef SUJ_DEBUG
	{
		struct worklist *wkn;

		LIST_FOREACH(wkn, &ump->softdep_journal_pending, wk_list)
			if (wkn == wk)
				break;
		if (wkn == NULL)
			panic("remove_from_journal: %p is not in journal", wk);
	}
#endif
	/*
	 * We emulate a TAILQ to save space in most structures which do not
	 * require TAILQ semantics. Here we must update the tail position
	 * when removing the tail which is not the final entry. This works
	 * only if the worklist linkage is at the beginning of the structure.
	 */
	if (ump->softdep_journal_tail == wk)
		ump->softdep_journal_tail =
		    (struct worklist *)wk->wk_list.le_prev;
	WORKLIST_REMOVE(wk);
	ump->softdep_on_journal -= 1;
}

/*
 * Check for journal space as well as dependency limits so the prelink
 * code can throttle both journaled and non-journaled filesystems.
 * Threshold is 0 for low and 1 for min.
 */
static int
journal_space(ump, thresh)
	struct ufsmount *ump;
	int thresh;
{
	struct jblocks *jblocks;
	int limit, avail;

	jblocks = ump->softdep_jblocks;
	if (jblocks == NULL)
		return (1);
	/*
	 * We use a tighter restriction here to prevent request_cleanup()
	 * running in threads from running into locks we currently hold.
	 * We have to be over the limit and our filesystem has to be
	 * responsible for more than our share of that usage.
	 */
	limit = (max_softdeps / 10) * 9;
	if (dep_current[D_INODEDEP] > limit &&
	    ump->softdep_curdeps[D_INODEDEP] > limit / stat_flush_threads)
		return (0);
	if (thresh)
		thresh = jblocks->jb_min;
	else
		thresh = jblocks->jb_low;
	avail = (ump->softdep_on_journal * JREC_SIZE) / DEV_BSIZE;
	avail = jblocks->jb_free - avail;

	return (avail > thresh);
}

static void
journal_suspend(ump)
	struct ufsmount *ump;
{
	struct jblocks *jblocks;
	struct mount *mp;

	mp = UFSTOVFS(ump);
	jblocks = ump->softdep_jblocks;
	MNT_ILOCK(mp);
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0) {
		stat_journal_min++;
		mp->mnt_kern_flag |= MNTK_SUSPEND;
		mp->mnt_susp_owner = ump->softdep_flushtd;
	}
	jblocks->jb_suspended = 1;
	MNT_IUNLOCK(mp);
}

static int
journal_unsuspend(struct ufsmount *ump)
{
	struct jblocks *jblocks;
	struct mount *mp;

	mp = UFSTOVFS(ump);
	jblocks = ump->softdep_jblocks;

	if (jblocks != NULL && jblocks->jb_suspended &&
	    journal_space(ump, jblocks->jb_min)) {
		jblocks->jb_suspended = 0;
		FREE_LOCK(ump);
		mp->mnt_susp_owner = curthread;
		vfs_write_resume(mp, 0);
		ACQUIRE_LOCK(ump);
		return (1);
	}
	return (0);
}

/*
 * Called before any allocation function to be certain that there is
 * sufficient space in the journal prior to creating any new records.
 * Since in the case of block allocation we may have multiple locked
 * buffers at the time of the actual allocation we can not block
 * when the journal records are created.
Doing so would create a deadlock * if any of these buffers needed to be flushed to reclaim space. Instead * we require a sufficiently large amount of available space such that * each thread in the system could have passed this allocation check and * still have sufficient free space. With 20% of a minimum journal size * of 1MB we have 6553 records available. */ int softdep_prealloc(vp, waitok) struct vnode *vp; int waitok; { struct ufsmount *ump; KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0, ("softdep_prealloc called on non-softdep filesystem")); /* * Nothing to do if we are not running journaled soft updates. * If we currently hold the snapshot lock, we must avoid * handling other resources that could cause deadlock. Do not * touch quotas vnode since it is typically recursed with * other vnode locks held. */ if (DOINGSUJ(vp) == 0 || IS_SNAPSHOT(VTOI(vp)) || (vp->v_vflag & VV_SYSTEM) != 0) return (0); ump = VFSTOUFS(vp->v_mount); ACQUIRE_LOCK(ump); if (journal_space(ump, 0)) { FREE_LOCK(ump); return (0); } stat_journal_low++; FREE_LOCK(ump); if (waitok == MNT_NOWAIT) return (ENOSPC); /* * Attempt to sync this vnode once to flush any journal * work attached to it. */ if ((curthread->td_pflags & TDP_COWINPROGRESS) == 0) ffs_syncvnode(vp, waitok, 0); ACQUIRE_LOCK(ump); process_removes(vp); process_truncates(vp); if (journal_space(ump, 0) == 0) { softdep_speedup(ump); if (journal_space(ump, 1) == 0) journal_suspend(ump); } FREE_LOCK(ump); return (0); } /* * Before adjusting a link count on a vnode verify that we have sufficient * journal space. If not, process operations that depend on the currently * locked pair of vnodes to try to flush space as the syncer, buf daemon, * and softdep flush threads can not acquire these locks to reclaim space. */ static void softdep_prelink(dvp, vp) struct vnode *dvp; struct vnode *vp; { struct ufsmount *ump; ump = VFSTOUFS(dvp->v_mount); LOCK_OWNED(ump); /* * Nothing to do if we have sufficient journal space. * If we currently hold the snapshot lock, we must avoid * handling other resources that could cause deadlock. */ if (journal_space(ump, 0) || (vp && IS_SNAPSHOT(VTOI(vp)))) return; stat_journal_low++; FREE_LOCK(ump); if (vp) ffs_syncvnode(vp, MNT_NOWAIT, 0); ffs_syncvnode(dvp, MNT_WAIT, 0); ACQUIRE_LOCK(ump); /* Process vp before dvp as it may create .. removes. 
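	 * Flushing vp's removes can queue new ".." removes against dvp,
	 * which the dvp pass below then collects.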
*/ if (vp) { process_removes(vp); process_truncates(vp); } process_removes(dvp); process_truncates(dvp); softdep_speedup(ump); process_worklist_item(UFSTOVFS(ump), 2, LK_NOWAIT); if (journal_space(ump, 0) == 0) { softdep_speedup(ump); if (journal_space(ump, 1) == 0) journal_suspend(ump); } } static void jseg_write(ump, jseg, data) struct ufsmount *ump; struct jseg *jseg; uint8_t *data; { struct jsegrec *rec; rec = (struct jsegrec *)data; rec->jsr_seq = jseg->js_seq; rec->jsr_oldest = jseg->js_oldseq; rec->jsr_cnt = jseg->js_cnt; rec->jsr_blocks = jseg->js_size / ump->um_devvp->v_bufobj.bo_bsize; rec->jsr_crc = 0; rec->jsr_time = ump->um_fs->fs_mtime; } static inline void inoref_write(inoref, jseg, rec) struct inoref *inoref; struct jseg *jseg; struct jrefrec *rec; { inoref->if_jsegdep->jd_seg = jseg; rec->jr_ino = inoref->if_ino; rec->jr_parent = inoref->if_parent; rec->jr_nlink = inoref->if_nlink; rec->jr_mode = inoref->if_mode; rec->jr_diroff = inoref->if_diroff; } static void jaddref_write(jaddref, jseg, data) struct jaddref *jaddref; struct jseg *jseg; uint8_t *data; { struct jrefrec *rec; rec = (struct jrefrec *)data; rec->jr_op = JOP_ADDREF; inoref_write(&jaddref->ja_ref, jseg, rec); } static void jremref_write(jremref, jseg, data) struct jremref *jremref; struct jseg *jseg; uint8_t *data; { struct jrefrec *rec; rec = (struct jrefrec *)data; rec->jr_op = JOP_REMREF; inoref_write(&jremref->jr_ref, jseg, rec); } static void jmvref_write(jmvref, jseg, data) struct jmvref *jmvref; struct jseg *jseg; uint8_t *data; { struct jmvrec *rec; rec = (struct jmvrec *)data; rec->jm_op = JOP_MVREF; rec->jm_ino = jmvref->jm_ino; rec->jm_parent = jmvref->jm_parent; rec->jm_oldoff = jmvref->jm_oldoff; rec->jm_newoff = jmvref->jm_newoff; } static void jnewblk_write(jnewblk, jseg, data) struct jnewblk *jnewblk; struct jseg *jseg; uint8_t *data; { struct jblkrec *rec; jnewblk->jn_jsegdep->jd_seg = jseg; rec = (struct jblkrec *)data; rec->jb_op = JOP_NEWBLK; rec->jb_ino = jnewblk->jn_ino; rec->jb_blkno = jnewblk->jn_blkno; rec->jb_lbn = jnewblk->jn_lbn; rec->jb_frags = jnewblk->jn_frags; rec->jb_oldfrags = jnewblk->jn_oldfrags; } static void jfreeblk_write(jfreeblk, jseg, data) struct jfreeblk *jfreeblk; struct jseg *jseg; uint8_t *data; { struct jblkrec *rec; jfreeblk->jf_dep.jb_jsegdep->jd_seg = jseg; rec = (struct jblkrec *)data; rec->jb_op = JOP_FREEBLK; rec->jb_ino = jfreeblk->jf_ino; rec->jb_blkno = jfreeblk->jf_blkno; rec->jb_lbn = jfreeblk->jf_lbn; rec->jb_frags = jfreeblk->jf_frags; rec->jb_oldfrags = 0; } static void jfreefrag_write(jfreefrag, jseg, data) struct jfreefrag *jfreefrag; struct jseg *jseg; uint8_t *data; { struct jblkrec *rec; jfreefrag->fr_jsegdep->jd_seg = jseg; rec = (struct jblkrec *)data; rec->jb_op = JOP_FREEBLK; rec->jb_ino = jfreefrag->fr_ino; rec->jb_blkno = jfreefrag->fr_blkno; rec->jb_lbn = jfreefrag->fr_lbn; rec->jb_frags = jfreefrag->fr_frags; rec->jb_oldfrags = 0; } static void jtrunc_write(jtrunc, jseg, data) struct jtrunc *jtrunc; struct jseg *jseg; uint8_t *data; { struct jtrncrec *rec; jtrunc->jt_dep.jb_jsegdep->jd_seg = jseg; rec = (struct jtrncrec *)data; rec->jt_op = JOP_TRUNC; rec->jt_ino = jtrunc->jt_ino; rec->jt_size = jtrunc->jt_size; rec->jt_extsize = jtrunc->jt_extsize; } static void jfsync_write(jfsync, jseg, data) struct jfsync *jfsync; struct jseg *jseg; uint8_t *data; { struct jtrncrec *rec; rec = (struct jtrncrec *)data; rec->jt_op = JOP_SYNC; rec->jt_ino = jfsync->jfs_ino; rec->jt_size = jfsync->jfs_size; rec->jt_extsize = jfsync->jfs_extsize; } 
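/*
 * A worked example of the record packing done by softdep_process_journal()
 * below (illustrative sketch; assumes the common DEV_BSIZE of 512 bytes,
 * JREC_SIZE of 32 bytes, and a 32K filesystem block):
 *
 *	jrecmin = (512 / 32) - 1 = 15 records per device block
 *	jrecmax = (32768 / 512) * 15 = 960 records per filesystem block
 *
 * Each device block begins with a jsegrec header written by jseg_write();
 * the remaining JREC_SIZE slots hold the jrefrec/jblkrec/jmvrec/jtrncrec
 * payloads produced by the writers above.
 */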
static void
softdep_flushjournal(mp)
	struct mount *mp;
{
	struct jblocks *jblocks;
	struct ufsmount *ump;

	if (MOUNTEDSUJ(mp) == 0)
		return;
	ump = VFSTOUFS(mp);
	jblocks = ump->softdep_jblocks;
	ACQUIRE_LOCK(ump);
	while (ump->softdep_on_journal) {
		jblocks->jb_needseg = 1;
		softdep_process_journal(mp, NULL, MNT_WAIT);
	}
	FREE_LOCK(ump);
}

static void softdep_synchronize_completed(struct bio *);
static void softdep_synchronize(struct bio *, struct ufsmount *, void *);

static void
softdep_synchronize_completed(bp)
	struct bio *bp;
{
	struct jseg *oldest;
	struct jseg *jseg;
	struct ufsmount *ump;

	/*
	 * caller1 marks the last segment written before we issued the
	 * synchronize cache.
	 */
	jseg = bp->bio_caller1;
	if (jseg == NULL) {
		g_destroy_bio(bp);
		return;
	}
	ump = VFSTOUFS(jseg->js_list.wk_mp);
	ACQUIRE_LOCK(ump);
	oldest = NULL;
	/*
	 * Mark all the journal entries waiting on the synchronize cache
	 * as completed so they may continue on.
	 */
	while (jseg != NULL && (jseg->js_state & COMPLETE) == 0) {
		jseg->js_state |= COMPLETE;
		oldest = jseg;
		jseg = TAILQ_PREV(jseg, jseglst, js_next);
	}
	/*
	 * Restart deferred journal entry processing from the oldest
	 * completed jseg.
	 */
	if (oldest)
		complete_jsegs(oldest);
	FREE_LOCK(ump);
	g_destroy_bio(bp);
}

/*
 * Send BIO_FLUSH/SYNCHRONIZE CACHE to the device to enforce write ordering
 * barriers. The journal must be written prior to any blocks that depend
 * on it and the journal can not be released until the blocks have been
 * written. This code handles both barriers simultaneously.
 */
static void
softdep_synchronize(bp, ump, caller1)
	struct bio *bp;
	struct ufsmount *ump;
	void *caller1;
{

	bp->bio_cmd = BIO_FLUSH;
	bp->bio_flags |= BIO_ORDERED;
	bp->bio_data = NULL;
	bp->bio_offset = ump->um_cp->provider->mediasize;
	bp->bio_length = 0;
	bp->bio_done = softdep_synchronize_completed;
	bp->bio_caller1 = caller1;
	g_io_request(bp,
	    (struct g_consumer *)ump->um_devvp->v_bufobj.bo_private);
}

/*
 * Flush some journal records to disk.
 */
static void
softdep_process_journal(mp, needwk, flags)
	struct mount *mp;
	struct worklist *needwk;
	int flags;
{
	struct jblocks *jblocks;
	struct ufsmount *ump;
	struct worklist *wk;
	struct jseg *jseg;
	struct buf *bp;
	struct bio *bio;
	uint8_t *data;
	struct fs *fs;
	int shouldflush;
	int segwritten;
	int jrecmin;	/* Minimum records per block. */
	int jrecmax;	/* Maximum records per block. */
	int size;
	int cnt;
	int off;
	int devbsize;

	if (MOUNTEDSUJ(mp) == 0)
		return;
	shouldflush = softdep_flushcache;
	bio = NULL;
	jseg = NULL;
	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	fs = ump->um_fs;
	jblocks = ump->softdep_jblocks;
	devbsize = ump->um_devvp->v_bufobj.bo_bsize;
	/*
	 * We write anywhere between a disk block and fs block. The upper
	 * bound is picked to prevent buffer cache fragmentation and limit
	 * processing time per I/O.
	 */
	jrecmin = (devbsize / JREC_SIZE) - 1;	/* -1 for seg header */
	jrecmax = (fs->fs_bsize / devbsize) * jrecmin;
	segwritten = 0;
	for (;;) {
		cnt = ump->softdep_on_journal;
		/*
		 * Criteria for writing a segment:
		 * 1) We have a full block.
		 * 2) We're called from jwait() and haven't found the
		 *    journal item yet.
		 * 3) Always write if needseg is set.
		 * 4) If we are called from process_worklist and have
		 *    not yet written anything we write a partial block
		 *    to enforce a 1 second maximum latency on journal
		 *    entries.
		 */
		if (cnt < (jrecmax - 1) && needwk == NULL &&
		    jblocks->jb_needseg == 0 && (segwritten || cnt == 0))
			break;
		cnt++;
		/*
		 * Verify some free journal space.
		 * softdep_prealloc() should guarantee that we don't run
		 * out, so this is indicative of a problem with the flow
		 * control. Try to recover gracefully in any event.
		 */
		while (jblocks->jb_free == 0) {
			if (flags != MNT_WAIT)
				break;
			printf("softdep: Out of journal space!\n");
			softdep_speedup(ump);
			msleep(jblocks, LOCK_PTR(ump), PRIBIO, "jblocks", hz);
		}
		FREE_LOCK(ump);
		jseg = malloc(sizeof(*jseg), M_JSEG, M_SOFTDEP_FLAGS);
		workitem_alloc(&jseg->js_list, D_JSEG, mp);
		LIST_INIT(&jseg->js_entries);
		LIST_INIT(&jseg->js_indirs);
		jseg->js_state = ATTACHED;
		if (shouldflush == 0)
			jseg->js_state |= COMPLETE;
		else if (bio == NULL)
			bio = g_alloc_bio();
		jseg->js_jblocks = jblocks;
		bp = geteblk(fs->fs_bsize, 0);
		ACQUIRE_LOCK(ump);
		/*
		 * If there was a race while we were allocating the block
		 * and jseg, the entry we care about was likely written.
		 * We bail out in both the WAIT and NOWAIT case and assume
		 * the caller will loop if the entry it cares about is
		 * not written.
		 */
		cnt = ump->softdep_on_journal;
		if (cnt + jblocks->jb_needseg == 0 || jblocks->jb_free == 0) {
			bp->b_flags |= B_INVAL | B_NOCACHE;
			WORKITEM_FREE(jseg, D_JSEG);
			FREE_LOCK(ump);
			brelse(bp);
			ACQUIRE_LOCK(ump);
			break;
		}
		/*
		 * Calculate the disk block size required for the available
		 * records rounded to the min size.
		 */
		if (cnt == 0)
			size = devbsize;
		else if (cnt < jrecmax)
			size = howmany(cnt, jrecmin) * devbsize;
		else
			size = fs->fs_bsize;
		/*
		 * Allocate a disk block for this journal data and account
		 * for truncation of the requested size if enough contiguous
		 * space was not available.
		 */
		bp->b_blkno = jblocks_alloc(jblocks, size, &size);
		bp->b_lblkno = bp->b_blkno;
		bp->b_offset = bp->b_blkno * DEV_BSIZE;
		bp->b_bcount = size;
		bp->b_flags &= ~B_INVAL;
		bp->b_flags |= B_VALIDSUSPWRT | B_NOCOPY;
		/*
		 * Initialize our jseg with cnt records. Assign the next
		 * sequence number to it and link it in-order.
		 */
		cnt = MIN(cnt, (size / devbsize) * jrecmin);
		jseg->js_buf = bp;
		jseg->js_cnt = cnt;
		jseg->js_refs = cnt + 1;	/* Self ref. */
		jseg->js_size = size;
		jseg->js_seq = jblocks->jb_nextseq++;
		if (jblocks->jb_oldestseg == NULL)
			jblocks->jb_oldestseg = jseg;
		jseg->js_oldseq = jblocks->jb_oldestseg->js_seq;
		TAILQ_INSERT_TAIL(&jblocks->jb_segs, jseg, js_next);
		if (jblocks->jb_writeseg == NULL)
			jblocks->jb_writeseg = jseg;
		/*
		 * Start filling in records from the pending list.
		 */
		data = bp->b_data;
		off = 0;

		/*
		 * Always put a header on the first block.
		 * XXX As with below, there might not be a chance to get
		 * into the loop. Ensure that something valid is written.
		 */
		jseg_write(ump, jseg, data);
		off += JREC_SIZE;
		data = bp->b_data + off;

		/*
		 * XXX Something is wrong here. There's no work to do,
		 * but we need to perform an I/O and allow it to complete
		 * anyway.
		 */
		if (LIST_EMPTY(&ump->softdep_journal_pending))
			stat_emptyjblocks++;

		while ((wk = LIST_FIRST(&ump->softdep_journal_pending))
		    != NULL) {
			if (cnt == 0)
				break;
			/* Place a segment header on every device block.
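			 * (i.e. start a fresh jsegrec each time off
			 * crosses a devbsize boundary.)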
*/ if ((off % devbsize) == 0) { jseg_write(ump, jseg, data); off += JREC_SIZE; data = bp->b_data + off; } if (wk == needwk) needwk = NULL; remove_from_journal(wk); wk->wk_state |= INPROGRESS; WORKLIST_INSERT(&jseg->js_entries, wk); switch (wk->wk_type) { case D_JADDREF: jaddref_write(WK_JADDREF(wk), jseg, data); break; case D_JREMREF: jremref_write(WK_JREMREF(wk), jseg, data); break; case D_JMVREF: jmvref_write(WK_JMVREF(wk), jseg, data); break; case D_JNEWBLK: jnewblk_write(WK_JNEWBLK(wk), jseg, data); break; case D_JFREEBLK: jfreeblk_write(WK_JFREEBLK(wk), jseg, data); break; case D_JFREEFRAG: jfreefrag_write(WK_JFREEFRAG(wk), jseg, data); break; case D_JTRUNC: jtrunc_write(WK_JTRUNC(wk), jseg, data); break; case D_JFSYNC: jfsync_write(WK_JFSYNC(wk), jseg, data); break; default: panic("process_journal: Unknown type %s", TYPENAME(wk->wk_type)); /* NOTREACHED */ } off += JREC_SIZE; data = bp->b_data + off; cnt--; } /* Clear any remaining space so we don't leak kernel data */ if (size > off) bzero(data, size - off); /* * Write this one buffer and continue. */ segwritten = 1; jblocks->jb_needseg = 0; WORKLIST_INSERT(&bp->b_dep, &jseg->js_list); FREE_LOCK(ump); pbgetvp(ump->um_devvp, bp); /* * We only do the blocking wait once we find the journal * entry we're looking for. */ if (needwk == NULL && flags == MNT_WAIT) bwrite(bp); else bawrite(bp); ACQUIRE_LOCK(ump); } /* * If we wrote a segment issue a synchronize cache so the journal * is reflected on disk before the data is written. Since reclaiming * journal space also requires writing a journal record this * process also enforces a barrier before reclamation. */ if (segwritten && shouldflush) { softdep_synchronize(bio, ump, TAILQ_LAST(&jblocks->jb_segs, jseglst)); } else if (bio) g_destroy_bio(bio); /* * If we've suspended the filesystem because we ran out of journal * space either try to sync it here to make some progress or * unsuspend it if we already have. */ if (flags == 0 && jblocks->jb_suspended) { if (journal_unsuspend(ump)) return; FREE_LOCK(ump); VFS_SYNC(mp, MNT_NOWAIT); ffs_sbupdate(ump, MNT_WAIT, 0); ACQUIRE_LOCK(ump); } } /* * Complete a jseg, allowing all dependencies awaiting journal writes * to proceed. Each journal dependency also attaches a jsegdep to dependent * structures so that the journal segment can be freed to reclaim space. */ static void complete_jseg(jseg) struct jseg *jseg; { struct worklist *wk; struct jmvref *jmvref; int waiting; #ifdef INVARIANTS int i = 0; #endif while ((wk = LIST_FIRST(&jseg->js_entries)) != NULL) { WORKLIST_REMOVE(wk); waiting = wk->wk_state & IOWAITING; wk->wk_state &= ~(INPROGRESS | IOWAITING); wk->wk_state |= COMPLETE; KASSERT(i++ < jseg->js_cnt, ("handle_written_jseg: overflow %d >= %d", i - 1, jseg->js_cnt)); switch (wk->wk_type) { case D_JADDREF: handle_written_jaddref(WK_JADDREF(wk)); break; case D_JREMREF: handle_written_jremref(WK_JREMREF(wk)); break; case D_JMVREF: rele_jseg(jseg); /* No jsegdep. */ jmvref = WK_JMVREF(wk); LIST_REMOVE(jmvref, jm_deps); if ((jmvref->jm_pagedep->pd_state & ONWORKLIST) == 0) free_pagedep(jmvref->jm_pagedep); WORKITEM_FREE(jmvref, D_JMVREF); break; case D_JNEWBLK: handle_written_jnewblk(WK_JNEWBLK(wk)); break; case D_JFREEBLK: handle_written_jblkdep(&WK_JFREEBLK(wk)->jf_dep); break; case D_JTRUNC: handle_written_jblkdep(&WK_JTRUNC(wk)->jt_dep); break; case D_JFSYNC: rele_jseg(jseg); /* No jsegdep. 
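			 * (jfsync entries carry no jsegdep, so release
			 * the jseg reference directly.)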
*/ WORKITEM_FREE(wk, D_JFSYNC); break; case D_JFREEFRAG: handle_written_jfreefrag(WK_JFREEFRAG(wk)); break; default: panic("handle_written_jseg: Unknown type %s", TYPENAME(wk->wk_type)); /* NOTREACHED */ } if (waiting) wakeup(wk); } /* Release the self reference so the structure may be freed. */ rele_jseg(jseg); } /* * Determine which jsegs are ready for completion processing. Waits for * synchronize cache to complete as well as forcing in-order completion * of journal entries. */ static void complete_jsegs(jseg) struct jseg *jseg; { struct jblocks *jblocks; struct jseg *jsegn; jblocks = jseg->js_jblocks; /* * Don't allow out of order completions. If this isn't the first * block wait for it to write before we're done. */ if (jseg != jblocks->jb_writeseg) return; /* Iterate through available jsegs processing their entries. */ while (jseg && (jseg->js_state & ALLCOMPLETE) == ALLCOMPLETE) { jblocks->jb_oldestwrseq = jseg->js_oldseq; jsegn = TAILQ_NEXT(jseg, js_next); complete_jseg(jseg); jseg = jsegn; } jblocks->jb_writeseg = jseg; /* * Attempt to free jsegs now that oldestwrseq may have advanced. */ free_jsegs(jblocks); } /* * Mark a jseg as DEPCOMPLETE and throw away the buffer. Attempt to handle * the final completions. */ static void handle_written_jseg(jseg, bp) struct jseg *jseg; struct buf *bp; { if (jseg->js_refs == 0) panic("handle_written_jseg: No self-reference on %p", jseg); jseg->js_state |= DEPCOMPLETE; /* * We'll never need this buffer again, set flags so it will be * discarded. */ bp->b_flags |= B_INVAL | B_NOCACHE; pbrelvp(bp); complete_jsegs(jseg); } static inline struct jsegdep * inoref_jseg(inoref) struct inoref *inoref; { struct jsegdep *jsegdep; jsegdep = inoref->if_jsegdep; inoref->if_jsegdep = NULL; return (jsegdep); } /* * Called once a jremref has made it to stable store. The jremref is marked * complete and we attempt to free it. Any pagedeps writes sleeping waiting * for the jremref to complete will be awoken by free_jremref. */ static void handle_written_jremref(jremref) struct jremref *jremref; { struct inodedep *inodedep; struct jsegdep *jsegdep; struct dirrem *dirrem; /* Grab the jsegdep. */ jsegdep = inoref_jseg(&jremref->jr_ref); /* * Remove us from the inoref list. */ if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 0, &inodedep) == 0) panic("handle_written_jremref: Lost inodedep"); TAILQ_REMOVE(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps); /* * Complete the dirrem. */ dirrem = jremref->jr_dirrem; jremref->jr_dirrem = NULL; LIST_REMOVE(jremref, jr_deps); jsegdep->jd_state |= jremref->jr_state & MKDIR_PARENT; jwork_insert(&dirrem->dm_jwork, jsegdep); if (LIST_EMPTY(&dirrem->dm_jremrefhd) && (dirrem->dm_state & COMPLETE) != 0) add_to_worklist(&dirrem->dm_list, 0); free_jremref(jremref); } /* * Called once a jaddref has made it to stable store. The dependency is * marked complete and any dependent structures are added to the inode * bufwait list to be completed as soon as it is written. If a bitmap write * depends on this entry we move the inode into the inodedephd of the * bmsafemap dependency and attempt to remove the jaddref from the bmsafemap. */ static void handle_written_jaddref(jaddref) struct jaddref *jaddref; { struct jsegdep *jsegdep; struct inodedep *inodedep; struct diradd *diradd; struct mkdir *mkdir; /* Grab the jsegdep. 
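	 * The jsegdep outlives the jaddref: it is passed along via
	 * jwork_insert() below so that the journal space is reclaimed
	 * only after the dependent work completes.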
*/ jsegdep = inoref_jseg(&jaddref->ja_ref); mkdir = NULL; diradd = NULL; if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino, 0, &inodedep) == 0) panic("handle_written_jaddref: Lost inodedep."); if (jaddref->ja_diradd == NULL) panic("handle_written_jaddref: No dependency"); if (jaddref->ja_diradd->da_list.wk_type == D_DIRADD) { diradd = jaddref->ja_diradd; WORKLIST_INSERT(&inodedep->id_bufwait, &diradd->da_list); } else if (jaddref->ja_state & MKDIR_PARENT) { mkdir = jaddref->ja_mkdir; WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir->md_list); } else if (jaddref->ja_state & MKDIR_BODY) mkdir = jaddref->ja_mkdir; else panic("handle_written_jaddref: Unknown dependency %p", jaddref->ja_diradd); jaddref->ja_diradd = NULL; /* also clears ja_mkdir */ /* * Remove us from the inode list. */ TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps); /* * The mkdir may be waiting on the jaddref to clear before freeing. */ if (mkdir) { KASSERT(mkdir->md_list.wk_type == D_MKDIR, ("handle_written_jaddref: Incorrect type for mkdir %s", TYPENAME(mkdir->md_list.wk_type))); mkdir->md_jaddref = NULL; diradd = mkdir->md_diradd; mkdir->md_state |= DEPCOMPLETE; complete_mkdir(mkdir); } jwork_insert(&diradd->da_jwork, jsegdep); if (jaddref->ja_state & NEWBLOCK) { inodedep->id_state |= ONDEPLIST; LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_inodedephd, inodedep, id_deps); } free_jaddref(jaddref); } /* * Called once a jnewblk journal is written. The allocdirect or allocindir * is placed in the bmsafemap to await notification of a written bitmap. If * the operation was canceled we add the segdep to the appropriate * dependency to free the journal space once the canceling operation * completes. */ static void handle_written_jnewblk(jnewblk) struct jnewblk *jnewblk; { struct bmsafemap *bmsafemap; struct freefrag *freefrag; struct freework *freework; struct jsegdep *jsegdep; struct newblk *newblk; /* Grab the jsegdep. */ jsegdep = jnewblk->jn_jsegdep; jnewblk->jn_jsegdep = NULL; if (jnewblk->jn_dep == NULL) panic("handle_written_jnewblk: No dependency for the segdep."); switch (jnewblk->jn_dep->wk_type) { case D_NEWBLK: case D_ALLOCDIRECT: case D_ALLOCINDIR: /* * Add the written block to the bmsafemap so it can * be notified when the bitmap is on disk. */ newblk = WK_NEWBLK(jnewblk->jn_dep); newblk->nb_jnewblk = NULL; if ((newblk->nb_state & GOINGAWAY) == 0) { bmsafemap = newblk->nb_bmsafemap; newblk->nb_state |= ONDEPLIST; LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps); } jwork_insert(&newblk->nb_jwork, jsegdep); break; case D_FREEFRAG: /* * A newblock being removed by a freefrag when replaced by * frag extension. */ freefrag = WK_FREEFRAG(jnewblk->jn_dep); freefrag->ff_jdep = NULL; jwork_insert(&freefrag->ff_jwork, jsegdep); break; case D_FREEWORK: /* * A direct block was removed by truncate. */ freework = WK_FREEWORK(jnewblk->jn_dep); freework->fw_jnewblk = NULL; jwork_insert(&freework->fw_freeblks->fb_jwork, jsegdep); break; default: panic("handle_written_jnewblk: Unknown type %d.", jnewblk->jn_dep->wk_type); } jnewblk->jn_dep = NULL; free_jnewblk(jnewblk); } /* * Cancel a jfreefrag that won't be needed, probably due to colliding with * an in-flight allocation that has not yet been committed. Divorce us * from the freefrag and mark it DEPCOMPLETE so that it may be added * to the worklist. 
*/ static void cancel_jfreefrag(jfreefrag) struct jfreefrag *jfreefrag; { struct freefrag *freefrag; if (jfreefrag->fr_jsegdep) { free_jsegdep(jfreefrag->fr_jsegdep); jfreefrag->fr_jsegdep = NULL; } freefrag = jfreefrag->fr_freefrag; jfreefrag->fr_freefrag = NULL; free_jfreefrag(jfreefrag); freefrag->ff_state |= DEPCOMPLETE; CTR1(KTR_SUJ, "cancel_jfreefrag: blkno %jd", freefrag->ff_blkno); } /* * Free a jfreefrag when the parent freefrag is rendered obsolete. */ static void free_jfreefrag(jfreefrag) struct jfreefrag *jfreefrag; { if (jfreefrag->fr_state & INPROGRESS) WORKLIST_REMOVE(&jfreefrag->fr_list); else if (jfreefrag->fr_state & ONWORKLIST) remove_from_journal(&jfreefrag->fr_list); if (jfreefrag->fr_freefrag != NULL) panic("free_jfreefrag: Still attached to a freefrag."); WORKITEM_FREE(jfreefrag, D_JFREEFRAG); } /* * Called when the journal write for a jfreefrag completes. The parent * freefrag is added to the worklist if this completes its dependencies. */ static void handle_written_jfreefrag(jfreefrag) struct jfreefrag *jfreefrag; { struct jsegdep *jsegdep; struct freefrag *freefrag; /* Grab the jsegdep. */ jsegdep = jfreefrag->fr_jsegdep; jfreefrag->fr_jsegdep = NULL; freefrag = jfreefrag->fr_freefrag; if (freefrag == NULL) panic("handle_written_jfreefrag: No freefrag."); freefrag->ff_state |= DEPCOMPLETE; freefrag->ff_jdep = NULL; jwork_insert(&freefrag->ff_jwork, jsegdep); if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE) add_to_worklist(&freefrag->ff_list, 0); jfreefrag->fr_freefrag = NULL; free_jfreefrag(jfreefrag); } /* * Called when the journal write for a jfreeblk completes. The jfreeblk * is removed from the freeblks list of pending journal writes and the * jsegdep is moved to the freeblks jwork to be completed when all blocks * have been reclaimed. */ static void handle_written_jblkdep(jblkdep) struct jblkdep *jblkdep; { struct freeblks *freeblks; struct jsegdep *jsegdep; /* Grab the jsegdep. */ jsegdep = jblkdep->jb_jsegdep; jblkdep->jb_jsegdep = NULL; freeblks = jblkdep->jb_freeblks; LIST_REMOVE(jblkdep, jb_deps); jwork_insert(&freeblks->fb_jwork, jsegdep); /* * If the freeblks is all journaled, we can add it to the worklist. */ if (LIST_EMPTY(&freeblks->fb_jblkdephd) && (freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE) add_to_worklist(&freeblks->fb_list, WK_NODELAY); free_jblkdep(jblkdep); } static struct jsegdep * newjsegdep(struct worklist *wk) { struct jsegdep *jsegdep; jsegdep = malloc(sizeof(*jsegdep), M_JSEGDEP, M_SOFTDEP_FLAGS); workitem_alloc(&jsegdep->jd_list, D_JSEGDEP, wk->wk_mp); jsegdep->jd_seg = NULL; return (jsegdep); } static struct jmvref * newjmvref(dp, ino, oldoff, newoff) struct inode *dp; ino_t ino; off_t oldoff; off_t newoff; { struct jmvref *jmvref; jmvref = malloc(sizeof(*jmvref), M_JMVREF, M_SOFTDEP_FLAGS); workitem_alloc(&jmvref->jm_list, D_JMVREF, ITOVFS(dp)); jmvref->jm_list.wk_state = ATTACHED | DEPCOMPLETE; jmvref->jm_parent = dp->i_number; jmvref->jm_ino = ino; jmvref->jm_oldoff = oldoff; jmvref->jm_newoff = newoff; return (jmvref); } /* * Allocate a new jremref that tracks the removal of ip from dp with the * directory entry offset of diroff. Mark the entry as ATTACHED and * DEPCOMPLETE as we have all the information required for the journal write * and the directory has already been removed from the buffer. The caller * is responsible for linking the jremref into the pagedep and adding it * to the journal to write. 
The MKDIR_PARENT flag is set if we're doing * a DOTDOT addition so handle_workitem_remove() can properly assign * the jsegdep when we're done. */ static struct jremref * newjremref(struct dirrem *dirrem, struct inode *dp, struct inode *ip, off_t diroff, nlink_t nlink) { struct jremref *jremref; jremref = malloc(sizeof(*jremref), M_JREMREF, M_SOFTDEP_FLAGS); workitem_alloc(&jremref->jr_list, D_JREMREF, ITOVFS(dp)); jremref->jr_state = ATTACHED; newinoref(&jremref->jr_ref, ip->i_number, dp->i_number, diroff, nlink, ip->i_mode); jremref->jr_dirrem = dirrem; return (jremref); } static inline void newinoref(struct inoref *inoref, ino_t ino, ino_t parent, off_t diroff, nlink_t nlink, uint16_t mode) { inoref->if_jsegdep = newjsegdep(&inoref->if_list); inoref->if_diroff = diroff; inoref->if_ino = ino; inoref->if_parent = parent; inoref->if_nlink = nlink; inoref->if_mode = mode; } /* * Allocate a new jaddref to track the addition of ino to dp at diroff. The * directory offset may not be known until later. The caller is responsible * adding the entry to the journal when this information is available. nlink * should be the link count prior to the addition and mode is only required * to have the correct FMT. */ static struct jaddref * newjaddref(struct inode *dp, ino_t ino, off_t diroff, int16_t nlink, uint16_t mode) { struct jaddref *jaddref; jaddref = malloc(sizeof(*jaddref), M_JADDREF, M_SOFTDEP_FLAGS); workitem_alloc(&jaddref->ja_list, D_JADDREF, ITOVFS(dp)); jaddref->ja_state = ATTACHED; jaddref->ja_mkdir = NULL; newinoref(&jaddref->ja_ref, ino, dp->i_number, diroff, nlink, mode); return (jaddref); } /* * Create a new free dependency for a freework. The caller is responsible * for adjusting the reference count when it has the lock held. The freedep * will track an outstanding bitmap write that will ultimately clear the * freework to continue. */ static struct freedep * newfreedep(struct freework *freework) { struct freedep *freedep; freedep = malloc(sizeof(*freedep), M_FREEDEP, M_SOFTDEP_FLAGS); workitem_alloc(&freedep->fd_list, D_FREEDEP, freework->fw_list.wk_mp); freedep->fd_freework = freework; return (freedep); } /* * Free a freedep structure once the buffer it is linked to is written. If * this is the last reference to the freework schedule it for completion. */ static void free_freedep(freedep) struct freedep *freedep; { struct freework *freework; freework = freedep->fd_freework; freework->fw_freeblks->fb_cgwait--; if (--freework->fw_ref == 0) freework_enqueue(freework); WORKITEM_FREE(freedep, D_FREEDEP); } /* * Allocate a new freework structure that may be a level in an indirect * when parent is not NULL or a top level block when it is. The top level * freework structures are allocated without the per-filesystem lock held * and before the freeblks is visible outside of softdep_setup_freeblocks(). */ static struct freework * newfreework(ump, freeblks, parent, lbn, nb, frags, off, journal) struct ufsmount *ump; struct freeblks *freeblks; struct freework *parent; ufs_lbn_t lbn; ufs2_daddr_t nb; int frags; int off; int journal; { struct freework *freework; freework = malloc(sizeof(*freework), M_FREEWORK, M_SOFTDEP_FLAGS); workitem_alloc(&freework->fw_list, D_FREEWORK, freeblks->fb_list.wk_mp); freework->fw_state = ATTACHED; freework->fw_jnewblk = NULL; freework->fw_freeblks = freeblks; freework->fw_parent = parent; freework->fw_lbn = lbn; freework->fw_blkno = nb; freework->fw_frags = frags; freework->fw_indir = NULL; freework->fw_ref = (MOUNTEDSUJ(UFSTOVFS(ump)) == 0 || lbn >= -UFS_NXADDR) ? 
0 : NINDIR(ump->um_fs) + 1; freework->fw_start = freework->fw_off = off; if (journal) newjfreeblk(freeblks, lbn, nb, frags); if (parent == NULL) { ACQUIRE_LOCK(ump); WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list); freeblks->fb_ref++; FREE_LOCK(ump); } return (freework); } /* * Eliminate a jfreeblk for a block that does not need journaling. */ static void cancel_jfreeblk(freeblks, blkno) struct freeblks *freeblks; ufs2_daddr_t blkno; { struct jfreeblk *jfreeblk; struct jblkdep *jblkdep; LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps) { if (jblkdep->jb_list.wk_type != D_JFREEBLK) continue; jfreeblk = WK_JFREEBLK(&jblkdep->jb_list); if (jfreeblk->jf_blkno == blkno) break; } if (jblkdep == NULL) return; CTR1(KTR_SUJ, "cancel_jfreeblk: blkno %jd", blkno); free_jsegdep(jblkdep->jb_jsegdep); LIST_REMOVE(jblkdep, jb_deps); WORKITEM_FREE(jfreeblk, D_JFREEBLK); } /* * Allocate a new jfreeblk to journal top level block pointer when truncating * a file. The caller must add this to the worklist when the per-filesystem * lock is held. */ static struct jfreeblk * newjfreeblk(freeblks, lbn, blkno, frags) struct freeblks *freeblks; ufs_lbn_t lbn; ufs2_daddr_t blkno; int frags; { struct jfreeblk *jfreeblk; jfreeblk = malloc(sizeof(*jfreeblk), M_JFREEBLK, M_SOFTDEP_FLAGS); workitem_alloc(&jfreeblk->jf_dep.jb_list, D_JFREEBLK, freeblks->fb_list.wk_mp); jfreeblk->jf_dep.jb_jsegdep = newjsegdep(&jfreeblk->jf_dep.jb_list); jfreeblk->jf_dep.jb_freeblks = freeblks; jfreeblk->jf_ino = freeblks->fb_inum; jfreeblk->jf_lbn = lbn; jfreeblk->jf_blkno = blkno; jfreeblk->jf_frags = frags; LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jfreeblk->jf_dep, jb_deps); return (jfreeblk); } /* * The journal is only prepared to handle full-size block numbers, so we * have to adjust the record to reflect the change to a full-size block. * For example, suppose we have a block made up of fragments 8-15 and * want to free its last two fragments. We are given a request that says: * FREEBLK ino=5, blkno=14, lbn=0, frags=2, oldfrags=0 * where frags are the number of fragments to free and oldfrags are the * number of fragments to keep. To block align it, we have to change it to * have a valid full-size blkno, so it becomes: * FREEBLK ino=5, blkno=8, lbn=0, frags=2, oldfrags=6 */ static void adjust_newfreework(freeblks, frag_offset) struct freeblks *freeblks; int frag_offset; { struct jfreeblk *jfreeblk; KASSERT((LIST_FIRST(&freeblks->fb_jblkdephd) != NULL && LIST_FIRST(&freeblks->fb_jblkdephd)->jb_list.wk_type == D_JFREEBLK), ("adjust_newfreework: Missing freeblks dependency")); jfreeblk = WK_JFREEBLK(LIST_FIRST(&freeblks->fb_jblkdephd)); jfreeblk->jf_blkno -= frag_offset; jfreeblk->jf_frags += frag_offset; } /* * Allocate a new jtrunc to track a partial truncation. */ static struct jtrunc * newjtrunc(freeblks, size, extsize) struct freeblks *freeblks; off_t size; int extsize; { struct jtrunc *jtrunc; jtrunc = malloc(sizeof(*jtrunc), M_JTRUNC, M_SOFTDEP_FLAGS); workitem_alloc(&jtrunc->jt_dep.jb_list, D_JTRUNC, freeblks->fb_list.wk_mp); jtrunc->jt_dep.jb_jsegdep = newjsegdep(&jtrunc->jt_dep.jb_list); jtrunc->jt_dep.jb_freeblks = freeblks; jtrunc->jt_ino = freeblks->fb_inum; jtrunc->jt_size = size; jtrunc->jt_extsize = extsize; LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jtrunc->jt_dep, jb_deps); return (jtrunc); } /* * If we're canceling a new bitmap we have to search for another ref * to move into the bmsafemap dep. This might be better expressed * with another structure. 
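* * Concretely: when the jaddref carrying NEWBLOCK is canceled, the loop below scans forward through the inode's inoref list for a later jaddref, hands it the rollback state (ATTACHED/UNDONE) along with NEWBLOCK, and relinks it into the bmsafemap's jaddref list so the bitmap write still has a reference to wait on.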
*/ static void move_newblock_dep(jaddref, inodedep) struct jaddref *jaddref; struct inodedep *inodedep; { struct inoref *inoref; struct jaddref *jaddrefn; jaddrefn = NULL; for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref; inoref = TAILQ_NEXT(inoref, if_deps)) { if ((jaddref->ja_state & NEWBLOCK) && inoref->if_list.wk_type == D_JADDREF) { jaddrefn = (struct jaddref *)inoref; break; } } if (jaddrefn == NULL) return; jaddrefn->ja_state &= ~(ATTACHED | UNDONE); jaddrefn->ja_state |= jaddref->ja_state & (ATTACHED | UNDONE | NEWBLOCK); jaddref->ja_state &= ~(ATTACHED | UNDONE | NEWBLOCK); jaddref->ja_state |= ATTACHED; LIST_REMOVE(jaddref, ja_bmdeps); LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_jaddrefhd, jaddrefn, ja_bmdeps); } /* * Cancel a jaddref either before it has been written or while it is being * written. This happens when a link is removed before the add reaches * the disk. The jaddref dependency is kept linked into the bmsafemap * and inode to prevent the link count or bitmap from reaching the disk * until handle_workitem_remove() re-adjusts the counts and bitmaps as * required. * * Returns 1 if the canceled addref requires journaling of the remove and * 0 otherwise. */ static int cancel_jaddref(jaddref, inodedep, wkhd) struct jaddref *jaddref; struct inodedep *inodedep; struct workhead *wkhd; { struct inoref *inoref; struct jsegdep *jsegdep; int needsj; KASSERT((jaddref->ja_state & COMPLETE) == 0, ("cancel_jaddref: Canceling complete jaddref")); if (jaddref->ja_state & (INPROGRESS | COMPLETE)) needsj = 1; else needsj = 0; if (inodedep == NULL) if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino, 0, &inodedep) == 0) panic("cancel_jaddref: Lost inodedep"); /* * We must adjust the nlink of any reference operation that follows * us so that it is consistent with the in-memory reference. This * ensures that inode nlink rollbacks always have the correct link. */ if (needsj == 0) { for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref; inoref = TAILQ_NEXT(inoref, if_deps)) { if (inoref->if_state & GOINGAWAY) break; inoref->if_nlink--; } } jsegdep = inoref_jseg(&jaddref->ja_ref); if (jaddref->ja_state & NEWBLOCK) move_newblock_dep(jaddref, inodedep); wake_worklist(&jaddref->ja_list); jaddref->ja_mkdir = NULL; if (jaddref->ja_state & INPROGRESS) { jaddref->ja_state &= ~INPROGRESS; WORKLIST_REMOVE(&jaddref->ja_list); jwork_insert(wkhd, jsegdep); } else { free_jsegdep(jsegdep); if (jaddref->ja_state & DEPCOMPLETE) remove_from_journal(&jaddref->ja_list); } jaddref->ja_state |= (GOINGAWAY | DEPCOMPLETE); /* * Leave NEWBLOCK jaddrefs on the inodedep so handle_workitem_remove * can arrange for them to be freed with the bitmap. Otherwise we * no longer need this addref attached to the inoreflst and it * will incorrectly adjust nlink if we leave it. */ if ((jaddref->ja_state & NEWBLOCK) == 0) { TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps); jaddref->ja_state |= COMPLETE; free_jaddref(jaddref); return (needsj); } /* * Leave the head of the list for jsegdeps for fast merging. */ if (LIST_FIRST(wkhd) != NULL) { jaddref->ja_state |= ONWORKLIST; LIST_INSERT_AFTER(LIST_FIRST(wkhd), &jaddref->ja_list, wk_list); } else WORKLIST_INSERT(wkhd, &jaddref->ja_list); return (needsj); } /* * Attempt to free a jaddref structure when some work completes. This * should only succeed once the entry is written and all dependencies have * been notified. 
*/ static void free_jaddref(jaddref) struct jaddref *jaddref; { if ((jaddref->ja_state & ALLCOMPLETE) != ALLCOMPLETE) return; if (jaddref->ja_ref.if_jsegdep) panic("free_jaddref: segdep attached to jaddref %p(0x%X)\n", jaddref, jaddref->ja_state); if (jaddref->ja_state & NEWBLOCK) LIST_REMOVE(jaddref, ja_bmdeps); if (jaddref->ja_state & (INPROGRESS | ONWORKLIST)) panic("free_jaddref: Bad state %p(0x%X)", jaddref, jaddref->ja_state); if (jaddref->ja_mkdir != NULL) panic("free_jaddref: Work pending, 0x%X\n", jaddref->ja_state); WORKITEM_FREE(jaddref, D_JADDREF); } /* * Free a jremref structure once it has been written or discarded. */ static void free_jremref(jremref) struct jremref *jremref; { if (jremref->jr_ref.if_jsegdep) free_jsegdep(jremref->jr_ref.if_jsegdep); if (jremref->jr_state & INPROGRESS) panic("free_jremref: IO still pending"); WORKITEM_FREE(jremref, D_JREMREF); } /* * Free a jnewblk structure. */ static void free_jnewblk(jnewblk) struct jnewblk *jnewblk; { if ((jnewblk->jn_state & ALLCOMPLETE) != ALLCOMPLETE) return; LIST_REMOVE(jnewblk, jn_deps); if (jnewblk->jn_dep != NULL) panic("free_jnewblk: Dependency still attached."); WORKITEM_FREE(jnewblk, D_JNEWBLK); } /* * Cancel a jnewblk which has been made redundant by frag extension. */ static void cancel_jnewblk(jnewblk, wkhd) struct jnewblk *jnewblk; struct workhead *wkhd; { struct jsegdep *jsegdep; CTR1(KTR_SUJ, "cancel_jnewblk: blkno %jd", jnewblk->jn_blkno); jsegdep = jnewblk->jn_jsegdep; if (jnewblk->jn_jsegdep == NULL || jnewblk->jn_dep == NULL) panic("cancel_jnewblk: Invalid state"); jnewblk->jn_jsegdep = NULL; jnewblk->jn_dep = NULL; jnewblk->jn_state |= GOINGAWAY; if (jnewblk->jn_state & INPROGRESS) { jnewblk->jn_state &= ~INPROGRESS; WORKLIST_REMOVE(&jnewblk->jn_list); jwork_insert(wkhd, jsegdep); } else { free_jsegdep(jsegdep); remove_from_journal(&jnewblk->jn_list); } wake_worklist(&jnewblk->jn_list); WORKLIST_INSERT(wkhd, &jnewblk->jn_list); } static void free_jblkdep(jblkdep) struct jblkdep *jblkdep; { if (jblkdep->jb_list.wk_type == D_JFREEBLK) WORKITEM_FREE(jblkdep, D_JFREEBLK); else if (jblkdep->jb_list.wk_type == D_JTRUNC) WORKITEM_FREE(jblkdep, D_JTRUNC); else panic("free_jblkdep: Unexpected type %s", TYPENAME(jblkdep->jb_list.wk_type)); } /* * Free a single jseg once it is no longer referenced in memory or on * disk. Reclaim journal blocks and dependencies waiting for the segment * to disappear. */ static void free_jseg(jseg, jblocks) struct jseg *jseg; struct jblocks *jblocks; { struct freework *freework; /* * Free freework structures that were lingering to indicate freed * indirect blocks that forced journal write ordering on reallocate. */ while ((freework = LIST_FIRST(&jseg->js_indirs)) != NULL) indirblk_remove(freework); if (jblocks->jb_oldestseg == jseg) jblocks->jb_oldestseg = TAILQ_NEXT(jseg, js_next); TAILQ_REMOVE(&jblocks->jb_segs, jseg, js_next); jblocks_free(jblocks, jseg->js_list.wk_mp, jseg->js_size); KASSERT(LIST_EMPTY(&jseg->js_entries), ("free_jseg: Freed jseg has valid entries.")); WORKITEM_FREE(jseg, D_JSEG); } /* * Free all jsegs that meet the criteria for being reclaimed and update * oldestseg. */ static void free_jsegs(jblocks) struct jblocks *jblocks; { struct jseg *jseg; /* * Free only those jsegs which have none allocated before them to * preserve the journal space ordering. */ while ((jseg = TAILQ_FIRST(&jblocks->jb_segs)) != NULL) { /* * Only reclaim space when nothing depends on this journal * set and another set has written that it is no longer * valid.
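* * For example, if segments with sequence numbers 1-4 are outstanding and jb_oldestwrseq is 3, then segments 1 and 2 may be freed once they are ALLCOMPLETE and unreferenced, segment 3 may be freed only if it wrote no entries (js_cnt == 0), and segment 4 must wait for a later write to advance jb_oldestwrseq past it.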
*/ if (jseg->js_refs != 0) { jblocks->jb_oldestseg = jseg; return; } if ((jseg->js_state & ALLCOMPLETE) != ALLCOMPLETE) break; if (jseg->js_seq > jblocks->jb_oldestwrseq) break; /* * We can free jsegs that didn't write entries when * oldestwrseq == js_seq. */ if (jseg->js_seq == jblocks->jb_oldestwrseq && jseg->js_cnt != 0) break; free_jseg(jseg, jblocks); } /* * If we exited the loop above we still must discover the * oldest valid segment. */ if (jseg) for (jseg = jblocks->jb_oldestseg; jseg != NULL; jseg = TAILQ_NEXT(jseg, js_next)) if (jseg->js_refs != 0) break; jblocks->jb_oldestseg = jseg; /* * The journal has no valid records but some jsegs may still be * waiting on oldestwrseq to advance. We force a small record * out to permit these lingering records to be reclaimed. */ if (jblocks->jb_oldestseg == NULL && !TAILQ_EMPTY(&jblocks->jb_segs)) jblocks->jb_needseg = 1; } /* * Release one reference to a jseg and free it if the count reaches 0. This * should eventually reclaim journal space as well. */ static void rele_jseg(jseg) struct jseg *jseg; { KASSERT(jseg->js_refs > 0, ("free_jseg: Invalid refcnt %d", jseg->js_refs)); if (--jseg->js_refs != 0) return; free_jsegs(jseg->js_jblocks); } /* * Release a jsegdep and decrement the jseg count. */ static void free_jsegdep(jsegdep) struct jsegdep *jsegdep; { if (jsegdep->jd_seg) rele_jseg(jsegdep->jd_seg); WORKITEM_FREE(jsegdep, D_JSEGDEP); } /* * Wait for a journal item to make it to disk. Initiate journal processing * if required. */ static int jwait(wk, waitfor) struct worklist *wk; int waitfor; { LOCK_OWNED(VFSTOUFS(wk->wk_mp)); /* * Blocking journal waits cause slow synchronous behavior. Record * stats on the frequency of these blocking operations. */ if (waitfor == MNT_WAIT) { stat_journal_wait++; switch (wk->wk_type) { case D_JREMREF: case D_JMVREF: stat_jwait_filepage++; break; case D_JTRUNC: case D_JFREEBLK: stat_jwait_freeblks++; break; case D_JNEWBLK: stat_jwait_newblk++; break; case D_JADDREF: stat_jwait_inode++; break; default: break; } } /* * If IO has not started we process the journal. We can't mark the * worklist item as IOWAITING because we drop the lock while * processing the journal and the worklist entry may be freed after * this point. The caller may call back in and re-issue the request. */ if ((wk->wk_state & INPROGRESS) == 0) { softdep_process_journal(wk->wk_mp, wk, waitfor); if (waitfor != MNT_WAIT) return (EBUSY); return (0); } if (waitfor != MNT_WAIT) return (EBUSY); wait_worklist(wk, "jwait"); return (0); } /* * Lookup an inodedep based on an inode pointer and set the nlinkdelta as * appropriate. This is a convenience function to reduce duplicate code * for the setup and revert functions below. */ static struct inodedep * inodedep_lookup_ip(ip) struct inode *ip; { struct inodedep *inodedep; KASSERT(ip->i_nlink >= ip->i_effnlink, ("inodedep_lookup_ip: bad delta")); (void) inodedep_lookup(ITOVFS(ip), ip->i_number, DEPALLOC, &inodedep); inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked")); return (inodedep); } /* * Called prior to creating a new inode and linking it to a directory. The * jaddref structure must already be allocated by softdep_setup_inomapdep * and it is discovered here so we can initialize the mode and update * nlinkdelta. 
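* * For a typical file create the expected sequence is roughly: ffs_valloc() allocates the inode and calls softdep_setup_inomapdep(), which queues an incomplete jaddref on the inodedep's inoref list; this routine then finds that jaddref (in the journaling case) and the subsequent directory-entry setup supplies the offset and adds it to the journal.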
*/ void softdep_setup_create(dp, ip) struct inode *dp; struct inode *ip; { struct inodedep *inodedep; struct jaddref *jaddref; struct vnode *dvp; KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, ("softdep_setup_create called on non-softdep filesystem")); KASSERT(ip->i_nlink == 1, ("softdep_setup_create: Invalid link count.")); dvp = ITOV(dp); ACQUIRE_LOCK(ITOUMP(dp)); inodedep = inodedep_lookup_ip(ip); if (DOINGSUJ(dvp)) { jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, inoreflst); KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number, ("softdep_setup_create: No addref structure present.")); } softdep_prelink(dvp, NULL); FREE_LOCK(ITOUMP(dp)); } /* * Create a jaddref structure to track the addition of a DOTDOT link when * we are reparenting an inode as part of a rename. This jaddref will be * found by softdep_setup_directory_change. Adjusts nlinkdelta for * non-journaling softdep. */ void softdep_setup_dotdot_link(dp, ip) struct inode *dp; struct inode *ip; { struct inodedep *inodedep; struct jaddref *jaddref; struct vnode *dvp; KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, ("softdep_setup_dotdot_link called on non-softdep filesystem")); dvp = ITOV(dp); jaddref = NULL; /* * We don't set MKDIR_PARENT as this is not tied to a mkdir and * is used as a normal link would be. */ if (DOINGSUJ(dvp)) jaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET, dp->i_effnlink - 1, dp->i_mode); ACQUIRE_LOCK(ITOUMP(dp)); inodedep = inodedep_lookup_ip(dp); if (jaddref) TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps); softdep_prelink(dvp, ITOV(ip)); FREE_LOCK(ITOUMP(dp)); } /* * Create a jaddref structure to track a new link to an inode. The directory * offset is not known until softdep_setup_directory_add or * softdep_setup_directory_change. Adjusts nlinkdelta for non-journaling * softdep. */ void softdep_setup_link(dp, ip) struct inode *dp; struct inode *ip; { struct inodedep *inodedep; struct jaddref *jaddref; struct vnode *dvp; KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, ("softdep_setup_link called on non-softdep filesystem")); dvp = ITOV(dp); jaddref = NULL; if (DOINGSUJ(dvp)) jaddref = newjaddref(dp, ip->i_number, 0, ip->i_effnlink - 1, ip->i_mode); ACQUIRE_LOCK(ITOUMP(dp)); inodedep = inodedep_lookup_ip(ip); if (jaddref) TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps); softdep_prelink(dvp, ITOV(ip)); FREE_LOCK(ITOUMP(dp)); } /* * Called to create the jaddref structures to track . and .. references as * well as lookup and further initialize the incomplete jaddref created * by softdep_setup_inomapdep when the inode was allocated. Adjusts * nlinkdelta for non-journaling softdep. 
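* * For example, a mkdir of ip in directory dp winds up with three jaddrefs when journaling: the jaddref for the new name itself (created by softdep_setup_inomapdep), a MKDIR_BODY jaddref for "." at DOT_OFFSET with a link count of 1, and a MKDIR_PARENT jaddref for ".." at DOTDOT_OFFSET charged against dp's link count.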
*/ void softdep_setup_mkdir(dp, ip) struct inode *dp; struct inode *ip; { struct inodedep *inodedep; struct jaddref *dotdotaddref; struct jaddref *dotaddref; struct jaddref *jaddref; struct vnode *dvp; KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, ("softdep_setup_mkdir called on non-softdep filesystem")); dvp = ITOV(dp); dotaddref = dotdotaddref = NULL; if (DOINGSUJ(dvp)) { dotaddref = newjaddref(ip, ip->i_number, DOT_OFFSET, 1, ip->i_mode); dotaddref->ja_state |= MKDIR_BODY; dotdotaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET, dp->i_effnlink - 1, dp->i_mode); dotdotaddref->ja_state |= MKDIR_PARENT; } ACQUIRE_LOCK(ITOUMP(dp)); inodedep = inodedep_lookup_ip(ip); if (DOINGSUJ(dvp)) { jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, inoreflst); KASSERT(jaddref != NULL, ("softdep_setup_mkdir: No addref structure present.")); KASSERT(jaddref->ja_parent == dp->i_number, ("softdep_setup_mkdir: bad parent %ju", (uintmax_t)jaddref->ja_parent)); TAILQ_INSERT_BEFORE(&jaddref->ja_ref, &dotaddref->ja_ref, if_deps); } inodedep = inodedep_lookup_ip(dp); if (DOINGSUJ(dvp)) TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &dotdotaddref->ja_ref, if_deps); softdep_prelink(ITOV(dp), NULL); FREE_LOCK(ITOUMP(dp)); } /* * Called to track nlinkdelta of the inode and parent directories prior to * unlinking a directory. */ void softdep_setup_rmdir(dp, ip) struct inode *dp; struct inode *ip; { struct vnode *dvp; KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, ("softdep_setup_rmdir called on non-softdep filesystem")); dvp = ITOV(dp); ACQUIRE_LOCK(ITOUMP(dp)); (void) inodedep_lookup_ip(ip); (void) inodedep_lookup_ip(dp); softdep_prelink(dvp, ITOV(ip)); FREE_LOCK(ITOUMP(dp)); } /* * Called to track nlinkdelta of the inode and parent directories prior to * unlink. */ void softdep_setup_unlink(dp, ip) struct inode *dp; struct inode *ip; { struct vnode *dvp; KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, ("softdep_setup_unlink called on non-softdep filesystem")); dvp = ITOV(dp); ACQUIRE_LOCK(ITOUMP(dp)); (void) inodedep_lookup_ip(ip); (void) inodedep_lookup_ip(dp); softdep_prelink(dvp, ITOV(ip)); FREE_LOCK(ITOUMP(dp)); } /* * Called to release the journal structures created by a failed non-directory * creation. Adjusts nlinkdelta for non-journaling softdep. */ void softdep_revert_create(dp, ip) struct inode *dp; struct inode *ip; { struct inodedep *inodedep; struct jaddref *jaddref; struct vnode *dvp; KASSERT(MOUNTEDSOFTDEP(ITOVFS((dp))) != 0, ("softdep_revert_create called on non-softdep filesystem")); dvp = ITOV(dp); ACQUIRE_LOCK(ITOUMP(dp)); inodedep = inodedep_lookup_ip(ip); if (DOINGSUJ(dvp)) { jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, inoreflst); KASSERT(jaddref->ja_parent == dp->i_number, ("softdep_revert_create: addref parent mismatch")); cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); } FREE_LOCK(ITOUMP(dp)); } /* * Called to release the journal structures created by a failed link * addition. Adjusts nlinkdelta for non-journaling softdep. 
*/ void softdep_revert_link(dp, ip) struct inode *dp; struct inode *ip; { struct inodedep *inodedep; struct jaddref *jaddref; struct vnode *dvp; KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, ("softdep_revert_link called on non-softdep filesystem")); dvp = ITOV(dp); ACQUIRE_LOCK(ITOUMP(dp)); inodedep = inodedep_lookup_ip(ip); if (DOINGSUJ(dvp)) { jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, inoreflst); KASSERT(jaddref->ja_parent == dp->i_number, ("softdep_revert_link: addref parent mismatch")); cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); } FREE_LOCK(ITOUMP(dp)); } /* * Called to release the journal structures created by a failed mkdir * attempt. Adjusts nlinkdelta for non-journaling softdep. */ void softdep_revert_mkdir(dp, ip) struct inode *dp; struct inode *ip; { struct inodedep *inodedep; struct jaddref *jaddref; struct jaddref *dotaddref; struct vnode *dvp; KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, ("softdep_revert_mkdir called on non-softdep filesystem")); dvp = ITOV(dp); ACQUIRE_LOCK(ITOUMP(dp)); inodedep = inodedep_lookup_ip(dp); if (DOINGSUJ(dvp)) { jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, inoreflst); KASSERT(jaddref->ja_parent == ip->i_number, ("softdep_revert_mkdir: dotdot addref parent mismatch")); cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); } inodedep = inodedep_lookup_ip(ip); if (DOINGSUJ(dvp)) { jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, inoreflst); KASSERT(jaddref->ja_parent == dp->i_number, ("softdep_revert_mkdir: addref parent mismatch")); dotaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref, inoreflst, if_deps); cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); KASSERT(dotaddref->ja_parent == ip->i_number, ("softdep_revert_mkdir: dot addref parent mismatch")); cancel_jaddref(dotaddref, inodedep, &inodedep->id_inowait); } FREE_LOCK(ITOUMP(dp)); } /* * Called to correct nlinkdelta after a failed rmdir. */ void softdep_revert_rmdir(dp, ip) struct inode *dp; struct inode *ip; { KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, ("softdep_revert_rmdir called on non-softdep filesystem")); ACQUIRE_LOCK(ITOUMP(dp)); (void) inodedep_lookup_ip(ip); (void) inodedep_lookup_ip(dp); FREE_LOCK(ITOUMP(dp)); } /* * Protecting the freemaps (or bitmaps). * * To eliminate the need to execute fsck before mounting a filesystem * after a power failure, one must (conservatively) guarantee that the * on-disk copy of the bitmaps never indicates that a live inode or block is * free. So, when a block or inode is allocated, the bitmap should be * updated (on disk) before any new pointers. When a block or inode is * freed, the bitmap should not be updated until all pointers have been * reset. The latter dependency is handled by the delayed de-allocation * approach described below for block and inode de-allocation. The former * dependency is handled by calling the following procedure when a block or * inode is allocated. When an inode is allocated, an "inodedep" is created * with its DEPCOMPLETE flag cleared until its bitmap is written to disk. * Each "inodedep" is also inserted into the hash indexing structure so * that any additional link additions can be made dependent on the inode * allocation. * * The ufs filesystem maintains a number of free block counts (e.g., per * cylinder group, per cylinder and per <cylinder, rotational position> pair) * in addition to the bitmaps. These counts are used to improve efficiency * during allocation and therefore must be consistent with the bitmaps.
* There is no convenient way to guarantee post-crash consistency of these * counts with simple update ordering, for two main reasons: (1) The counts * and bitmaps for a single cylinder group block are not in the same disk * sector. If a disk write is interrupted (e.g., by power failure), one may * be written and the other not. (2) Some of the counts are located in the * superblock rather than the cylinder group block. So, we focus our soft * updates implementation on protecting the bitmaps. When mounting a * filesystem, we recompute the auxiliary counts from the bitmaps. */ /* * Called just after updating the cylinder group block to allocate an inode. */ void softdep_setup_inomapdep(bp, ip, newinum, mode) struct buf *bp; /* buffer for cylgroup block with inode map */ struct inode *ip; /* inode related to allocation */ ino_t newinum; /* new inode number being allocated */ int mode; { struct inodedep *inodedep; struct bmsafemap *bmsafemap; struct jaddref *jaddref; struct mount *mp; struct fs *fs; mp = ITOVFS(ip); KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_setup_inomapdep called on non-softdep filesystem")); fs = VFSTOUFS(mp)->um_fs; jaddref = NULL; /* * Allocate the journal reference add structure so that the bitmap * can be dependent on it. */ if (MOUNTEDSUJ(mp)) { jaddref = newjaddref(ip, newinum, 0, 0, mode); jaddref->ja_state |= NEWBLOCK; } /* * Create a dependency for the newly allocated inode. * Panic if it already exists as something is seriously wrong. * Otherwise add it to the dependency list for the buffer holding * the cylinder group map from which it was allocated. * * We have to preallocate a bmsafemap entry in case it is needed * in bmsafemap_lookup since once we allocate the inodedep, we * have to finish initializing it before we can FREE_LOCK(). * By preallocating, we avoid FREE_LOCK() while doing a malloc * in bmsafemap_lookup. We cannot call bmsafemap_lookup before * creating the inodedep as it can be freed during the time * that we FREE_LOCK() while allocating the inodedep. We must * call workitem_alloc() before entering the locked section as * it also acquires the lock and we must avoid trying to do so * recursively. */ bmsafemap = malloc(sizeof(struct bmsafemap), M_BMSAFEMAP, M_SOFTDEP_FLAGS); workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp); ACQUIRE_LOCK(ITOUMP(ip)); if ((inodedep_lookup(mp, newinum, DEPALLOC, &inodedep))) panic("softdep_setup_inomapdep: dependency %p for new" " inode already exists", inodedep); bmsafemap = bmsafemap_lookup(mp, bp, ino_to_cg(fs, newinum), bmsafemap); if (jaddref) { LIST_INSERT_HEAD(&bmsafemap->sm_jaddrefhd, jaddref, ja_bmdeps); TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps); } else { inodedep->id_state |= ONDEPLIST; LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps); } inodedep->id_bmsafemap = bmsafemap; inodedep->id_state &= ~DEPCOMPLETE; FREE_LOCK(ITOUMP(ip)); } /* * Called just after updating the cylinder group block to * allocate block or fragment. */ void softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags) struct buf *bp; /* buffer for cylgroup block with block map */ struct mount *mp; /* filesystem doing allocation */ ufs2_daddr_t newblkno; /* number of newly allocated block */ int frags; /* Number of fragments. */ int oldfrags; /* Previous number of fragments for extend.
*/ { struct newblk *newblk; struct bmsafemap *bmsafemap; struct jnewblk *jnewblk; struct ufsmount *ump; struct fs *fs; KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_setup_blkmapdep called on non-softdep filesystem")); ump = VFSTOUFS(mp); fs = ump->um_fs; jnewblk = NULL; /* * Create a dependency for the newly allocated block. * Add it to the dependency list for the buffer holding * the cylinder group map from which it was allocated. */ if (MOUNTEDSUJ(mp)) { jnewblk = malloc(sizeof(*jnewblk), M_JNEWBLK, M_SOFTDEP_FLAGS); workitem_alloc(&jnewblk->jn_list, D_JNEWBLK, mp); jnewblk->jn_jsegdep = newjsegdep(&jnewblk->jn_list); jnewblk->jn_state = ATTACHED; jnewblk->jn_blkno = newblkno; jnewblk->jn_frags = frags; jnewblk->jn_oldfrags = oldfrags; #ifdef SUJ_DEBUG { struct cg *cgp; uint8_t *blksfree; long bno; int i; cgp = (struct cg *)bp->b_data; blksfree = cg_blksfree(cgp); bno = dtogd(fs, jnewblk->jn_blkno); for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) { if (isset(blksfree, bno + i)) panic("softdep_setup_blkmapdep: " "free fragment %d from %d-%d " "state 0x%X dep %p", i, jnewblk->jn_oldfrags, jnewblk->jn_frags, jnewblk->jn_state, jnewblk->jn_dep); } } #endif } CTR3(KTR_SUJ, "softdep_setup_blkmapdep: blkno %jd frags %d oldfrags %d", newblkno, frags, oldfrags); ACQUIRE_LOCK(ump); if (newblk_lookup(mp, newblkno, DEPALLOC, &newblk) != 0) panic("softdep_setup_blkmapdep: found block"); newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, newblkno), NULL); if (jnewblk) { jnewblk->jn_dep = (struct worklist *)newblk; LIST_INSERT_HEAD(&bmsafemap->sm_jnewblkhd, jnewblk, jn_deps); } else { newblk->nb_state |= ONDEPLIST; LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps); } newblk->nb_bmsafemap = bmsafemap; newblk->nb_jnewblk = jnewblk; FREE_LOCK(ump); } #define BMSAFEMAP_HASH(ump, cg) \ (&(ump)->bmsafemap_hashtbl[(cg) & (ump)->bmsafemap_hash_size]) static int bmsafemap_find(bmsafemaphd, cg, bmsafemapp) struct bmsafemap_hashhead *bmsafemaphd; int cg; struct bmsafemap **bmsafemapp; { struct bmsafemap *bmsafemap; LIST_FOREACH(bmsafemap, bmsafemaphd, sm_hash) if (bmsafemap->sm_cg == cg) break; if (bmsafemap) { *bmsafemapp = bmsafemap; return (1); } *bmsafemapp = NULL; return (0); } /* * Find the bmsafemap associated with a cylinder group buffer. * If none exists, create one. The buffer must be locked when * this routine is called and this routine must be called with * the softdep lock held. To avoid giving up the lock while * allocating a new bmsafemap, a preallocated bmsafemap may be * provided. If it is provided but not needed, it is freed. 
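* * softdep_setup_inomapdep() above illustrates the intended pattern: the caller preallocates a bmsafemap with malloc() and workitem_alloc() before taking the per-filesystem lock and passes it in, so the lookup never has to drop the lock to allocate in that path.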
*/ static struct bmsafemap * bmsafemap_lookup(mp, bp, cg, newbmsafemap) struct mount *mp; struct buf *bp; int cg; struct bmsafemap *newbmsafemap; { struct bmsafemap_hashhead *bmsafemaphd; struct bmsafemap *bmsafemap, *collision; struct worklist *wk; struct ufsmount *ump; ump = VFSTOUFS(mp); LOCK_OWNED(ump); KASSERT(bp != NULL, ("bmsafemap_lookup: missing buffer")); LIST_FOREACH(wk, &bp->b_dep, wk_list) { if (wk->wk_type == D_BMSAFEMAP) { if (newbmsafemap) WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP); return (WK_BMSAFEMAP(wk)); } } bmsafemaphd = BMSAFEMAP_HASH(ump, cg); if (bmsafemap_find(bmsafemaphd, cg, &bmsafemap) == 1) { if (newbmsafemap) WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP); return (bmsafemap); } if (newbmsafemap) { bmsafemap = newbmsafemap; } else { FREE_LOCK(ump); bmsafemap = malloc(sizeof(struct bmsafemap), M_BMSAFEMAP, M_SOFTDEP_FLAGS); workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp); ACQUIRE_LOCK(ump); } bmsafemap->sm_buf = bp; LIST_INIT(&bmsafemap->sm_inodedephd); LIST_INIT(&bmsafemap->sm_inodedepwr); LIST_INIT(&bmsafemap->sm_newblkhd); LIST_INIT(&bmsafemap->sm_newblkwr); LIST_INIT(&bmsafemap->sm_jaddrefhd); LIST_INIT(&bmsafemap->sm_jnewblkhd); LIST_INIT(&bmsafemap->sm_freehd); LIST_INIT(&bmsafemap->sm_freewr); if (bmsafemap_find(bmsafemaphd, cg, &collision) == 1) { WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); return (collision); } bmsafemap->sm_cg = cg; LIST_INSERT_HEAD(bmsafemaphd, bmsafemap, sm_hash); LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next); WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list); return (bmsafemap); } /* * Direct block allocation dependencies. * * When a new block is allocated, the corresponding disk locations must be * initialized (with zeros or new data) before the on-disk inode points to * them. Also, the freemap from which the block was allocated must be * updated (on disk) before the inode's pointer. These two dependencies are * independent of each other and are needed for all file blocks and indirect * blocks that are pointed to directly by the inode. Just before the * "in-core" version of the inode is updated with a newly allocated block * number, a procedure (below) is called to setup allocation dependency * structures. These structures are removed when the corresponding * dependencies are satisfied or when the block allocation becomes obsolete * (i.e., the file is deleted, the block is de-allocated, or the block is a * fragment that gets upgraded). All of these cases are handled in * procedures described later. * * When a file extension causes a fragment to be upgraded, either to a larger * fragment or to a full block, the on-disk location may change (if the * previous fragment could not simply be extended). In this case, the old * fragment must be de-allocated, but not until after the inode's pointer has * been updated. In most cases, this is handled by later procedures, which * will construct a "freefrag" structure to be added to the workitem queue * when the inode update is complete (or obsolete). The main exception to * this is when an allocation occurs while a pending allocation dependency * (for the same block pointer) remains. This case is handled in the main * allocation dependency setup procedure by immediately freeing the * unreferenced fragments. 
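* * As a concrete example of a fragment upgrade: suppose a file ends in a 2K fragment that must grow to 4K on a filesystem with 8K blocks and 1K fragments. If the fragment cannot be extended in place, a new 4K fragment is allocated elsewhere, the in-core inode pointer is switched to it, and a freefrag for the old 2K fragment is queued so that it is only released once the updated pointer is safe on disk.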
*/ void softdep_setup_allocdirect(ip, off, newblkno, oldblkno, newsize, oldsize, bp) struct inode *ip; /* inode to which block is being added */ ufs_lbn_t off; /* block pointer within inode */ ufs2_daddr_t newblkno; /* disk block number being added */ ufs2_daddr_t oldblkno; /* previous block number, 0 unless frag */ long newsize; /* size of new block */ long oldsize; /* size of old block */ struct buf *bp; /* bp for allocated block */ { struct allocdirect *adp, *oldadp; struct allocdirectlst *adphead; struct freefrag *freefrag; struct inodedep *inodedep; struct pagedep *pagedep; struct jnewblk *jnewblk; struct newblk *newblk; struct mount *mp; ufs_lbn_t lbn; lbn = bp->b_lblkno; mp = ITOVFS(ip); KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_setup_allocdirect called on non-softdep filesystem")); if (oldblkno && oldblkno != newblkno) freefrag = newfreefrag(ip, oldblkno, oldsize, lbn); else freefrag = NULL; CTR6(KTR_SUJ, "softdep_setup_allocdirect: ino %d blkno %jd oldblkno %jd " "off %jd newsize %ld oldsize %ld", ip->i_number, newblkno, oldblkno, off, newsize, oldsize); ACQUIRE_LOCK(ITOUMP(ip)); if (off >= UFS_NDADDR) { if (lbn > 0) panic("softdep_setup_allocdirect: bad lbn %jd, off %jd", lbn, off); /* allocating an indirect block */ if (oldblkno != 0) panic("softdep_setup_allocdirect: non-zero indir"); } else { if (off != lbn) panic("softdep_setup_allocdirect: lbn %jd != off %jd", lbn, off); /* * Allocating a direct block. * * If we are allocating a directory block, then we must * allocate an associated pagedep to track additions and * deletions. */ if ((ip->i_mode & IFMT) == IFDIR) pagedep_lookup(mp, bp, ip->i_number, off, DEPALLOC, &pagedep); } if (newblk_lookup(mp, newblkno, 0, &newblk) == 0) panic("softdep_setup_allocdirect: lost block"); KASSERT(newblk->nb_list.wk_type == D_NEWBLK, ("softdep_setup_allocdirect: newblk already initialized")); /* * Convert the newblk to an allocdirect. */ WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT); adp = (struct allocdirect *)newblk; newblk->nb_freefrag = freefrag; adp->ad_offset = off; adp->ad_oldblkno = oldblkno; adp->ad_newsize = newsize; adp->ad_oldsize = oldsize; /* * Finish initializing the journal. */ if ((jnewblk = newblk->nb_jnewblk) != NULL) { jnewblk->jn_ino = ip->i_number; jnewblk->jn_lbn = lbn; add_to_journal(&jnewblk->jn_list); } if (freefrag && freefrag->ff_jdep != NULL && freefrag->ff_jdep->wk_type == D_JFREEFRAG) add_to_journal(freefrag->ff_jdep); inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); adp->ad_inodedep = inodedep; WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list); /* * The list of allocdirects must be kept in sorted and ascending * order so that the rollback routines can quickly determine the * first uncommitted block (the size of the file stored on disk * ends at the end of the lowest committed fragment, or if there * are no fragments, at the end of the highest committed block). * Since files generally grow, the typical case is that the new * block is to be added at the end of the list. We speed this * special case by checking against the last allocdirect in the * list before laboriously traversing the list looking for the * insertion point.
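* * For example, a file written sequentially produces allocdirects with ad_offset 0, 1, 2, ... and each one takes the insert-at-tail fast path; the list is only walked when a block is reallocated at an offset below the highest outstanding one, and an entry at the same offset is folded in by allocdirect_merge().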
*/ adphead = &inodedep->id_newinoupdt; oldadp = TAILQ_LAST(adphead, allocdirectlst); if (oldadp == NULL || oldadp->ad_offset <= off) { /* insert at end of list */ TAILQ_INSERT_TAIL(adphead, adp, ad_next); if (oldadp != NULL && oldadp->ad_offset == off) allocdirect_merge(adphead, adp, oldadp); FREE_LOCK(ITOUMP(ip)); return; } TAILQ_FOREACH(oldadp, adphead, ad_next) { if (oldadp->ad_offset >= off) break; } if (oldadp == NULL) panic("softdep_setup_allocdirect: lost entry"); /* insert in middle of list */ TAILQ_INSERT_BEFORE(oldadp, adp, ad_next); if (oldadp->ad_offset == off) allocdirect_merge(adphead, adp, oldadp); FREE_LOCK(ITOUMP(ip)); } /* * Merge a newer and older journal record to be stored either in a * newblock or freefrag. This handles aggregating journal records for * fragment allocation into a second record as well as replacing a * journal free with an aborted journal allocation. A segment for the * oldest record will be placed on wkhd if it has been written. If not * the segment for the newer record will suffice. */ static struct worklist * jnewblk_merge(new, old, wkhd) struct worklist *new; struct worklist *old; struct workhead *wkhd; { struct jnewblk *njnewblk; struct jnewblk *jnewblk; /* Handle NULLs to simplify callers. */ if (new == NULL) return (old); if (old == NULL) return (new); /* Replace a jfreefrag with a jnewblk. */ if (new->wk_type == D_JFREEFRAG) { if (WK_JNEWBLK(old)->jn_blkno != WK_JFREEFRAG(new)->fr_blkno) panic("jnewblk_merge: blkno mismatch: %p, %p", old, new); cancel_jfreefrag(WK_JFREEFRAG(new)); return (old); } if (old->wk_type != D_JNEWBLK || new->wk_type != D_JNEWBLK) panic("jnewblk_merge: Bad type: old %d new %d\n", old->wk_type, new->wk_type); /* * Handle merging of two jnewblk records that describe * different sets of fragments in the same block. */ jnewblk = WK_JNEWBLK(old); njnewblk = WK_JNEWBLK(new); if (jnewblk->jn_blkno != njnewblk->jn_blkno) panic("jnewblk_merge: Merging disparate blocks."); /* * The record may be rolled back in the cg. */ if (jnewblk->jn_state & UNDONE) { jnewblk->jn_state &= ~UNDONE; njnewblk->jn_state |= UNDONE; njnewblk->jn_state &= ~ATTACHED; } /* * We modify the newer addref and free the older so that if neither * has been written the most up-to-date copy will be on disk. If * both have been written but rolled back we only temporarily need * one of them to fix the bits when the cg write completes. */ jnewblk->jn_state |= ATTACHED | COMPLETE; njnewblk->jn_oldfrags = jnewblk->jn_oldfrags; cancel_jnewblk(jnewblk, wkhd); WORKLIST_REMOVE(&jnewblk->jn_list); free_jnewblk(jnewblk); return (new); } /* * Replace an old allocdirect dependency with a newer one. * This routine must be called with splbio interrupts blocked. 
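* * A common case is a fragment upgrade of a block with a still-pending allocdirect: the new dependency adopts the old one's on-disk state (ad_oldblkno and ad_oldsize) so rollbacks stay correct, while the freefrag swap described below keeps the replaced fragment from being released before its dependencies allow.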
*/ static void allocdirect_merge(adphead, newadp, oldadp) struct allocdirectlst *adphead; /* head of list holding allocdirects */ struct allocdirect *newadp; /* allocdirect being added */ struct allocdirect *oldadp; /* existing allocdirect being checked */ { struct worklist *wk; struct freefrag *freefrag; freefrag = NULL; LOCK_OWNED(VFSTOUFS(newadp->ad_list.wk_mp)); if (newadp->ad_oldblkno != oldadp->ad_newblkno || newadp->ad_oldsize != oldadp->ad_newsize || newadp->ad_offset >= UFS_NDADDR) panic("%s %jd != new %jd || old size %ld != new %ld", "allocdirect_merge: old blkno", (intmax_t)newadp->ad_oldblkno, (intmax_t)oldadp->ad_newblkno, newadp->ad_oldsize, oldadp->ad_newsize); newadp->ad_oldblkno = oldadp->ad_oldblkno; newadp->ad_oldsize = oldadp->ad_oldsize; /* * If the old dependency had a fragment to free or had never * previously had a block allocated, then the new dependency * can immediately post its freefrag and adopt the old freefrag. * This action is done by swapping the freefrag dependencies. * The new dependency gains the old one's freefrag, and the * old one gets the new one and then immediately puts it on * the worklist when it is freed by free_newblk. It is * not possible to do this swap when the old dependency had a * non-zero size but no previous fragment to free. This condition * arises when the new block is an extension of the old block. * Here, the first part of the fragment allocated to the new * dependency is part of the block currently claimed on disk by * the old dependency, so cannot legitimately be freed until the * conditions for the new dependency are fulfilled. */ freefrag = newadp->ad_freefrag; if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) { newadp->ad_freefrag = oldadp->ad_freefrag; oldadp->ad_freefrag = freefrag; } /* * If we are tracking a new directory-block allocation, * move it from the old allocdirect to the new allocdirect. */ if ((wk = LIST_FIRST(&oldadp->ad_newdirblk)) != NULL) { WORKLIST_REMOVE(wk); if (!LIST_EMPTY(&oldadp->ad_newdirblk)) panic("allocdirect_merge: extra newdirblk"); WORKLIST_INSERT(&newadp->ad_newdirblk, wk); } TAILQ_REMOVE(adphead, oldadp, ad_next); /* * We need to move any journal dependencies over to the freefrag * that releases this block if it exists. Otherwise we are * extending an existing block and we'll wait until that is * complete to release the journal space and extend the * new journal to cover this old space as well. */ if (freefrag == NULL) { if (oldadp->ad_newblkno != newadp->ad_newblkno) panic("allocdirect_merge: %jd != %jd", oldadp->ad_newblkno, newadp->ad_newblkno); newadp->ad_block.nb_jnewblk = (struct jnewblk *) jnewblk_merge(&newadp->ad_block.nb_jnewblk->jn_list, &oldadp->ad_block.nb_jnewblk->jn_list, &newadp->ad_block.nb_jwork); oldadp->ad_block.nb_jnewblk = NULL; cancel_newblk(&oldadp->ad_block, NULL, &newadp->ad_block.nb_jwork); } else { wk = (struct worklist *) cancel_newblk(&oldadp->ad_block, &freefrag->ff_list, &freefrag->ff_jwork); freefrag->ff_jdep = jnewblk_merge(freefrag->ff_jdep, wk, &freefrag->ff_jwork); } free_newblk(&oldadp->ad_block); } /* * Allocate a jfreefrag structure to journal a single block free. 
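* * The length is recorded in fragments; on a filesystem with 8K blocks and 1K fragments, for example, journaling the free of a 3K partial block yields fr_frags == numfrags(fs, 3072) == 3.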
*/ static struct jfreefrag * newjfreefrag(freefrag, ip, blkno, size, lbn) struct freefrag *freefrag; struct inode *ip; ufs2_daddr_t blkno; long size; ufs_lbn_t lbn; { struct jfreefrag *jfreefrag; struct fs *fs; fs = ITOFS(ip); jfreefrag = malloc(sizeof(struct jfreefrag), M_JFREEFRAG, M_SOFTDEP_FLAGS); workitem_alloc(&jfreefrag->fr_list, D_JFREEFRAG, ITOVFS(ip)); jfreefrag->fr_jsegdep = newjsegdep(&jfreefrag->fr_list); jfreefrag->fr_state = ATTACHED | DEPCOMPLETE; jfreefrag->fr_ino = ip->i_number; jfreefrag->fr_lbn = lbn; jfreefrag->fr_blkno = blkno; jfreefrag->fr_frags = numfrags(fs, size); jfreefrag->fr_freefrag = freefrag; return (jfreefrag); } /* * Allocate a new freefrag structure. */ static struct freefrag * newfreefrag(ip, blkno, size, lbn) struct inode *ip; ufs2_daddr_t blkno; long size; ufs_lbn_t lbn; { struct freefrag *freefrag; struct ufsmount *ump; struct fs *fs; CTR4(KTR_SUJ, "newfreefrag: ino %d blkno %jd size %ld lbn %jd", ip->i_number, blkno, size, lbn); ump = ITOUMP(ip); fs = ump->um_fs; if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag) panic("newfreefrag: frag size"); freefrag = malloc(sizeof(struct freefrag), M_FREEFRAG, M_SOFTDEP_FLAGS); workitem_alloc(&freefrag->ff_list, D_FREEFRAG, UFSTOVFS(ump)); freefrag->ff_state = ATTACHED; LIST_INIT(&freefrag->ff_jwork); freefrag->ff_inum = ip->i_number; freefrag->ff_vtype = ITOV(ip)->v_type; freefrag->ff_blkno = blkno; freefrag->ff_fragsize = size; if (MOUNTEDSUJ(UFSTOVFS(ump))) { freefrag->ff_jdep = (struct worklist *) newjfreefrag(freefrag, ip, blkno, size, lbn); } else { freefrag->ff_state |= DEPCOMPLETE; freefrag->ff_jdep = NULL; } return (freefrag); } /* * This workitem de-allocates fragments that were replaced during * file block allocation. */ static void handle_workitem_freefrag(freefrag) struct freefrag *freefrag; { struct ufsmount *ump = VFSTOUFS(freefrag->ff_list.wk_mp); struct workhead wkhd; CTR3(KTR_SUJ, "handle_workitem_freefrag: ino %d blkno %jd size %ld", freefrag->ff_inum, freefrag->ff_blkno, freefrag->ff_fragsize); /* * It would be illegal to add new completion items to the * freefrag after it was scheduled to be done, so it must be * safe to modify the list head here. */ LIST_INIT(&wkhd); ACQUIRE_LOCK(ump); LIST_SWAP(&freefrag->ff_jwork, &wkhd, worklist, wk_list); /* * If the journal has not been written we must cancel it here. */ if (freefrag->ff_jdep) { if (freefrag->ff_jdep->wk_type != D_JNEWBLK) panic("handle_workitem_freefrag: Unexpected type %d\n", freefrag->ff_jdep->wk_type); cancel_jnewblk(WK_JNEWBLK(freefrag->ff_jdep), &wkhd); } FREE_LOCK(ump); ffs_blkfree(ump, ump->um_fs, ump->um_devvp, freefrag->ff_blkno, freefrag->ff_fragsize, freefrag->ff_inum, freefrag->ff_vtype, &wkhd); ACQUIRE_LOCK(ump); WORKITEM_FREE(freefrag, D_FREEFRAG); FREE_LOCK(ump); } /* * Set up a dependency structure for an external attributes data block. * This routine follows much of the structure of softdep_setup_allocdirect. * See the description of softdep_setup_allocdirect above for details.
*/ void softdep_setup_allocext(ip, off, newblkno, oldblkno, newsize, oldsize, bp) struct inode *ip; ufs_lbn_t off; ufs2_daddr_t newblkno; ufs2_daddr_t oldblkno; long newsize; long oldsize; struct buf *bp; { struct allocdirect *adp, *oldadp; struct allocdirectlst *adphead; struct freefrag *freefrag; struct inodedep *inodedep; struct jnewblk *jnewblk; struct newblk *newblk; struct mount *mp; struct ufsmount *ump; ufs_lbn_t lbn; mp = ITOVFS(ip); ump = VFSTOUFS(mp); KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_setup_allocext called on non-softdep filesystem")); KASSERT(off < UFS_NXADDR, ("softdep_setup_allocext: lbn %lld > UFS_NXADDR", (long long)off)); lbn = bp->b_lblkno; if (oldblkno && oldblkno != newblkno) freefrag = newfreefrag(ip, oldblkno, oldsize, lbn); else freefrag = NULL; ACQUIRE_LOCK(ump); if (newblk_lookup(mp, newblkno, 0, &newblk) == 0) panic("softdep_setup_allocext: lost block"); KASSERT(newblk->nb_list.wk_type == D_NEWBLK, ("softdep_setup_allocext: newblk already initialized")); /* * Convert the newblk to an allocdirect. */ WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT); adp = (struct allocdirect *)newblk; newblk->nb_freefrag = freefrag; adp->ad_offset = off; adp->ad_oldblkno = oldblkno; adp->ad_newsize = newsize; adp->ad_oldsize = oldsize; adp->ad_state |= EXTDATA; /* * Finish initializing the journal. */ if ((jnewblk = newblk->nb_jnewblk) != NULL) { jnewblk->jn_ino = ip->i_number; jnewblk->jn_lbn = lbn; add_to_journal(&jnewblk->jn_list); } if (freefrag && freefrag->ff_jdep != NULL && freefrag->ff_jdep->wk_type == D_JFREEFRAG) add_to_journal(freefrag->ff_jdep); inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); adp->ad_inodedep = inodedep; WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list); /* * The list of allocdirects must be kept in sorted and ascending * order so that the rollback routines can quickly determine the * first uncommitted block (the size of the file stored on disk * ends at the end of the lowest committed fragment, or if there * are no fragments, at the end of the highest committed block). * Since files generally grow, the typical case is that the new * block is to be added at the end of the list. We speed this * special case by checking against the last allocdirect in the * list before laboriously traversing the list looking for the * insertion point. */ adphead = &inodedep->id_newextupdt; oldadp = TAILQ_LAST(adphead, allocdirectlst); if (oldadp == NULL || oldadp->ad_offset <= off) { /* insert at end of list */ TAILQ_INSERT_TAIL(adphead, adp, ad_next); if (oldadp != NULL && oldadp->ad_offset == off) allocdirect_merge(adphead, adp, oldadp); FREE_LOCK(ump); return; } TAILQ_FOREACH(oldadp, adphead, ad_next) { if (oldadp->ad_offset >= off) break; } if (oldadp == NULL) panic("softdep_setup_allocext: lost entry"); /* insert in middle of list */ TAILQ_INSERT_BEFORE(oldadp, adp, ad_next); if (oldadp->ad_offset == off) allocdirect_merge(adphead, adp, oldadp); FREE_LOCK(ump); } /* * Indirect block allocation dependencies. * * The same dependencies that exist for a direct block also exist when * a new block is allocated and pointed to by an entry in a block of * indirect pointers. The undo/redo states described above are also * used here. Because an indirect block contains many pointers that * may have dependencies, a second copy of the entire in-memory indirect * block is kept. The buffer cache copy is always completely up-to-date. 
* The second copy, which is used only as a source for disk writes, * contains only the safe pointers (i.e., those that have no remaining * update dependencies). The second copy is freed when all pointers * are safe. The cache is not allowed to replace indirect blocks with * pending update dependencies. If a buffer containing an indirect * block with dependencies is written, these routines will mark it * dirty again. It can only be successfully written once all the * dependencies are removed. The ffs_fsync routine in conjunction with * softdep_sync_metadata work together to get all the dependencies * removed so that a file can be successfully written to disk. Three * procedures are used when setting up indirect block pointer * dependencies. The division is necessary because of the organization * of the "balloc" routine and because of the distinction between file * pages and file metadata blocks. */ /* * Allocate a new allocindir structure. */ static struct allocindir * newallocindir(ip, ptrno, newblkno, oldblkno, lbn) struct inode *ip; /* inode for file being extended */ int ptrno; /* offset of pointer in indirect block */ ufs2_daddr_t newblkno; /* disk block number being added */ ufs2_daddr_t oldblkno; /* previous block number, 0 if none */ ufs_lbn_t lbn; { struct newblk *newblk; struct allocindir *aip; struct freefrag *freefrag; struct jnewblk *jnewblk; if (oldblkno) freefrag = newfreefrag(ip, oldblkno, ITOFS(ip)->fs_bsize, lbn); else freefrag = NULL; ACQUIRE_LOCK(ITOUMP(ip)); if (newblk_lookup(ITOVFS(ip), newblkno, 0, &newblk) == 0) panic("new_allocindir: lost block"); KASSERT(newblk->nb_list.wk_type == D_NEWBLK, ("newallocindir: newblk already initialized")); WORKITEM_REASSIGN(newblk, D_ALLOCINDIR); newblk->nb_freefrag = freefrag; aip = (struct allocindir *)newblk; aip->ai_offset = ptrno; aip->ai_oldblkno = oldblkno; aip->ai_lbn = lbn; if ((jnewblk = newblk->nb_jnewblk) != NULL) { jnewblk->jn_ino = ip->i_number; jnewblk->jn_lbn = lbn; add_to_journal(&jnewblk->jn_list); } if (freefrag && freefrag->ff_jdep != NULL && freefrag->ff_jdep->wk_type == D_JFREEFRAG) add_to_journal(freefrag->ff_jdep); return (aip); } /* * Called just before setting an indirect block pointer * to a newly allocated file page. */ void softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp) struct inode *ip; /* inode for file being extended */ ufs_lbn_t lbn; /* allocated block number within file */ struct buf *bp; /* buffer with indirect blk referencing page */ int ptrno; /* offset of pointer in indirect block */ ufs2_daddr_t newblkno; /* disk block number being added */ ufs2_daddr_t oldblkno; /* previous block number, 0 if none */ struct buf *nbp; /* buffer holding allocated page */ { struct inodedep *inodedep; struct freefrag *freefrag; struct allocindir *aip; struct pagedep *pagedep; struct mount *mp; struct ufsmount *ump; mp = ITOVFS(ip); ump = VFSTOUFS(mp); KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_setup_allocindir_page called on non-softdep filesystem")); KASSERT(lbn == nbp->b_lblkno, ("softdep_setup_allocindir_page: lbn %jd != lblkno %jd", lbn, bp->b_lblkno)); CTR4(KTR_SUJ, "softdep_setup_allocindir_page: ino %d blkno %jd oldblkno %jd " "lbn %jd", ip->i_number, newblkno, oldblkno, lbn); ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_page"); aip = newallocindir(ip, ptrno, newblkno, oldblkno, lbn); (void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); /* * If we are allocating a directory page, then we must * allocate an associated pagedep to track additions and * deletions. 
*/ if ((ip->i_mode & IFMT) == IFDIR) pagedep_lookup(mp, nbp, ip->i_number, lbn, DEPALLOC, &pagedep); WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list); freefrag = setup_allocindir_phase2(bp, ip, inodedep, aip, lbn); FREE_LOCK(ump); if (freefrag) handle_workitem_freefrag(freefrag); } /* * Called just before setting an indirect block pointer to a * newly allocated indirect block. */ void softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno) struct buf *nbp; /* newly allocated indirect block */ struct inode *ip; /* inode for file being extended */ struct buf *bp; /* indirect block referencing allocated block */ int ptrno; /* offset of pointer in indirect block */ ufs2_daddr_t newblkno; /* disk block number being added */ { struct inodedep *inodedep; struct allocindir *aip; struct ufsmount *ump; ufs_lbn_t lbn; ump = ITOUMP(ip); KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, ("softdep_setup_allocindir_meta called on non-softdep filesystem")); CTR3(KTR_SUJ, "softdep_setup_allocindir_meta: ino %d blkno %jd ptrno %d", ip->i_number, newblkno, ptrno); lbn = nbp->b_lblkno; ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_meta"); aip = newallocindir(ip, ptrno, newblkno, 0, lbn); inodedep_lookup(UFSTOVFS(ump), ip->i_number, DEPALLOC, &inodedep); WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list); if (setup_allocindir_phase2(bp, ip, inodedep, aip, lbn)) panic("softdep_setup_allocindir_meta: Block already existed"); FREE_LOCK(ump); } static void indirdep_complete(indirdep) struct indirdep *indirdep; { struct allocindir *aip; LIST_REMOVE(indirdep, ir_next); indirdep->ir_state |= DEPCOMPLETE; while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL) { LIST_REMOVE(aip, ai_next); free_newblk(&aip->ai_block); } /* * If this indirdep is not attached to a buf it was simply waiting * on completion to clear completehd. free_indirdep() asserts * that nothing is dangling. */ if ((indirdep->ir_state & ONWORKLIST) == 0) free_indirdep(indirdep); } static struct indirdep * indirdep_lookup(mp, ip, bp) struct mount *mp; struct inode *ip; struct buf *bp; { struct indirdep *indirdep, *newindirdep; struct newblk *newblk; struct ufsmount *ump; struct worklist *wk; struct fs *fs; ufs2_daddr_t blkno; ump = VFSTOUFS(mp); LOCK_OWNED(ump); indirdep = NULL; newindirdep = NULL; fs = ump->um_fs; for (;;) { LIST_FOREACH(wk, &bp->b_dep, wk_list) { if (wk->wk_type != D_INDIRDEP) continue; indirdep = WK_INDIRDEP(wk); break; } /* Found on the buffer worklist, no new structure to free. */ if (indirdep != NULL && newindirdep == NULL) return (indirdep); if (indirdep != NULL && newindirdep != NULL) panic("indirdep_lookup: simultaneous create"); /* None found on the buffer and a new structure is ready. */ if (indirdep == NULL && newindirdep != NULL) break; /* None found and no new structure available. 
*/ FREE_LOCK(ump); newindirdep = malloc(sizeof(struct indirdep), M_INDIRDEP, M_SOFTDEP_FLAGS); workitem_alloc(&newindirdep->ir_list, D_INDIRDEP, mp); newindirdep->ir_state = ATTACHED; if (I_IS_UFS1(ip)) newindirdep->ir_state |= UFS1FMT; TAILQ_INIT(&newindirdep->ir_trunc); newindirdep->ir_saveddata = NULL; LIST_INIT(&newindirdep->ir_deplisthd); LIST_INIT(&newindirdep->ir_donehd); LIST_INIT(&newindirdep->ir_writehd); LIST_INIT(&newindirdep->ir_completehd); if (bp->b_blkno == bp->b_lblkno) { ufs_bmaparray(bp->b_vp, bp->b_lblkno, &blkno, bp, NULL, NULL); bp->b_blkno = blkno; } newindirdep->ir_freeblks = NULL; newindirdep->ir_savebp = getblk(ump->um_devvp, bp->b_blkno, bp->b_bcount, 0, 0, 0); newindirdep->ir_bp = bp; BUF_KERNPROC(newindirdep->ir_savebp); bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount); ACQUIRE_LOCK(ump); } indirdep = newindirdep; WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list); /* * If the block is not yet allocated we don't set DEPCOMPLETE so * that we don't free dependencies until the pointers are valid. * This could search b_dep for D_ALLOCDIRECT/D_ALLOCINDIR rather * than using the hash. */ if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk)) LIST_INSERT_HEAD(&newblk->nb_indirdeps, indirdep, ir_next); else indirdep->ir_state |= DEPCOMPLETE; return (indirdep); } /* * Called to finish the allocation of the "aip" allocated * by one of the two routines above. */ static struct freefrag * setup_allocindir_phase2(bp, ip, inodedep, aip, lbn) struct buf *bp; /* in-memory copy of the indirect block */ struct inode *ip; /* inode for file being extended */ struct inodedep *inodedep; /* Inodedep for ip */ struct allocindir *aip; /* allocindir allocated by the above routines */ ufs_lbn_t lbn; /* Logical block number for this block. */ { struct fs *fs; struct indirdep *indirdep; struct allocindir *oldaip; struct freefrag *freefrag; struct mount *mp; struct ufsmount *ump; mp = ITOVFS(ip); ump = VFSTOUFS(mp); LOCK_OWNED(ump); fs = ump->um_fs; if (bp->b_lblkno >= 0) panic("setup_allocindir_phase2: not indir blk"); KASSERT(aip->ai_offset >= 0 && aip->ai_offset < NINDIR(fs), ("setup_allocindir_phase2: Bad offset %d", aip->ai_offset)); indirdep = indirdep_lookup(mp, ip, bp); KASSERT(indirdep->ir_savebp != NULL, ("setup_allocindir_phase2 NULL ir_savebp")); aip->ai_indirdep = indirdep; /* * Check for an unwritten dependency for this indirect offset. If * there is, merge the old dependency into the new one. This happens * as a result of reallocblk only. */ freefrag = NULL; if (aip->ai_oldblkno != 0) { LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) { if (oldaip->ai_offset == aip->ai_offset) { freefrag = allocindir_merge(aip, oldaip); goto done; } } LIST_FOREACH(oldaip, &indirdep->ir_donehd, ai_next) { if (oldaip->ai_offset == aip->ai_offset) { freefrag = allocindir_merge(aip, oldaip); goto done; } } } done: LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next); return (freefrag); } /* * Merge two allocindirs which refer to the same block. Move newblock * dependencies and setup the freefrags appropriately. 
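* * This situation only arises via reallocblk-style reallocation of a block that still has a pending allocindir: the new allocindir inherits the old one's original block number so the eventual rollback and free remain correct, and the old allocindir's journal work is folded into the freefrag that will release its block.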
*/ static struct freefrag * allocindir_merge(aip, oldaip) struct allocindir *aip; struct allocindir *oldaip; { struct freefrag *freefrag; struct worklist *wk; if (oldaip->ai_newblkno != aip->ai_oldblkno) panic("allocindir_merge: blkno"); aip->ai_oldblkno = oldaip->ai_oldblkno; freefrag = aip->ai_freefrag; aip->ai_freefrag = oldaip->ai_freefrag; oldaip->ai_freefrag = NULL; KASSERT(freefrag != NULL, ("setup_allocindir_phase2: No freefrag")); /* * If we are tracking a new directory-block allocation, * move it from the old allocindir to the new allocindir. */ if ((wk = LIST_FIRST(&oldaip->ai_newdirblk)) != NULL) { WORKLIST_REMOVE(wk); if (!LIST_EMPTY(&oldaip->ai_newdirblk)) panic("allocindir_merge: extra newdirblk"); WORKLIST_INSERT(&aip->ai_newdirblk, wk); } /* * We can skip journaling for this freefrag and just complete * any pending journal work for the allocindir that is being * removed after the freefrag completes. */ if (freefrag->ff_jdep) cancel_jfreefrag(WK_JFREEFRAG(freefrag->ff_jdep)); LIST_REMOVE(oldaip, ai_next); freefrag->ff_jdep = (struct worklist *)cancel_newblk(&oldaip->ai_block, &freefrag->ff_list, &freefrag->ff_jwork); free_newblk(&oldaip->ai_block); return (freefrag); } static inline void setup_freedirect(freeblks, ip, i, needj) struct freeblks *freeblks; struct inode *ip; int i; int needj; { struct ufsmount *ump; ufs2_daddr_t blkno; int frags; blkno = DIP(ip, i_db[i]); if (blkno == 0) return; DIP_SET(ip, i_db[i], 0); ump = ITOUMP(ip); frags = sblksize(ump->um_fs, ip->i_size, i); frags = numfrags(ump->um_fs, frags); newfreework(ump, freeblks, NULL, i, blkno, frags, 0, needj); } static inline void setup_freeext(freeblks, ip, i, needj) struct freeblks *freeblks; struct inode *ip; int i; int needj; { struct ufsmount *ump; ufs2_daddr_t blkno; int frags; blkno = ip->i_din2->di_extb[i]; if (blkno == 0) return; ip->i_din2->di_extb[i] = 0; ump = ITOUMP(ip); frags = sblksize(ump->um_fs, ip->i_din2->di_extsize, i); frags = numfrags(ump->um_fs, frags); newfreework(ump, freeblks, NULL, -1 - i, blkno, frags, 0, needj); } static inline void setup_freeindir(freeblks, ip, i, lbn, needj) struct freeblks *freeblks; struct inode *ip; int i; ufs_lbn_t lbn; int needj; { struct ufsmount *ump; ufs2_daddr_t blkno; blkno = DIP(ip, i_ib[i]); if (blkno == 0) return; DIP_SET(ip, i_ib[i], 0); ump = ITOUMP(ip); newfreework(ump, freeblks, NULL, lbn, blkno, ump->um_fs->fs_frag, 0, needj); } static inline struct freeblks * newfreeblks(mp, ip) struct mount *mp; struct inode *ip; { struct freeblks *freeblks; freeblks = malloc(sizeof(struct freeblks), M_FREEBLKS, M_SOFTDEP_FLAGS|M_ZERO); workitem_alloc(&freeblks->fb_list, D_FREEBLKS, mp); LIST_INIT(&freeblks->fb_jblkdephd); LIST_INIT(&freeblks->fb_jwork); freeblks->fb_ref = 0; freeblks->fb_cgwait = 0; freeblks->fb_state = ATTACHED; freeblks->fb_uid = ip->i_uid; freeblks->fb_inum = ip->i_number; freeblks->fb_vtype = ITOV(ip)->v_type; freeblks->fb_modrev = DIP(ip, i_modrev); freeblks->fb_devvp = ITODEVVP(ip); freeblks->fb_chkcnt = 0; freeblks->fb_len = 0; return (freeblks); } static void trunc_indirdep(indirdep, freeblks, bp, off) struct indirdep *indirdep; struct freeblks *freeblks; struct buf *bp; int off; { struct allocindir *aip, *aipn; /* * The first set of allocindirs won't be in savedbp. 
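 * An allocindir moves across four lists as its indirect block
 * progresses toward disk: ir_deplisthd and ir_donehd entries have
 * pointers only in the live buffer, so they are canceled against bp
 * and their slots rolled back there; ir_writehd and ir_completehd
 * entries were already copied into the saved buffer, so they are
 * canceled with a NULL bp and need no rollback in the live copy.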
*/ LIST_FOREACH_SAFE(aip, &indirdep->ir_deplisthd, ai_next, aipn) if (aip->ai_offset > off) cancel_allocindir(aip, bp, freeblks, 1); LIST_FOREACH_SAFE(aip, &indirdep->ir_donehd, ai_next, aipn) if (aip->ai_offset > off) cancel_allocindir(aip, bp, freeblks, 1); /* * These will exist in savedbp. */ LIST_FOREACH_SAFE(aip, &indirdep->ir_writehd, ai_next, aipn) if (aip->ai_offset > off) cancel_allocindir(aip, NULL, freeblks, 0); LIST_FOREACH_SAFE(aip, &indirdep->ir_completehd, ai_next, aipn) if (aip->ai_offset > off) cancel_allocindir(aip, NULL, freeblks, 0); } /* * Follow the chain of indirects down to lastlbn creating a freework * structure for each. This will be used to start indir_trunc() at * the right offset and create the journal records for the partial * truncation. A second step will handle the truncated dependencies. */ static int setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno) struct freeblks *freeblks; struct inode *ip; ufs_lbn_t lbn; ufs_lbn_t lastlbn; ufs2_daddr_t blkno; { struct indirdep *indirdep; struct indirdep *indirn; struct freework *freework; struct newblk *newblk; struct mount *mp; struct ufsmount *ump; struct buf *bp; uint8_t *start; uint8_t *end; ufs_lbn_t lbnadd; int level; int error; int off; freework = NULL; if (blkno == 0) return (0); mp = freeblks->fb_list.wk_mp; ump = VFSTOUFS(mp); bp = getblk(ITOV(ip), lbn, mp->mnt_stat.f_iosize, 0, 0, 0); if ((bp->b_flags & B_CACHE) == 0) { bp->b_blkno = blkptrtodb(VFSTOUFS(mp), blkno); bp->b_iocmd = BIO_READ; bp->b_flags &= ~B_INVAL; bp->b_ioflags &= ~BIO_ERROR; vfs_busy_pages(bp, 0); bp->b_iooffset = dbtob(bp->b_blkno); bstrategy(bp); #ifdef RACCT if (racct_enable) { PROC_LOCK(curproc); racct_add_buf(curproc, bp, 0); PROC_UNLOCK(curproc); } #endif /* RACCT */ curthread->td_ru.ru_inblock++; error = bufwait(bp); if (error) { brelse(bp); return (error); } } level = lbn_level(lbn); lbnadd = lbn_offset(ump->um_fs, level); /* * Compute the offset of the last block we want to keep. Store * in the freework the first block we want to completely free. */ off = (lastlbn - -(lbn + level)) / lbnadd; if (off + 1 == NINDIR(ump->um_fs)) goto nowork; freework = newfreework(ump, freeblks, NULL, lbn, blkno, 0, off + 1, 0); /* * Link the freework into the indirdep. This will prevent any new * allocations from proceeding until we are finished with the * truncate and the block is written. */ ACQUIRE_LOCK(ump); indirdep = indirdep_lookup(mp, ip, bp); if (indirdep->ir_freeblks) panic("setup_trunc_indir: indirdep already truncated."); TAILQ_INSERT_TAIL(&indirdep->ir_trunc, freework, fw_next); freework->fw_indir = indirdep; /* * Cancel any allocindirs that will not make it to disk. * We have to do this for all copies of the indirdep that * live on this newblk. */ if ((indirdep->ir_state & DEPCOMPLETE) == 0) { newblk_lookup(mp, dbtofsb(ump->um_fs, bp->b_blkno), 0, &newblk); LIST_FOREACH(indirn, &newblk->nb_indirdeps, ir_next) trunc_indirdep(indirn, freeblks, bp, off); } else trunc_indirdep(indirdep, freeblks, bp, off); FREE_LOCK(ump); /* * Creation is protected by the buf lock. The saveddata is only * needed if a full truncation follows a partial truncation but it * is difficult to allocate in that case so we fetch it anyway. */ if (indirdep->ir_saveddata == NULL) indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP, M_SOFTDEP_FLAGS); nowork: /* Fetch the blkno of the child and the zero start offset.
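 * Slot "off" holds the child we descend into next; slots off + 1
 * through NINDIR(fs) - 1 are being discarded.  In the "nowork" case
 * (off + 1 == NINDIR(fs)) nothing at this level is freed, so no
 * freework exists and the buffer is released untouched; otherwise
 * the tail of the pointer array is zeroed and the block is pushed
 * out with bdwrite() below.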
*/ if (I_IS_UFS1(ip)) { blkno = ((ufs1_daddr_t *)bp->b_data)[off]; start = (uint8_t *)&((ufs1_daddr_t *)bp->b_data)[off+1]; } else { blkno = ((ufs2_daddr_t *)bp->b_data)[off]; start = (uint8_t *)&((ufs2_daddr_t *)bp->b_data)[off+1]; } if (freework) { /* Zero the truncated pointers. */ end = bp->b_data + bp->b_bcount; bzero(start, end - start); bdwrite(bp); } else bqrelse(bp); if (level == 0) return (0); lbn++; /* adjust level */ lbn -= (off * lbnadd); return setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno); } /* * Complete the partial truncation of an indirect block set up by * setup_trunc_indir(). This zeros the truncated pointers in the saved * copy and writes them to disk before the freeblks is allowed to complete. */ static void complete_trunc_indir(freework) struct freework *freework; { struct freework *fwn; struct indirdep *indirdep; struct ufsmount *ump; struct buf *bp; uintptr_t start; int count; ump = VFSTOUFS(freework->fw_list.wk_mp); LOCK_OWNED(ump); indirdep = freework->fw_indir; for (;;) { bp = indirdep->ir_bp; /* See if the block was discarded. */ if (bp == NULL) break; /* Inline part of getdirtybuf(). We don't want bremfree. */ if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) break; if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, LOCK_PTR(ump)) == 0) BUF_UNLOCK(bp); ACQUIRE_LOCK(ump); } freework->fw_state |= DEPCOMPLETE; TAILQ_REMOVE(&indirdep->ir_trunc, freework, fw_next); /* * Zero the pointers in the saved copy. */ if (indirdep->ir_state & UFS1FMT) start = sizeof(ufs1_daddr_t); else start = sizeof(ufs2_daddr_t); start *= freework->fw_start; count = indirdep->ir_savebp->b_bcount - start; start += (uintptr_t)indirdep->ir_savebp->b_data; bzero((char *)start, count); /* * We need to start the next truncation in the list if it has not * been started yet. */ fwn = TAILQ_FIRST(&indirdep->ir_trunc); if (fwn != NULL) { if (fwn->fw_freeblks == indirdep->ir_freeblks) TAILQ_REMOVE(&indirdep->ir_trunc, fwn, fw_next); if ((fwn->fw_state & ONWORKLIST) == 0) freework_enqueue(fwn); } /* * If bp is NULL the block was fully truncated, restore * the saved block list otherwise free it if it is no * longer needed. */ if (TAILQ_EMPTY(&indirdep->ir_trunc)) { if (bp == NULL) bcopy(indirdep->ir_saveddata, indirdep->ir_savebp->b_data, indirdep->ir_savebp->b_bcount); free(indirdep->ir_saveddata, M_INDIRDEP); indirdep->ir_saveddata = NULL; } /* * When bp is NULL there is a full truncation pending. We * must wait for this full truncation to be journaled before * we can release this freework because the disk pointers will * never be written as zero. */ if (bp == NULL) { if (LIST_EMPTY(&indirdep->ir_freeblks->fb_jblkdephd)) handle_written_freework(freework); else WORKLIST_INSERT(&indirdep->ir_freeblks->fb_freeworkhd, &freework->fw_list); } else { /* Complete when the real copy is written. */ WORKLIST_INSERT(&bp->b_dep, &freework->fw_list); BUF_UNLOCK(bp); } } /* * Calculate the number of blocks we are going to release where datablocks * is the current total and length is the new file size. */ static ufs2_daddr_t blkcount(fs, datablocks, length) struct fs *fs; ufs2_daddr_t datablocks; off_t length; { off_t totblks, numblks; totblks = 0; numblks = howmany(length, fs->fs_bsize); if (numblks <= UFS_NDADDR) { totblks = howmany(length, fs->fs_fsize); goto out; } totblks = blkstofrags(fs, numblks); numblks -= UFS_NDADDR; /* * Count all single, then double, then triple indirects required. * Subtracting one indirect's worth of blocks for each pass * acknowledges one of each pointed to by the inode.
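 * Worked example (hypothetical fs with NINDIR(fs) == 2048 and
 * numblks == 5000 past the direct blocks): pass one charges
 * howmany(5000, 2048) == 3 single indirects and leaves
 * howmany(5000 - 2048, 2048) == 2 pointers at the next level;
 * pass two charges howmany(2, 2048) == 1 double indirect and
 * 2 - 2048 goes negative, ending the loop at 4 indirect blocks
 * charged in total.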
*/ for (;;) { totblks += blkstofrags(fs, howmany(numblks, NINDIR(fs))); numblks -= NINDIR(fs); if (numblks <= 0) break; numblks = howmany(numblks, NINDIR(fs)); } out: totblks = fsbtodb(fs, totblks); /* * Handle sparse files. We can't reclaim more blocks than the inode * references. We will correct it later in handle_complete_freeblocks() * when we know the real count. */ if (totblks > datablocks) return (0); return (datablocks - totblks); } /* * Handle freeblocks for journaled softupdate filesystems. * * Contrary to normal softupdates, we must preserve the block pointers in * indirects until their subordinates are free. This is to avoid journaling * every block that is freed which may consume more space than the journal * itself. The recovery program will see the free block journals at the * base of the truncated area and traverse them to reclaim space. The * pointers in the inode may be cleared immediately after the journal * records are written because each direct and indirect pointer in the * inode is recorded in a journal. This permits full truncation to proceed * asynchronously. The write order is journal -> inode -> cgs -> indirects. * * The algorithm is as follows: * 1) Traverse the in-memory state and create journal entries to release * the relevant blocks and full indirect trees. * 2) Traverse the indirect block chain adding partial truncation freework * records to indirects in the path to lastlbn. The freework will * prevent new allocation dependencies from being satisfied in this * indirect until the truncation completes. * 3) Read and lock the inode block, performing an update with the new size * and pointers. This prevents truncated data from becoming valid on * disk through step 4. * 4) Reap unsatisfied dependencies that are beyond the truncated area, * eliminate journal work for those records that do not require it. * 5) Schedule the journal records to be written followed by the inode block. * 6) Allocate any necessary frags for the end of file. * 7) Zero any partially truncated blocks. * * From this point truncation proceeds asynchronously using the freework and * indir_trunc machinery. The file will not be extended again into a * partially truncated indirect block until all work is completed but * the normal dependency mechanism ensures that it is rolled back/forward * as appropriate. Further truncation may occur without delay and is * serialized in indir_trunc(). */ void softdep_journal_freeblocks(ip, cred, length, flags) struct inode *ip; /* The inode whose length is to be reduced */ struct ucred *cred; off_t length; /* The new length for the file */ int flags; /* IO_EXT and/or IO_NORMAL */ { struct freeblks *freeblks, *fbn; struct worklist *wk, *wkn; struct inodedep *inodedep; struct jblkdep *jblkdep; struct allocdirect *adp, *adpn; struct ufsmount *ump; struct fs *fs; struct buf *bp; struct vnode *vp; struct mount *mp; ufs2_daddr_t extblocks, datablocks; ufs_lbn_t tmpval, lbn, lastlbn; int frags, lastoff, iboff, allocblock, needj, error, i; ump = ITOUMP(ip); mp = UFSTOVFS(ump); fs = ump->um_fs; KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_journal_freeblocks called on non-softdep filesystem")); vp = ITOV(ip); needj = 1; iboff = -1; allocblock = 0; extblocks = 0; datablocks = 0; frags = 0; freeblks = newfreeblks(mp, ip); ACQUIRE_LOCK(ump); /* * If we're truncating a removed file that will never be written * we don't need to journal the block frees.
The canceled journals * for the allocations will suffice. */ inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); if ((inodedep->id_state & (UNLINKED | DEPCOMPLETE)) == UNLINKED && length == 0) needj = 0; CTR3(KTR_SUJ, "softdep_journal_freeblks: ip %d length %ld needj %d", ip->i_number, length, needj); FREE_LOCK(ump); /* * Calculate the lbn that we are truncating to. This results in -1 * if we're truncating the 0 bytes. So it is the last lbn we want * to keep, not the first lbn we want to truncate. */ lastlbn = lblkno(fs, length + fs->fs_bsize - 1) - 1; lastoff = blkoff(fs, length); /* * Compute frags we are keeping in lastlbn. 0 means all. */ if (lastlbn >= 0 && lastlbn < UFS_NDADDR) { frags = fragroundup(fs, lastoff); /* adp offset of last valid allocdirect. */ iboff = lastlbn; } else if (lastlbn > 0) iboff = UFS_NDADDR; if (fs->fs_magic == FS_UFS2_MAGIC) extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize)); /* * Handle normal data blocks and indirects. This section saves * values used after the inode update to complete frag and indirect * truncation. */ if ((flags & IO_NORMAL) != 0) { /* * Handle truncation of whole direct and indirect blocks. */ for (i = iboff + 1; i < UFS_NDADDR; i++) setup_freedirect(freeblks, ip, i, needj); for (i = 0, tmpval = NINDIR(fs), lbn = UFS_NDADDR; i < UFS_NIADDR; i++, lbn += tmpval, tmpval *= NINDIR(fs)) { /* Release a whole indirect tree. */ if (lbn > lastlbn) { setup_freeindir(freeblks, ip, i, -lbn -i, needj); continue; } iboff = i + UFS_NDADDR; /* * Traverse partially truncated indirect tree. */ if (lbn <= lastlbn && lbn + tmpval - 1 > lastlbn) setup_trunc_indir(freeblks, ip, -lbn - i, lastlbn, DIP(ip, i_ib[i])); } /* * Handle partial truncation to a frag boundary. */ if (frags) { ufs2_daddr_t blkno; long oldfrags; oldfrags = blksize(fs, ip, lastlbn); blkno = DIP(ip, i_db[lastlbn]); if (blkno && oldfrags != frags) { oldfrags -= frags; oldfrags = numfrags(fs, oldfrags); blkno += numfrags(fs, frags); newfreework(ump, freeblks, NULL, lastlbn, blkno, oldfrags, 0, needj); if (needj) adjust_newfreework(freeblks, numfrags(fs, frags)); } else if (blkno == 0) allocblock = 1; } /* * Add a journal record for partial truncate if we are * handling indirect blocks. Non-indirects need no extra * journaling. */ if (length != 0 && lastlbn >= UFS_NDADDR) { ip->i_flag |= IN_TRUNCATED; newjtrunc(freeblks, length, 0); } ip->i_size = length; DIP_SET(ip, i_size, ip->i_size); datablocks = DIP(ip, i_blocks) - extblocks; if (length != 0) datablocks = blkcount(fs, datablocks, length); freeblks->fb_len = length; } if ((flags & IO_EXT) != 0) { for (i = 0; i < UFS_NXADDR; i++) setup_freeext(freeblks, ip, i, needj); ip->i_din2->di_extsize = 0; datablocks += extblocks; } #ifdef QUOTA /* Reference the quotas in case the block count is wrong in the end. */ quotaref(vp, freeblks->fb_quota); (void) chkdq(ip, -datablocks, NOCRED, 0); #endif freeblks->fb_chkcnt = -datablocks; UFS_LOCK(ump); fs->fs_pendingblocks += datablocks; UFS_UNLOCK(ump); DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks); /* * Handle truncation of incomplete alloc direct dependencies. We * hold the inode block locked to prevent incomplete dependencies * from reaching the disk while we are eliminating those that * have been truncated. This is a partially inlined ffs_update(). 
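 * Keeping bp locked across the dependency surgery below means no
 * intervening write can push out a dinode that still references
 * blocks we are about to free; a plain ffs_update() call would have
 * released the buffer immediately, which is why the copy of the
 * in-core dinode into the inode block is open-coded here.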
*/ ufs_itimes(vp); ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED); error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int)fs->fs_bsize, cred, &bp); if (error) { brelse(bp); softdep_error("softdep_journal_freeblocks", error); return; } if (bp->b_bufsize == fs->fs_bsize) bp->b_flags |= B_CLUSTEROK; softdep_update_inodeblock(ip, bp, 0); if (ump->um_fstype == UFS1) *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1; else *((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2; ACQUIRE_LOCK(ump); (void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); if ((inodedep->id_state & IOSTARTED) != 0) panic("softdep_setup_freeblocks: inode busy"); /* * Add the freeblks structure to the list of operations that * must await the zero'ed inode being written to disk. If we * still have a bitmap dependency (needj), then the inode * has never been written to disk, so we can process the * freeblks below once we have deleted the dependencies. */ if (needj) WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list); else freeblks->fb_state |= COMPLETE; if ((flags & IO_NORMAL) != 0) { TAILQ_FOREACH_SAFE(adp, &inodedep->id_inoupdt, ad_next, adpn) { if (adp->ad_offset > iboff) cancel_allocdirect(&inodedep->id_inoupdt, adp, freeblks); /* * Truncate the allocdirect. We could eliminate * or modify journal records as well. */ else if (adp->ad_offset == iboff && frags) adp->ad_newsize = frags; } } if ((flags & IO_EXT) != 0) while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL) cancel_allocdirect(&inodedep->id_extupdt, adp, freeblks); /* * Scan the bufwait list for newblock dependencies that will never * make it to disk. */ LIST_FOREACH_SAFE(wk, &inodedep->id_bufwait, wk_list, wkn) { if (wk->wk_type != D_ALLOCDIRECT) continue; adp = WK_ALLOCDIRECT(wk); if (((flags & IO_NORMAL) != 0 && (adp->ad_offset > iboff)) || ((flags & IO_EXT) != 0 && (adp->ad_state & EXTDATA))) { cancel_jfreeblk(freeblks, adp->ad_newblkno); cancel_newblk(WK_NEWBLK(wk), NULL, &freeblks->fb_jwork); WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk); } } /* * Add journal work. */ LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps) add_to_journal(&jblkdep->jb_list); FREE_LOCK(ump); bdwrite(bp); /* * Truncate dependency structures beyond length. */ trunc_dependencies(ip, freeblks, lastlbn, frags, flags); /* * This is only set when we need to allocate a fragment because * none existed at the end of a frag-sized file. It handles only * allocating a new, zero filled block. */ if (allocblock) { ip->i_size = length - lastoff; DIP_SET(ip, i_size, ip->i_size); error = UFS_BALLOC(vp, length - 1, 1, cred, BA_CLRBUF, &bp); if (error != 0) { softdep_error("softdep_journal_freeblks", error); return; } ip->i_size = length; DIP_SET(ip, i_size, length); ip->i_flag |= IN_CHANGE | IN_UPDATE; allocbuf(bp, frags); ffs_update(vp, 0); bawrite(bp); } else if (lastoff != 0 && vp->v_type != VDIR) { int size; /* * Zero the end of a truncated frag or block. */ size = sblksize(fs, length, lastlbn); error = bread(vp, lastlbn, size, cred, &bp); if (error) { softdep_error("softdep_journal_freeblks", error); return; } bzero((char *)bp->b_data + lastoff, size - lastoff); bawrite(bp); } ACQUIRE_LOCK(ump); inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); TAILQ_INSERT_TAIL(&inodedep->id_freeblklst, freeblks, fb_next); freeblks->fb_state |= DEPCOMPLETE | ONDEPLIST; /* * We zero earlier truncations so they don't erroneously * update i_blocks. 
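 * Example: a truncate to 4096 followed by a truncate to 0 on the
 * same inode queues two freeblks.  If the first one still carried
 * fb_len == 4096, its completion in handle_complete_freeblocks()
 * could adjust i_blocks for space the full truncation has already
 * reclaimed, so every freeblks queued ahead of a zero-length one
 * has its fb_len cleared below.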
*/ if (freeblks->fb_len == 0 && (flags & IO_NORMAL) != 0) TAILQ_FOREACH(fbn, &inodedep->id_freeblklst, fb_next) fbn->fb_len = 0; if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE && LIST_EMPTY(&freeblks->fb_jblkdephd)) freeblks->fb_state |= INPROGRESS; else freeblks = NULL; FREE_LOCK(ump); if (freeblks) handle_workitem_freeblocks(freeblks, 0); trunc_pages(ip, length, extblocks, flags); } /* * Flush a JOP_SYNC to the journal. */ void softdep_journal_fsync(ip) struct inode *ip; { struct jfsync *jfsync; struct ufsmount *ump; ump = ITOUMP(ip); KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, ("softdep_journal_fsync called on non-softdep filesystem")); if ((ip->i_flag & IN_TRUNCATED) == 0) return; ip->i_flag &= ~IN_TRUNCATED; jfsync = malloc(sizeof(*jfsync), M_JFSYNC, M_SOFTDEP_FLAGS | M_ZERO); workitem_alloc(&jfsync->jfs_list, D_JFSYNC, UFSTOVFS(ump)); jfsync->jfs_size = ip->i_size; jfsync->jfs_ino = ip->i_number; ACQUIRE_LOCK(ump); add_to_journal(&jfsync->jfs_list); jwait(&jfsync->jfs_list, MNT_WAIT); FREE_LOCK(ump); } /* * Block de-allocation dependencies. * * When blocks are de-allocated, the on-disk pointers must be nullified before * the blocks are made available for use by other files. (The true * requirement is that old pointers must be nullified before new on-disk * pointers are set. We chose this slightly more stringent requirement to * reduce complexity.) Our implementation handles this dependency by updating * the inode (or indirect block) appropriately but delaying the actual block * de-allocation (i.e., freemap and free space count manipulation) until * after the updated versions reach stable storage. After the disk is * updated, the blocks can be safely de-allocated whenever it is convenient. * This implementation handles only the common case of reducing a file's * length to zero. Other cases are handled by the conventional synchronous * write approach. * * The ffs implementation with which we worked double-checks * the state of the block pointers and file size as it reduces * a file's length. Some of this code is replicated here in our * soft updates implementation. The freeblks->fb_chkcnt field is * used to transfer a part of this information to the procedure * that eventually de-allocates the blocks. * * This routine should be called from the routine that shortens * a file's length, before the inode's size or block pointers * are modified. It will save the block pointer information for * later release and zero the inode so that the calling routine * can release it. 
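 * The resulting order of updates for a truncation to zero is thus
 * (sketch, numbering ours):
 *
 *	1. record the old block pointers in a freeblks workitem;
 *	2. zero the in-core inode size and block pointers;
 *	3. let the zero'ed inode reach its disk buffer;
 *	4. only then free the blocks and adjust the freemap and
 *	   free space counts.
 *
 * Step 4 may run long after softdep_setup_freeblocks() returns.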
*/ void softdep_setup_freeblocks(ip, length, flags) struct inode *ip; /* The inode whose length is to be reduced */ off_t length; /* The new length for the file */ int flags; /* IO_EXT and/or IO_NORMAL */ { struct ufs1_dinode *dp1; struct ufs2_dinode *dp2; struct freeblks *freeblks; struct inodedep *inodedep; struct allocdirect *adp; struct ufsmount *ump; struct buf *bp; struct fs *fs; ufs2_daddr_t extblocks, datablocks; struct mount *mp; int i, delay, error; ufs_lbn_t tmpval; ufs_lbn_t lbn; ump = ITOUMP(ip); mp = UFSTOVFS(ump); KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_setup_freeblocks called on non-softdep filesystem")); CTR2(KTR_SUJ, "softdep_setup_freeblks: ip %d length %ld", ip->i_number, length); KASSERT(length == 0, ("softdep_setup_freeblocks: non-zero length")); fs = ump->um_fs; if ((error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int)fs->fs_bsize, NOCRED, &bp)) != 0) { brelse(bp); softdep_error("softdep_setup_freeblocks", error); return; } freeblks = newfreeblks(mp, ip); extblocks = 0; datablocks = 0; if (fs->fs_magic == FS_UFS2_MAGIC) extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize)); if ((flags & IO_NORMAL) != 0) { for (i = 0; i < UFS_NDADDR; i++) setup_freedirect(freeblks, ip, i, 0); for (i = 0, tmpval = NINDIR(fs), lbn = UFS_NDADDR; i < UFS_NIADDR; i++, lbn += tmpval, tmpval *= NINDIR(fs)) setup_freeindir(freeblks, ip, i, -lbn -i, 0); ip->i_size = 0; DIP_SET(ip, i_size, 0); datablocks = DIP(ip, i_blocks) - extblocks; } if ((flags & IO_EXT) != 0) { for (i = 0; i < UFS_NXADDR; i++) setup_freeext(freeblks, ip, i, 0); ip->i_din2->di_extsize = 0; datablocks += extblocks; } #ifdef QUOTA /* Reference the quotas in case the block count is wrong in the end. */ quotaref(ITOV(ip), freeblks->fb_quota); (void) chkdq(ip, -datablocks, NOCRED, 0); #endif freeblks->fb_chkcnt = -datablocks; UFS_LOCK(ump); fs->fs_pendingblocks += datablocks; UFS_UNLOCK(ump); DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks); /* * Push the zero'ed inode to its disk buffer so that we are free * to delete its dependencies below. Once the dependencies are gone * the buffer can be safely released. */ if (ump->um_fstype == UFS1) { dp1 = ((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number)); ip->i_din1->di_freelink = dp1->di_freelink; *dp1 = *ip->i_din1; } else { dp2 = ((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number)); ip->i_din2->di_freelink = dp2->di_freelink; *dp2 = *ip->i_din2; } /* * Find and eliminate any inode dependencies. */ ACQUIRE_LOCK(ump); (void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); if ((inodedep->id_state & IOSTARTED) != 0) panic("softdep_setup_freeblocks: inode busy"); /* * Add the freeblks structure to the list of operations that * must await the zero'ed inode being written to disk. If we * still have a bitmap dependency (delay == 0), then the inode * has never been written to disk, so we can process the * freeblks below once we have deleted the dependencies. */ delay = (inodedep->id_state & DEPCOMPLETE); if (delay) WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list); else freeblks->fb_state |= COMPLETE; /* * Because the file length has been truncated to zero, any * pending block allocation dependency structures associated * with this inode are obsolete and can simply be de-allocated. * We must first merge the two dependency lists to get rid of * any duplicate freefrag structures, then purge the merged list.
* If we still have a bitmap dependency, then the inode has never * been written to disk, so we can free any fragments without delay. */ if (flags & IO_NORMAL) { merge_inode_lists(&inodedep->id_newinoupdt, &inodedep->id_inoupdt); while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL) cancel_allocdirect(&inodedep->id_inoupdt, adp, freeblks); } if (flags & IO_EXT) { merge_inode_lists(&inodedep->id_newextupdt, &inodedep->id_extupdt); while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL) cancel_allocdirect(&inodedep->id_extupdt, adp, freeblks); } FREE_LOCK(ump); bdwrite(bp); trunc_dependencies(ip, freeblks, -1, 0, flags); ACQUIRE_LOCK(ump); if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) (void) free_inodedep(inodedep); freeblks->fb_state |= DEPCOMPLETE; /* * If the inode with zeroed block pointers is now on disk * we can start freeing blocks. */ if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE) freeblks->fb_state |= INPROGRESS; else freeblks = NULL; FREE_LOCK(ump); if (freeblks) handle_workitem_freeblocks(freeblks, 0); trunc_pages(ip, length, extblocks, flags); } /* * Eliminate pages from the page cache that back parts of this inode and * adjust the vnode pager's idea of our size. This prevents stale data * from hanging around in the page cache. */ static void trunc_pages(ip, length, extblocks, flags) struct inode *ip; off_t length; ufs2_daddr_t extblocks; int flags; { struct vnode *vp; struct fs *fs; ufs_lbn_t lbn; off_t end, extend; vp = ITOV(ip); fs = ITOFS(ip); extend = OFF_TO_IDX(lblktosize(fs, -extblocks)); if ((flags & IO_EXT) != 0) vn_pages_remove(vp, extend, 0); if ((flags & IO_NORMAL) == 0) return; BO_LOCK(&vp->v_bufobj); drain_output(vp); BO_UNLOCK(&vp->v_bufobj); /* * The vnode pager eliminates file pages we eliminate indirects * below. */ vnode_pager_setsize(vp, length); /* * Calculate the end based on the last indirect we want to keep. If * the block extends into indirects we can just use the negative of * its lbn. Doubles and triples exist at lower numbers so we must * be careful not to remove those, if they exist. double and triple * indirect lbns do not overlap with others so it is not important * to verify how many levels are required. */ lbn = lblkno(fs, length); if (lbn >= UFS_NDADDR) { /* Calculate the virtual lbn of the triple indirect. */ lbn = -lbn - (UFS_NIADDR - 1); end = OFF_TO_IDX(lblktosize(fs, lbn)); } else end = extend; vn_pages_remove(vp, OFF_TO_IDX(OFF_MAX), end); } /* * See if the buf bp is in the range eliminated by truncation. */ static int trunc_check_buf(bp, blkoffp, lastlbn, lastoff, flags) struct buf *bp; int *blkoffp; ufs_lbn_t lastlbn; int lastoff; int flags; { ufs_lbn_t lbn; *blkoffp = 0; /* Only match ext/normal blocks as appropriate. */ if (((flags & IO_EXT) == 0 && (bp->b_xflags & BX_ALTDATA)) || ((flags & IO_NORMAL) == 0 && (bp->b_xflags & BX_ALTDATA) == 0)) return (0); /* ALTDATA is always a full truncation. */ if ((bp->b_xflags & BX_ALTDATA) != 0) return (1); /* -1 is full truncation. */ if (lastlbn == -1) return (1); /* * If this is a partial truncate we only want those * blocks and indirect blocks that cover the range * we're after. */ lbn = bp->b_lblkno; if (lbn < 0) lbn = -(lbn + lbn_level(lbn)); if (lbn < lastlbn) return (0); /* Here we only truncate lblkno if it's partial. 
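 * (Indirect buffers carry negative lblknos; the expression
 * -(lbn + lbn_level(lbn)) above converts such an lblkno back to the
 * first data lbn the indirect block covers, so a single comparison
 * against lastlbn works for data and indirect buffers alike.)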
*/ if (lbn == lastlbn) { if (lastoff == 0) return (0); *blkoffp = lastoff; } return (1); } /* * Eliminate any dependencies that exist in memory beyond lblkno:off. */ static void trunc_dependencies(ip, freeblks, lastlbn, lastoff, flags) struct inode *ip; struct freeblks *freeblks; ufs_lbn_t lastlbn; int lastoff; int flags; { struct bufobj *bo; struct vnode *vp; struct buf *bp; int blkoff; /* * We must wait for any I/O in progress to finish so that * all potential buffers on the dirty list will be visible. * Once they are all there, walk the list and get rid of * any dependencies. */ vp = ITOV(ip); bo = &vp->v_bufobj; BO_LOCK(bo); drain_output(vp); TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) bp->b_vflags &= ~BV_SCANNED; restart: TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) { if (bp->b_vflags & BV_SCANNED) continue; if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) { bp->b_vflags |= BV_SCANNED; continue; } KASSERT(bp->b_bufobj == bo, ("Wrong object in buffer")); if ((bp = getdirtybuf(bp, BO_LOCKPTR(bo), MNT_WAIT)) == NULL) goto restart; BO_UNLOCK(bo); if (deallocate_dependencies(bp, freeblks, blkoff)) bqrelse(bp); else brelse(bp); BO_LOCK(bo); goto restart; } /* * Now do the work of vtruncbuf while also matching indirect blocks. */ TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs) bp->b_vflags &= ~BV_SCANNED; cleanrestart: TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs) { if (bp->b_vflags & BV_SCANNED) continue; if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) { bp->b_vflags |= BV_SCANNED; continue; } if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo)) == ENOLCK) { BO_LOCK(bo); goto cleanrestart; } bp->b_vflags |= BV_SCANNED; bremfree(bp); if (blkoff != 0) { allocbuf(bp, blkoff); bqrelse(bp); } else { bp->b_flags |= B_INVAL | B_NOCACHE | B_RELBUF; brelse(bp); } BO_LOCK(bo); goto cleanrestart; } drain_output(vp); BO_UNLOCK(bo); } static int cancel_pagedep(pagedep, freeblks, blkoff) struct pagedep *pagedep; struct freeblks *freeblks; int blkoff; { struct jremref *jremref; struct jmvref *jmvref; struct dirrem *dirrem, *tmp; int i; /* * Copy any directory remove dependencies to the list * to be processed after the freeblks proceeds. If the * directory entries never made it to disk they * can be dumped directly onto the work list. */ LIST_FOREACH_SAFE(dirrem, &pagedep->pd_dirremhd, dm_next, tmp) { /* Skip this directory removal if it is intended to remain. */ if (dirrem->dm_offset < blkoff) continue; /* * If there are any dirrems we wait for the journal write * to complete and then restart the buf scan as the lock * has been dropped. */ while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL) { jwait(&jremref->jr_list, MNT_WAIT); return (ERESTART); } LIST_REMOVE(dirrem, dm_next); dirrem->dm_dirinum = pagedep->pd_ino; WORKLIST_INSERT(&freeblks->fb_freeworkhd, &dirrem->dm_list); } while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL) { jwait(&jmvref->jm_list, MNT_WAIT); return (ERESTART); } /* * When we're partially truncating a pagedep we just want to flush * journal entries and return. There cannot be any adds in the * truncated portion of the directory and newblk must remain if * part of the block remains.
*/ if (blkoff != 0) { struct diradd *dap; LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) if (dap->da_offset > blkoff) panic("cancel_pagedep: diradd %p off %d > %d", dap, dap->da_offset, blkoff); for (i = 0; i < DAHASHSZ; i++) LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) if (dap->da_offset > blkoff) panic("cancel_pagedep: diradd %p off %d > %d", dap, dap->da_offset, blkoff); return (0); } /* * There should be no directory add dependencies present * as the directory could not be truncated until all * children were removed. */ KASSERT(LIST_FIRST(&pagedep->pd_pendinghd) == NULL, ("deallocate_dependencies: pendinghd != NULL")); for (i = 0; i < DAHASHSZ; i++) KASSERT(LIST_FIRST(&pagedep->pd_diraddhd[i]) == NULL, ("deallocate_dependencies: diraddhd != NULL")); if ((pagedep->pd_state & NEWBLOCK) != 0) free_newdirblk(pagedep->pd_newdirblk); if (free_pagedep(pagedep) == 0) panic("Failed to free pagedep %p", pagedep); return (0); } /* * Reclaim any dependency structures from a buffer that is about to * be reallocated to a new vnode. The buffer must be locked, thus, * no I/O completion operations can occur while we are manipulating * its associated dependencies. The mutex is held so that other I/O's * associated with related dependencies do not occur. */ static int deallocate_dependencies(bp, freeblks, off) struct buf *bp; struct freeblks *freeblks; int off; { struct indirdep *indirdep; struct pagedep *pagedep; struct worklist *wk, *wkn; struct ufsmount *ump; if ((wk = LIST_FIRST(&bp->b_dep)) == NULL) goto done; ump = VFSTOUFS(wk->wk_mp); ACQUIRE_LOCK(ump); LIST_FOREACH_SAFE(wk, &bp->b_dep, wk_list, wkn) { switch (wk->wk_type) { case D_INDIRDEP: indirdep = WK_INDIRDEP(wk); if (bp->b_lblkno >= 0 || bp->b_blkno != indirdep->ir_savebp->b_lblkno) panic("deallocate_dependencies: not indir"); cancel_indirdep(indirdep, bp, freeblks); continue; case D_PAGEDEP: pagedep = WK_PAGEDEP(wk); if (cancel_pagedep(pagedep, freeblks, off)) { FREE_LOCK(ump); return (ERESTART); } continue; case D_ALLOCINDIR: /* * Simply remove the allocindir, we'll find it via * the indirdep where we can clear pointers if * needed. */ WORKLIST_REMOVE(wk); continue; case D_FREEWORK: /* * A truncation is waiting for the zero'd pointers * to be written. It can be freed when the freeblks * is journaled. */ WORKLIST_REMOVE(wk); wk->wk_state |= ONDEPLIST; WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk); break; case D_ALLOCDIRECT: if (off != 0) continue; /* FALLTHROUGH */ default: panic("deallocate_dependencies: Unexpected type %s", TYPENAME(wk->wk_type)); /* NOTREACHED */ } } FREE_LOCK(ump); done: /* * Don't throw away this buf, we were partially truncating and * some deps may always remain. */ if (off) { allocbuf(bp, off); bp->b_vflags |= BV_SCANNED; return (EBUSY); } bp->b_flags |= B_INVAL | B_NOCACHE; return (0); } /* * An allocdirect is being canceled due to a truncate. We must make sure * the journal entry is released in concert with the blkfree that releases * the storage. Completed journal entries must not be released until the * space is no longer pointed to by the inode or in the bitmap. */ static void cancel_allocdirect(adphead, adp, freeblks) struct allocdirectlst *adphead; struct allocdirect *adp; struct freeblks *freeblks; { struct freework *freework; struct newblk *newblk; struct worklist *wk; TAILQ_REMOVE(adphead, adp, ad_next); newblk = (struct newblk *)adp; freework = NULL; /* * Find the correct freework structure. 
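 * The freework for this block was queued on the freeblks by the
 * setup_free*() helpers before any allocdirects were canceled, so
 * the linear scan matching fw_blkno against the canceled newblk's
 * nb_newblkno must find exactly one entry; failing to find it
 * panics below.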
*/ LIST_FOREACH(wk, &freeblks->fb_freeworkhd, wk_list) { if (wk->wk_type != D_FREEWORK) continue; freework = WK_FREEWORK(wk); if (freework->fw_blkno == newblk->nb_newblkno) break; } if (freework == NULL) panic("cancel_allocdirect: Freework not found"); /* * If a newblk exists at all we still have the journal entry that * initiated the allocation so we do not need to journal the free. */ cancel_jfreeblk(freeblks, freework->fw_blkno); /* * If the journal hasn't been written the jnewblk must be passed * to the call to ffs_blkfree that reclaims the space. We accomplish * this by linking the journal dependency into the freework to be * freed when freework_freeblock() is called. If the journal has * been written we can simply reclaim the journal space when the * freeblks work is complete. */ freework->fw_jnewblk = cancel_newblk(newblk, &freework->fw_list, &freeblks->fb_jwork); WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list); } /* * Cancel a new block allocation. May be an indirect or direct block. We * remove it from various lists and return any journal record that needs to * be resolved by the caller. * * A special consideration is made for indirects which were never pointed * at on disk and will never be found once this block is released. */ static struct jnewblk * cancel_newblk(newblk, wk, wkhd) struct newblk *newblk; struct worklist *wk; struct workhead *wkhd; { struct jnewblk *jnewblk; CTR1(KTR_SUJ, "cancel_newblk: blkno %jd", newblk->nb_newblkno); newblk->nb_state |= GOINGAWAY; /* * Previously we traversed the completedhd on each indirdep * attached to this newblk to cancel them and gather journal * work. Since we need only the oldest journal segment and * the lowest point on the tree will always have the oldest * journal segment we are free to release the segments * of any subordinates and may leave the indirdep list to * indirdep_complete() when this newblk is freed. */ if (newblk->nb_state & ONDEPLIST) { newblk->nb_state &= ~ONDEPLIST; LIST_REMOVE(newblk, nb_deps); } if (newblk->nb_state & ONWORKLIST) WORKLIST_REMOVE(&newblk->nb_list); /* * If the journal entry hasn't been written we save a pointer to * the dependency that frees it until it is written or the * superseding operation completes. */ jnewblk = newblk->nb_jnewblk; if (jnewblk != NULL && wk != NULL) { newblk->nb_jnewblk = NULL; jnewblk->jn_dep = wk; } if (!LIST_EMPTY(&newblk->nb_jwork)) jwork_move(wkhd, &newblk->nb_jwork); /* * When truncating we must free the newdirblk early to remove * the pagedep from the hash before returning. */ if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL) free_newdirblk(WK_NEWDIRBLK(wk)); if (!LIST_EMPTY(&newblk->nb_newdirblk)) panic("cancel_newblk: extra newdirblk"); return (jnewblk); } /* * Schedule the freefrag associated with a newblk to be released once * the pointers are written and the previous block is no longer needed. */ static void newblk_freefrag(newblk) struct newblk *newblk; { struct freefrag *freefrag; if (newblk->nb_freefrag == NULL) return; freefrag = newblk->nb_freefrag; newblk->nb_freefrag = NULL; freefrag->ff_state |= COMPLETE; if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE) add_to_worklist(&freefrag->ff_list, 0); } /* * Free a newblk. Generate a new freefrag work request if appropriate. * This must be called after the inode pointer and any direct block pointers * are valid or fully removed via truncate or frag extension. 
*/ static void free_newblk(newblk) struct newblk *newblk; { struct indirdep *indirdep; struct worklist *wk; KASSERT(newblk->nb_jnewblk == NULL, ("free_newblk: jnewblk %p still attached", newblk->nb_jnewblk)); KASSERT(newblk->nb_list.wk_type != D_NEWBLK, ("free_newblk: unclaimed newblk")); LOCK_OWNED(VFSTOUFS(newblk->nb_list.wk_mp)); newblk_freefrag(newblk); if (newblk->nb_state & ONDEPLIST) LIST_REMOVE(newblk, nb_deps); if (newblk->nb_state & ONWORKLIST) WORKLIST_REMOVE(&newblk->nb_list); LIST_REMOVE(newblk, nb_hash); if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL) free_newdirblk(WK_NEWDIRBLK(wk)); if (!LIST_EMPTY(&newblk->nb_newdirblk)) panic("free_newblk: extra newdirblk"); while ((indirdep = LIST_FIRST(&newblk->nb_indirdeps)) != NULL) indirdep_complete(indirdep); handle_jwork(&newblk->nb_jwork); WORKITEM_FREE(newblk, D_NEWBLK); } /* * Free a newdirblk. Clear the NEWBLOCK flag on its associated pagedep. * This routine must be called with splbio interrupts blocked. */ static void free_newdirblk(newdirblk) struct newdirblk *newdirblk; { struct pagedep *pagedep; struct diradd *dap; struct worklist *wk; LOCK_OWNED(VFSTOUFS(newdirblk->db_list.wk_mp)); WORKLIST_REMOVE(&newdirblk->db_list); /* * If the pagedep is still linked onto the directory buffer * dependency chain, then some of the entries on the * pd_pendinghd list may not be committed to disk yet. In * this case, we will simply clear the NEWBLOCK flag and * let the pd_pendinghd list be processed when the pagedep * is next written. If the pagedep is no longer on the buffer * dependency chain, then all the entries on the pd_pending * list are committed to disk and we can free them here. */ pagedep = newdirblk->db_pagedep; pagedep->pd_state &= ~NEWBLOCK; if ((pagedep->pd_state & ONWORKLIST) == 0) { while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) free_diradd(dap, NULL); /* * If no dependencies remain, the pagedep will be freed. */ free_pagedep(pagedep); } /* Should only ever be one item in the list. */ while ((wk = LIST_FIRST(&newdirblk->db_mkdir)) != NULL) { WORKLIST_REMOVE(wk); handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY); } WORKITEM_FREE(newdirblk, D_NEWDIRBLK); } /* * Prepare an inode to be freed. The actual free operation is not * done until the zero'ed inode has been written to disk. */ void softdep_freefile(pvp, ino, mode) struct vnode *pvp; ino_t ino; int mode; { struct inode *ip = VTOI(pvp); struct inodedep *inodedep; struct freefile *freefile; struct freeblks *freeblks; struct ufsmount *ump; ump = ITOUMP(ip); KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, ("softdep_freefile called on non-softdep filesystem")); /* * This sets up the inode de-allocation dependency. */ freefile = malloc(sizeof(struct freefile), M_FREEFILE, M_SOFTDEP_FLAGS); workitem_alloc(&freefile->fx_list, D_FREEFILE, pvp->v_mount); freefile->fx_mode = mode; freefile->fx_oldinum = ino; freefile->fx_devvp = ump->um_devvp; LIST_INIT(&freefile->fx_jwork); UFS_LOCK(ump); ump->um_fs->fs_pendinginodes += 1; UFS_UNLOCK(ump); /* * If the inodedep does not exist, then the zero'ed inode has * been written to disk. If the allocated inode has never been * written to disk, then the on-disk inode is zero'ed. In either * case we can free the file immediately. If the journal was * canceled before being written the inode will never make it to * disk and we must send the canceled journal entries to * ffs_freefile() to be cleared in conjunction with the bitmap. * Any blocks waiting on the inode to write can be safely freed * here as it will never be written.
*/ ACQUIRE_LOCK(ump); inodedep_lookup(pvp->v_mount, ino, 0, &inodedep); if (inodedep) { /* * Clear out freeblks that no longer need to reference * this inode. */ while ((freeblks = TAILQ_FIRST(&inodedep->id_freeblklst)) != NULL) { TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks, fb_next); freeblks->fb_state &= ~ONDEPLIST; } /* * Remove this inode from the unlinked list. */ if (inodedep->id_state & UNLINKED) { /* * Save the journal work to be freed with the bitmap * before we clear UNLINKED. Otherwise it can be lost * if the inode block is written. */ handle_bufwait(inodedep, &freefile->fx_jwork); clear_unlinked_inodedep(inodedep); /* * Re-acquire inodedep as we've dropped the * per-filesystem lock in clear_unlinked_inodedep(). */ inodedep_lookup(pvp->v_mount, ino, 0, &inodedep); } } if (inodedep == NULL || check_inode_unwritten(inodedep)) { FREE_LOCK(ump); handle_workitem_freefile(freefile); return; } if ((inodedep->id_state & DEPCOMPLETE) == 0) inodedep->id_state |= GOINGAWAY; WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list); FREE_LOCK(ump); if (ip->i_number == ino) ip->i_flag |= IN_MODIFIED; } /* * Check to see if an inode has never been written to disk. If * so free the inodedep and return success, otherwise return failure. * This routine must be called with splbio interrupts blocked. * * If we still have a bitmap dependency, then the inode has never * been written to disk. Drop the dependency as it is no longer * necessary since the inode is being deallocated. We set the * ALLCOMPLETE flags since the bitmap now properly shows that the * inode is not allocated. Even if the inode is actively being * written, it has been rolled back to its zero'ed state, so we * are ensured that a zero inode is what is on the disk. For short * lived files, this change will usually result in removing all the * dependencies from the inode so that it can be freed immediately. */ static int check_inode_unwritten(inodedep) struct inodedep *inodedep; { LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp)); if ((inodedep->id_state & (DEPCOMPLETE | UNLINKED)) != 0 || !LIST_EMPTY(&inodedep->id_dirremhd) || !LIST_EMPTY(&inodedep->id_pendinghd) || !LIST_EMPTY(&inodedep->id_bufwait) || !LIST_EMPTY(&inodedep->id_inowait) || !TAILQ_EMPTY(&inodedep->id_inoreflst) || !TAILQ_EMPTY(&inodedep->id_inoupdt) || !TAILQ_EMPTY(&inodedep->id_newinoupdt) || !TAILQ_EMPTY(&inodedep->id_extupdt) || !TAILQ_EMPTY(&inodedep->id_newextupdt) || !TAILQ_EMPTY(&inodedep->id_freeblklst) || inodedep->id_mkdiradd != NULL || inodedep->id_nlinkdelta != 0) return (0); /* * Another process might be in initiate_write_inodeblock_ufs[12] * trying to allocate memory without holding "Softdep Lock". 
*/ if ((inodedep->id_state & IOSTARTED) != 0 && inodedep->id_savedino1 == NULL) return (0); if (inodedep->id_state & ONDEPLIST) LIST_REMOVE(inodedep, id_deps); inodedep->id_state &= ~ONDEPLIST; inodedep->id_state |= ALLCOMPLETE; inodedep->id_bmsafemap = NULL; if (inodedep->id_state & ONWORKLIST) WORKLIST_REMOVE(&inodedep->id_list); if (inodedep->id_savedino1 != NULL) { free(inodedep->id_savedino1, M_SAVEDINO); inodedep->id_savedino1 = NULL; } if (free_inodedep(inodedep) == 0) panic("check_inode_unwritten: busy inode"); return (1); } static int check_inodedep_free(inodedep) struct inodedep *inodedep; { LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp)); if ((inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE || !LIST_EMPTY(&inodedep->id_dirremhd) || !LIST_EMPTY(&inodedep->id_pendinghd) || !LIST_EMPTY(&inodedep->id_bufwait) || !LIST_EMPTY(&inodedep->id_inowait) || !TAILQ_EMPTY(&inodedep->id_inoreflst) || !TAILQ_EMPTY(&inodedep->id_inoupdt) || !TAILQ_EMPTY(&inodedep->id_newinoupdt) || !TAILQ_EMPTY(&inodedep->id_extupdt) || !TAILQ_EMPTY(&inodedep->id_newextupdt) || !TAILQ_EMPTY(&inodedep->id_freeblklst) || inodedep->id_mkdiradd != NULL || inodedep->id_nlinkdelta != 0 || inodedep->id_savedino1 != NULL) return (0); return (1); } /* * Try to free an inodedep structure. Return 1 if it could be freed. */ static int free_inodedep(inodedep) struct inodedep *inodedep; { LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp)); if ((inodedep->id_state & (ONWORKLIST | UNLINKED)) != 0 || !check_inodedep_free(inodedep)) return (0); if (inodedep->id_state & ONDEPLIST) LIST_REMOVE(inodedep, id_deps); LIST_REMOVE(inodedep, id_hash); WORKITEM_FREE(inodedep, D_INODEDEP); return (1); } /* * Free the block referenced by a freework structure. The parent freeblks * structure is released and completed when the final cg bitmap reaches * the disk. This routine may be freeing a jnewblk which never made it to * disk in which case we do not have to wait as the operation is undone * in memory immediately. */ static void freework_freeblock(freework) struct freework *freework; { struct freeblks *freeblks; struct jnewblk *jnewblk; struct ufsmount *ump; struct workhead wkhd; struct fs *fs; int bsize; int needj; ump = VFSTOUFS(freework->fw_list.wk_mp); LOCK_OWNED(ump); /* * Handle partial truncate separately. */ if (freework->fw_indir) { complete_trunc_indir(freework); return; } freeblks = freework->fw_freeblks; fs = ump->um_fs; needj = MOUNTEDSUJ(freeblks->fb_list.wk_mp) != 0; bsize = lfragtosize(fs, freework->fw_frags); LIST_INIT(&wkhd); /* * DEPCOMPLETE is cleared in indirblk_insert() if the block lives * on the indirblk hashtable and prevents premature freeing. */ freework->fw_state |= DEPCOMPLETE; /* * SUJ needs to wait for the segment referencing freed indirect * blocks to expire so that we know the checker will not confuse * a re-allocated indirect block with its old contents. */ if (needj && freework->fw_lbn <= -UFS_NDADDR) indirblk_insert(freework); /* * If we are canceling an existing jnewblk pass it to the free * routine, otherwise pass the freeblk which will ultimately * release the freeblks. If we're not journaling, we can just * free the freeblks immediately. 
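 * Summarized, the decision below is (sketch):
 *
 *	jnewblk != NULL	-> cancel_jnewblk() queues the canceled
 *			   record on wkhd and needj drops to 0;
 *	else if needj	-> mark DELAYEDFREE, bump fb_cgwait and queue
 *			   the freework itself on wkhd;
 *	in every case	-> ffs_blkfree() is handed wkhd, and when
 *			   needj ended up 0 handle_written_freework()
 *			   runs at once instead of waiting for the
 *			   cg buffer write.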
*/ jnewblk = freework->fw_jnewblk; if (jnewblk != NULL) { cancel_jnewblk(jnewblk, &wkhd); needj = 0; } else if (needj) { freework->fw_state |= DELAYEDFREE; freeblks->fb_cgwait++; WORKLIST_INSERT(&wkhd, &freework->fw_list); } FREE_LOCK(ump); freeblks_free(ump, freeblks, btodb(bsize)); CTR4(KTR_SUJ, "freework_freeblock: ino %d blkno %jd lbn %jd size %ld", freeblks->fb_inum, freework->fw_blkno, freework->fw_lbn, bsize); ffs_blkfree(ump, fs, freeblks->fb_devvp, freework->fw_blkno, bsize, freeblks->fb_inum, freeblks->fb_vtype, &wkhd); ACQUIRE_LOCK(ump); /* * The jnewblk will be discarded and the bits in the map never * made it to disk. We can immediately free the freeblk. */ if (needj == 0) handle_written_freework(freework); } /* * We enqueue freework items that need processing back on the freeblks and * add the freeblks to the worklist. This makes it easier to find all work * required to flush a truncation in process_truncates(). */ static void freework_enqueue(freework) struct freework *freework; { struct freeblks *freeblks; freeblks = freework->fw_freeblks; if ((freework->fw_state & INPROGRESS) == 0) WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list); if ((freeblks->fb_state & (ONWORKLIST | INPROGRESS | ALLCOMPLETE)) == ALLCOMPLETE && LIST_EMPTY(&freeblks->fb_jblkdephd)) add_to_worklist(&freeblks->fb_list, WK_NODELAY); } /* * Start, continue, or finish the process of freeing an indirect block tree. * The free operation may be paused at any point with fw_off containing the * offset to restart from. This enables us to implement some flow control * for large truncates which may fan out and generate a huge number of * dependencies. */ static void handle_workitem_indirblk(freework) struct freework *freework; { struct freeblks *freeblks; struct ufsmount *ump; struct fs *fs; freeblks = freework->fw_freeblks; ump = VFSTOUFS(freeblks->fb_list.wk_mp); fs = ump->um_fs; if (freework->fw_state & DEPCOMPLETE) { handle_written_freework(freework); return; } if (freework->fw_off == NINDIR(fs)) { freework_freeblock(freework); return; } freework->fw_state |= INPROGRESS; FREE_LOCK(ump); indir_trunc(freework, fsbtodb(fs, freework->fw_blkno), freework->fw_lbn); ACQUIRE_LOCK(ump); } /* * Called when a freework structure attached to a cg buf is written. The * ref on either the parent or the freeblks structure is released and * the freeblks is added back to the worklist if there is more work to do. */ static void handle_written_freework(freework) struct freework *freework; { struct freeblks *freeblks; struct freework *parent; freeblks = freework->fw_freeblks; parent = freework->fw_parent; if (freework->fw_state & DELAYEDFREE) freeblks->fb_cgwait--; freework->fw_state |= COMPLETE; if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE) WORKITEM_FREE(freework, D_FREEWORK); if (parent) { if (--parent->fw_ref == 0) freework_enqueue(parent); return; } if (--freeblks->fb_ref != 0) return; if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST | INPROGRESS)) == ALLCOMPLETE && LIST_EMPTY(&freeblks->fb_jblkdephd)) add_to_worklist(&freeblks->fb_list, WK_NODELAY); } /* * This workitem routine performs the block de-allocation. * The workitem is added to the pending list after the updated * inode block has been written to disk. As mentioned above, * checks regarding the number of blocks de-allocated (compared * to the number of blocks allocated for the file) are also * performed in this function. 
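 * Work queued on fb_freeworkhd is dispatched by type below: D_DIRREM
 * entries are re-queued for directory removal, D_ALLOCDIRECT and
 * D_ALLOCINDIR newblks are released (an allocindir may first grow a
 * freework when its block requires a delayed free), and D_FREEWORK
 * entries either recurse through handle_workitem_indirblk() for
 * indirect trees or free a single block via freework_freeblock().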
*/ static int handle_workitem_freeblocks(freeblks, flags) struct freeblks *freeblks; int flags; { struct freework *freework; struct newblk *newblk; struct allocindir *aip; struct ufsmount *ump; struct worklist *wk; KASSERT(LIST_EMPTY(&freeblks->fb_jblkdephd), ("handle_workitem_freeblocks: Journal entries not written.")); ump = VFSTOUFS(freeblks->fb_list.wk_mp); ACQUIRE_LOCK(ump); while ((wk = LIST_FIRST(&freeblks->fb_freeworkhd)) != NULL) { WORKLIST_REMOVE(wk); switch (wk->wk_type) { case D_DIRREM: wk->wk_state |= COMPLETE; add_to_worklist(wk, 0); continue; case D_ALLOCDIRECT: free_newblk(WK_NEWBLK(wk)); continue; case D_ALLOCINDIR: aip = WK_ALLOCINDIR(wk); freework = NULL; if (aip->ai_state & DELAYEDFREE) { FREE_LOCK(ump); freework = newfreework(ump, freeblks, NULL, aip->ai_lbn, aip->ai_newblkno, ump->um_fs->fs_frag, 0, 0); ACQUIRE_LOCK(ump); } newblk = WK_NEWBLK(wk); if (newblk->nb_jnewblk) { freework->fw_jnewblk = newblk->nb_jnewblk; newblk->nb_jnewblk->jn_dep = &freework->fw_list; newblk->nb_jnewblk = NULL; } free_newblk(newblk); continue; case D_FREEWORK: freework = WK_FREEWORK(wk); if (freework->fw_lbn <= -UFS_NDADDR) handle_workitem_indirblk(freework); else freework_freeblock(freework); continue; default: panic("handle_workitem_freeblocks: Unknown type %s", TYPENAME(wk->wk_type)); } } if (freeblks->fb_ref != 0) { freeblks->fb_state &= ~INPROGRESS; wake_worklist(&freeblks->fb_list); freeblks = NULL; } FREE_LOCK(ump); if (freeblks) return handle_complete_freeblocks(freeblks, flags); return (0); } /* * Handle completion of block free via truncate. This allows fs_pending * to track the actual free block count more closely than if we only updated * it at the end. We must be careful to handle cases where the block count * on free was incorrect. */ static void freeblks_free(ump, freeblks, blocks) struct ufsmount *ump; struct freeblks *freeblks; int blocks; { struct fs *fs; ufs2_daddr_t remain; UFS_LOCK(ump); remain = -freeblks->fb_chkcnt; freeblks->fb_chkcnt += blocks; if (remain > 0) { if (remain < blocks) blocks = remain; fs = ump->um_fs; fs->fs_pendingblocks -= blocks; } UFS_UNLOCK(ump); } /* * Once all of the freework workitems are complete we can retire the * freeblocks dependency and any journal work awaiting completion. This * can not be called until all other dependencies are stable on disk. */ static int handle_complete_freeblocks(freeblks, flags) struct freeblks *freeblks; int flags; { struct inodedep *inodedep; struct inode *ip; struct vnode *vp; struct fs *fs; struct ufsmount *ump; ufs2_daddr_t spare; ump = VFSTOUFS(freeblks->fb_list.wk_mp); fs = ump->um_fs; flags = LK_EXCLUSIVE | flags; spare = freeblks->fb_chkcnt; /* * If we did not release the expected number of blocks we may have * to adjust the inode block count here. Only do so if it wasn't * a truncation to zero and the modrev still matches. */ if (spare && freeblks->fb_len != 0) { if (ffs_vgetf(freeblks->fb_list.wk_mp, freeblks->fb_inum, flags, &vp, FFSV_FORCEINSMQ) != 0) return (EBUSY); ip = VTOI(vp); if (DIP(ip, i_modrev) == freeblks->fb_modrev) { DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - spare); ip->i_flag |= IN_CHANGE; /* * We must wait so this happens before the * journal is reclaimed. */ ffs_update(vp, 1); } vput(vp); } if (spare < 0) { UFS_LOCK(ump); fs->fs_pendingblocks += spare; UFS_UNLOCK(ump); } #ifdef QUOTA /* Handle spare. 
*/ if (spare) quotaadj(freeblks->fb_quota, ump, -spare); quotarele(freeblks->fb_quota); #endif ACQUIRE_LOCK(ump); if (freeblks->fb_state & ONDEPLIST) { inodedep_lookup(freeblks->fb_list.wk_mp, freeblks->fb_inum, 0, &inodedep); TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks, fb_next); freeblks->fb_state &= ~ONDEPLIST; if (TAILQ_EMPTY(&inodedep->id_freeblklst)) free_inodedep(inodedep); } /* * All of the freeblock deps must be complete prior to this call * so it's now safe to complete earlier outstanding journal entries. */ handle_jwork(&freeblks->fb_jwork); WORKITEM_FREE(freeblks, D_FREEBLKS); FREE_LOCK(ump); return (0); } /* * Release blocks associated with the freeblks and stored in the indirect * block dbn. If level is greater than SINGLE, the block is an indirect block * and recursive calls to indirtrunc must be used to cleanse other indirect * blocks. * * This handles partial and complete truncation of blocks. Partial is noted * with goingaway == 0. In this case the freework is completed after the * zero'd indirects are written to disk. For full truncation the freework * is completed after the block is freed. */ static void indir_trunc(freework, dbn, lbn) struct freework *freework; ufs2_daddr_t dbn; ufs_lbn_t lbn; { struct freework *nfreework; struct workhead wkhd; struct freeblks *freeblks; struct buf *bp; struct fs *fs; struct indirdep *indirdep; struct ufsmount *ump; ufs1_daddr_t *bap1; ufs2_daddr_t nb, nnb, *bap2; ufs_lbn_t lbnadd, nlbn; int i, nblocks, ufs1fmt; int freedblocks; int goingaway; int freedeps; int needj; int level; int cnt; freeblks = freework->fw_freeblks; ump = VFSTOUFS(freeblks->fb_list.wk_mp); fs = ump->um_fs; /* * Get buffer of block pointers to be freed. There are three cases: * * 1) Partial truncate caches the indirdep pointer in the freework * which provides us a back copy to the save bp which holds the * pointers we want to clear. When this completes the zero * pointers are written to the real copy. * 2) The indirect is being completely truncated, cancel_indirdep() * eliminated the real copy and placed the indirdep on the saved * copy. The indirdep and buf are discarded when this completes. * 3) The indirect was not in memory, we read a copy off of the disk * using the devvp and drop and invalidate the buffer when we're * done. */ goingaway = 1; indirdep = NULL; if (freework->fw_indir != NULL) { goingaway = 0; indirdep = freework->fw_indir; bp = indirdep->ir_savebp; if (bp == NULL || bp->b_blkno != dbn) panic("indir_trunc: Bad saved buf %p blkno %jd", bp, (intmax_t)dbn); } else if ((bp = incore(&freeblks->fb_devvp->v_bufobj, dbn)) != NULL) { /* * The lock prevents the buf dep list from changing and * indirects on devvp should only ever have one dependency. */ indirdep = WK_INDIRDEP(LIST_FIRST(&bp->b_dep)); if (indirdep == NULL || (indirdep->ir_state & GOINGAWAY) == 0) panic("indir_trunc: Bad indirdep %p from buf %p", indirdep, bp); } else if (bread(freeblks->fb_devvp, dbn, (int)fs->fs_bsize, NOCRED, &bp) != 0) { brelse(bp); return; } ACQUIRE_LOCK(ump); /* Protects against a race with complete_trunc_indir(). */ freework->fw_state &= ~INPROGRESS; /* * If we have an indirdep we need to enforce the truncation order * and discard it when it is complete. */ if (indirdep) { if (freework != TAILQ_FIRST(&indirdep->ir_trunc) && !TAILQ_EMPTY(&indirdep->ir_trunc)) { /* * Add the complete truncate to the list on the * indirdep to enforce in-order processing. 
*/ if (freework->fw_indir == NULL) TAILQ_INSERT_TAIL(&indirdep->ir_trunc, freework, fw_next); FREE_LOCK(ump); return; } /* * If we're goingaway, free the indirdep. Otherwise it will * linger until the write completes. */ if (goingaway) free_indirdep(indirdep); } FREE_LOCK(ump); /* Initialize pointers depending on block size. */ if (ump->um_fstype == UFS1) { bap1 = (ufs1_daddr_t *)bp->b_data; nb = bap1[freework->fw_off]; ufs1fmt = 1; bap2 = NULL; } else { bap2 = (ufs2_daddr_t *)bp->b_data; nb = bap2[freework->fw_off]; ufs1fmt = 0; bap1 = NULL; } level = lbn_level(lbn); needj = MOUNTEDSUJ(UFSTOVFS(ump)) != 0; lbnadd = lbn_offset(fs, level); nblocks = btodb(fs->fs_bsize); nfreework = freework; freedeps = 0; cnt = 0; /* * Reclaim blocks. Traverses into nested indirect levels and * arranges for the current level to be freed when subordinates * are free when journaling. */ for (i = freework->fw_off; i < NINDIR(fs); i++, nb = nnb) { if (i != NINDIR(fs) - 1) { if (ufs1fmt) nnb = bap1[i+1]; else nnb = bap2[i+1]; } else nnb = 0; if (nb == 0) continue; cnt++; if (level != 0) { nlbn = (lbn + 1) - (i * lbnadd); if (needj != 0) { nfreework = newfreework(ump, freeblks, freework, nlbn, nb, fs->fs_frag, 0, 0); freedeps++; } indir_trunc(nfreework, fsbtodb(fs, nb), nlbn); } else { struct freedep *freedep; /* * Attempt to aggregate freedep dependencies for * all blocks being released to the same CG. */ LIST_INIT(&wkhd); if (needj != 0 && (nnb == 0 || (dtog(fs, nb) != dtog(fs, nnb)))) { freedep = newfreedep(freework); WORKLIST_INSERT_UNLOCKED(&wkhd, &freedep->fd_list); freedeps++; } CTR3(KTR_SUJ, "indir_trunc: ino %d blkno %jd size %ld", freeblks->fb_inum, nb, fs->fs_bsize); ffs_blkfree(ump, fs, freeblks->fb_devvp, nb, fs->fs_bsize, freeblks->fb_inum, freeblks->fb_vtype, &wkhd); } } if (goingaway) { bp->b_flags |= B_INVAL | B_NOCACHE; brelse(bp); } freedblocks = 0; if (level == 0) freedblocks = (nblocks * cnt); if (needj == 0) freedblocks += nblocks; freeblks_free(ump, freeblks, freedblocks); /* * If we are journaling set up the ref counts and offset so this * indirect can be completed when its children are free. */ if (needj) { ACQUIRE_LOCK(ump); freework->fw_off = i; freework->fw_ref += freedeps; freework->fw_ref -= NINDIR(fs) + 1; if (level == 0) freeblks->fb_cgwait += freedeps; if (freework->fw_ref == 0) freework_freeblock(freework); FREE_LOCK(ump); return; } /* * If we're not journaling we can free the indirect now. */ dbn = dbtofsb(fs, dbn); CTR3(KTR_SUJ, "indir_trunc 2: ino %d blkno %jd size %ld", freeblks->fb_inum, dbn, fs->fs_bsize); ffs_blkfree(ump, fs, freeblks->fb_devvp, dbn, fs->fs_bsize, freeblks->fb_inum, freeblks->fb_vtype, NULL); /* Non SUJ softdep does single-threaded truncations. */ if (freework->fw_blkno == dbn) { freework->fw_state |= ALLCOMPLETE; ACQUIRE_LOCK(ump); handle_written_freework(freework); FREE_LOCK(ump); } return; } /* * Cancel an allocindir when it is removed via truncation. When bp is not * NULL the indirect never appeared on disk and is scheduled to be freed * independently of the indir so we can more easily track journal work. */ static void cancel_allocindir(aip, bp, freeblks, trunc) struct allocindir *aip; struct buf *bp; struct freeblks *freeblks; int trunc; { struct indirdep *indirdep; struct freefrag *freefrag; struct newblk *newblk; newblk = (struct newblk *)aip; LIST_REMOVE(aip, ai_next); /* * We must eliminate the pointer in bp if it must be freed on its * own due to partial truncate or pending journal work. 
*/ if (bp && (trunc || newblk->nb_jnewblk)) { /* * Clear the pointer and mark the aip to be freed * directly if it never existed on disk. */ aip->ai_state |= DELAYEDFREE; indirdep = aip->ai_indirdep; if (indirdep->ir_state & UFS1FMT) ((ufs1_daddr_t *)bp->b_data)[aip->ai_offset] = 0; else ((ufs2_daddr_t *)bp->b_data)[aip->ai_offset] = 0; } /* * When truncating the previous pointer will be freed via * savedbp. Eliminate the freefrag which would dup free. */ if (trunc && (freefrag = newblk->nb_freefrag) != NULL) { newblk->nb_freefrag = NULL; if (freefrag->ff_jdep) cancel_jfreefrag( WK_JFREEFRAG(freefrag->ff_jdep)); jwork_move(&freeblks->fb_jwork, &freefrag->ff_jwork); WORKITEM_FREE(freefrag, D_FREEFRAG); } /* * If the journal hasn't been written the jnewblk must be passed * to the call to ffs_blkfree that reclaims the space. We accomplish * this by leaving the journal dependency on the newblk to be freed * when a freework is created in handle_workitem_freeblocks(). */ cancel_newblk(newblk, NULL, &freeblks->fb_jwork); WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list); } /* * Create the mkdir dependencies for . and .. in a new directory. Link them * in to a newdirblk so any subsequent additions are tracked properly. The * caller is responsible for adding the mkdir1 dependency to the journal * and updating id_mkdiradd. This function returns with the per-filesystem * lock held. */ static struct mkdir * setup_newdir(dap, newinum, dinum, newdirbp, mkdirp) struct diradd *dap; ino_t newinum; ino_t dinum; struct buf *newdirbp; struct mkdir **mkdirp; { struct newblk *newblk; struct pagedep *pagedep; struct inodedep *inodedep; struct newdirblk *newdirblk; struct mkdir *mkdir1, *mkdir2; struct worklist *wk; struct jaddref *jaddref; struct ufsmount *ump; struct mount *mp; mp = dap->da_list.wk_mp; ump = VFSTOUFS(mp); newdirblk = malloc(sizeof(struct newdirblk), M_NEWDIRBLK, M_SOFTDEP_FLAGS); workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp); LIST_INIT(&newdirblk->db_mkdir); mkdir1 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS); workitem_alloc(&mkdir1->md_list, D_MKDIR, mp); mkdir1->md_state = ATTACHED | MKDIR_BODY; mkdir1->md_diradd = dap; mkdir1->md_jaddref = NULL; mkdir2 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS); workitem_alloc(&mkdir2->md_list, D_MKDIR, mp); mkdir2->md_state = ATTACHED | MKDIR_PARENT; mkdir2->md_diradd = dap; mkdir2->md_jaddref = NULL; if (MOUNTEDSUJ(mp) == 0) { mkdir1->md_state |= DEPCOMPLETE; mkdir2->md_state |= DEPCOMPLETE; } /* * Dependency on "." and ".." being written to disk. */ mkdir1->md_buf = newdirbp; ACQUIRE_LOCK(VFSTOUFS(mp)); LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir1, md_mkdirs); /* * We must link the pagedep, allocdirect, and newdirblk for * the initial file page so the pointer to the new directory * is not written until the directory contents are live and * any subsequent additions are not marked live until the * block is reachable via the inode. 
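 *
 * Illustrative ordering (annotation, not in the original source) for
 * a mkdir creating inode N under parent P:
 *	1) MKDIR_BODY:   N's first block ("." and "..") reaches disk;
 *	2) MKDIR_PARENT: P's inode, with i_nlink bumped for "..",
 *	   reaches disk;
 *	3) only then may P's directory block naming N be written.
 * mkdir1 and mkdir2 above track (1) and (2); the diradd keeps (3)
 * rolled back until both complete.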
*/ if (pagedep_lookup(mp, newdirbp, newinum, 0, 0, &pagedep) == 0) panic("setup_newdir: lost pagedep"); LIST_FOREACH(wk, &newdirbp->b_dep, wk_list) if (wk->wk_type == D_ALLOCDIRECT) break; if (wk == NULL) panic("setup_newdir: lost allocdirect"); if (pagedep->pd_state & NEWBLOCK) panic("setup_newdir: NEWBLOCK already set"); newblk = WK_NEWBLK(wk); pagedep->pd_state |= NEWBLOCK; pagedep->pd_newdirblk = newdirblk; newdirblk->db_pagedep = pagedep; WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list); WORKLIST_INSERT(&newdirblk->db_mkdir, &mkdir1->md_list); /* * Look up the inodedep for the parent directory so that we * can link mkdir2 into the pending dotdot jaddref or * the inode write if there is none. If the inode is * ALLCOMPLETE and no jaddref is present all dependencies have * been satisfied and mkdir2 can be freed. */ inodedep_lookup(mp, dinum, 0, &inodedep); if (MOUNTEDSUJ(mp)) { if (inodedep == NULL) panic("setup_newdir: Lost parent."); jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, inoreflst); KASSERT(jaddref != NULL && jaddref->ja_parent == newinum && (jaddref->ja_state & MKDIR_PARENT), ("setup_newdir: bad dotdot jaddref %p", jaddref)); LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir2, md_mkdirs); mkdir2->md_jaddref = jaddref; jaddref->ja_mkdir = mkdir2; } else if (inodedep == NULL || (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { dap->da_state &= ~MKDIR_PARENT; WORKITEM_FREE(mkdir2, D_MKDIR); mkdir2 = NULL; } else { LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir2, md_mkdirs); WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir2->md_list); } *mkdirp = mkdir2; return (mkdir1); } /* * Directory entry addition dependencies. * * When adding a new directory entry, the inode (with its incremented link * count) must be written to disk before the directory entry's pointer to it. * Also, if the inode is newly allocated, the corresponding freemap must be * updated (on disk) before the directory entry's pointer. These requirements * are met via undo/redo on the directory entry's pointer, which consists * simply of the inode number. * * As directory entries are added and deleted, the free space within a * directory block can become fragmented. The ufs filesystem will compact * a fragmented directory block to make space for a new entry. When this * occurs, the offsets of previously added entries change. Any "diradd" * dependency structures corresponding to these entries must be updated with * the new offsets. */ /* * This routine is called after the in-memory inode's link * count has been incremented, but before the directory entry's * pointer to the inode has been set. 
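 *
 * Hypothetical caller sketch (illustration only; the real call site
 * is ufs_direnter() in ufs_lookup.c):
 *
 *	... bump ip->i_effnlink/i_nlink, copy the entry into bp ...
 *	ret = softdep_setup_directory_add(bp, dp, diroffset,
 *	    ip->i_number, newdirbp, isnewblk);
 *
 * A return of 1 asks the caller to sync the directory because the
 * entry landed in a newly allocated indirect block.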
*/ int softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk) struct buf *bp; /* buffer containing directory block */ struct inode *dp; /* inode for directory */ off_t diroffset; /* offset of new entry in directory */ ino_t newinum; /* inode referenced by new directory entry */ struct buf *newdirbp; /* non-NULL => contents of new mkdir */ int isnewblk; /* entry is in a newly allocated block */ { int offset; /* offset of new entry within directory block */ ufs_lbn_t lbn; /* block in directory containing new entry */ struct fs *fs; struct diradd *dap; struct newblk *newblk; struct pagedep *pagedep; struct inodedep *inodedep; struct newdirblk *newdirblk; struct mkdir *mkdir1, *mkdir2; struct jaddref *jaddref; struct ufsmount *ump; struct mount *mp; int isindir; mp = ITOVFS(dp); ump = VFSTOUFS(mp); KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_setup_directory_add called on non-softdep filesystem")); /* * Whiteouts have no dependencies. */ if (newinum == UFS_WINO) { if (newdirbp != NULL) bdwrite(newdirbp); return (0); } jaddref = NULL; mkdir1 = mkdir2 = NULL; fs = ump->um_fs; lbn = lblkno(fs, diroffset); offset = blkoff(fs, diroffset); dap = malloc(sizeof(struct diradd), M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO); workitem_alloc(&dap->da_list, D_DIRADD, mp); dap->da_offset = offset; dap->da_newinum = newinum; dap->da_state = ATTACHED; LIST_INIT(&dap->da_jwork); isindir = bp->b_lblkno >= UFS_NDADDR; newdirblk = NULL; if (isnewblk && (isindir ? blkoff(fs, diroffset) : fragoff(fs, diroffset)) == 0) { newdirblk = malloc(sizeof(struct newdirblk), M_NEWDIRBLK, M_SOFTDEP_FLAGS); workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp); LIST_INIT(&newdirblk->db_mkdir); } /* * If we're creating a new directory setup the dependencies and set * the dap state to wait for them. Otherwise it's COMPLETE and * we can move on. */ if (newdirbp == NULL) { dap->da_state |= DEPCOMPLETE; ACQUIRE_LOCK(ump); } else { dap->da_state |= MKDIR_BODY | MKDIR_PARENT; mkdir1 = setup_newdir(dap, newinum, dp->i_number, newdirbp, &mkdir2); } /* * Link into parent directory pagedep to await its being written. */ pagedep_lookup(mp, bp, dp->i_number, lbn, DEPALLOC, &pagedep); #ifdef DEBUG if (diradd_lookup(pagedep, offset) != NULL) panic("softdep_setup_directory_add: %p already at off %d\n", diradd_lookup(pagedep, offset), offset); #endif dap->da_pagedep = pagedep; LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap, da_pdlist); inodedep_lookup(mp, newinum, DEPALLOC, &inodedep); /* * If we're journaling, link the diradd into the jaddref so it * may be completed after the journal entry is written. Otherwise, * link the diradd into its inodedep. If the inode is not yet * written place it on the bufwait list, otherwise do the post-inode * write processing to put it on the id_pendinghd list. */ if (MOUNTEDSUJ(mp)) { jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, inoreflst); KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number, ("softdep_setup_directory_add: bad jaddref %p", jaddref)); jaddref->ja_diroff = diroffset; jaddref->ja_diradd = dap; add_to_journal(&jaddref->ja_list); } else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) diradd_inode_written(dap, inodedep); else WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); /* * Add the journal entries for . and .. links now that the primary * link is written. 
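 *
 * Illustrative journal order (annotation): the jaddrefs for a new
 * directory are queued as
 *	1) the primary name (with the diradd above),
 *	2) ".." (mkdir2),
 *	3) "."  (mkdir1),
 * since the write of "." commits both of the child's links; queueing
 * either before the primary link could let recovery observe link
 * counts that never existed on disk.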
 */
	if (mkdir1 != NULL && MOUNTEDSUJ(mp)) {
		jaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref,
		    inoreflst, if_deps);
		KASSERT(jaddref != NULL &&
		    jaddref->ja_ino == jaddref->ja_parent &&
		    (jaddref->ja_state & MKDIR_BODY),
		    ("softdep_setup_directory_add: bad dot jaddref %p",
		    jaddref));
		mkdir1->md_jaddref = jaddref;
		jaddref->ja_mkdir = mkdir1;
		/*
		 * It is important that the dotdot journal entry
		 * is added prior to the dot entry since dot writes
		 * both the dot and dotdot links.  These both must
		 * be added after the primary link for the journal
		 * to remain consistent.
		 */
		add_to_journal(&mkdir2->md_jaddref->ja_list);
		add_to_journal(&jaddref->ja_list);
	}
	/*
	 * If we are adding a new directory remember this diradd so that if
	 * we rename it we can keep the dot and dotdot dependencies.  If
	 * we are adding a new name for an inode that has a mkdiradd we
	 * must be in rename and we have to move the dot and dotdot
	 * dependencies to this new name.  The old name is being orphaned
	 * soon.
	 */
	if (mkdir1 != NULL) {
		if (inodedep->id_mkdiradd != NULL)
			panic("softdep_setup_directory_add: Existing mkdir");
		inodedep->id_mkdiradd = dap;
	} else if (inodedep->id_mkdiradd)
		merge_diradd(inodedep, dap);
	if (newdirblk != NULL) {
		/*
		 * There is nothing to do if we are already tracking
		 * this block.
		 */
		if ((pagedep->pd_state & NEWBLOCK) != 0) {
			WORKITEM_FREE(newdirblk, D_NEWDIRBLK);
			FREE_LOCK(ump);
			return (0);
		}
		if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk)
		    == 0)
			panic("softdep_setup_directory_add: lost entry");
		WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list);
		pagedep->pd_state |= NEWBLOCK;
		pagedep->pd_newdirblk = newdirblk;
		newdirblk->db_pagedep = pagedep;
		FREE_LOCK(ump);
		/*
		 * If we extended into an indirect, signal direnter to sync.
		 */
		if (isindir)
			return (1);
		return (0);
	}
	FREE_LOCK(ump);
	return (0);
}

/*
 * This procedure is called to change the offset of a directory
 * entry when compacting a directory block which must be owned
 * exclusively by the caller.  Note that the actual entry movement
 * must be done in this procedure to ensure that no I/O completions
 * occur while the move is in progress.
 */
void
softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize)
	struct buf *bp;		/* Buffer holding directory block. */
	struct inode *dp;	/* inode for directory */
	caddr_t base;		/* address of dp->i_offset */
	caddr_t oldloc;		/* address of old directory location */
	caddr_t newloc;		/* address of new directory location */
	int entrysize;		/* size of directory entry */
{
	int offset, oldoffset, newoffset;
	struct pagedep *pagedep;
	struct jmvref *jmvref;
	struct diradd *dap;
	struct direct *de;
	struct mount *mp;
	struct ufsmount *ump;
	ufs_lbn_t lbn;
	int flags;

	mp = ITOVFS(dp);
	ump = VFSTOUFS(mp);
	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
	    ("softdep_change_directoryentry_offset called on "
	     "non-softdep filesystem"));
	de = (struct direct *)oldloc;
	jmvref = NULL;
	flags = 0;
	/*
	 * Moves are always journaled as it would be too complex to
	 * determine if any affected adds or removes are present in the
	 * journal.
	 */
	if (MOUNTEDSUJ(mp)) {
		flags = DEPALLOC;
		jmvref = newjmvref(dp, de->d_ino,
		    dp->i_offset + (oldloc - base),
		    dp->i_offset + (newloc - base));
	}
	lbn = lblkno(ump->um_fs, dp->i_offset);
	offset = blkoff(ump->um_fs, dp->i_offset);
	oldoffset = offset + (oldloc - base);
	newoffset = offset + (newloc - base);
	ACQUIRE_LOCK(ump);
	if (pagedep_lookup(mp, bp, dp->i_number, lbn, flags, &pagedep) == 0)
		goto done;
	dap = diradd_lookup(pagedep, oldoffset);
	if (dap) {
		dap->da_offset = newoffset;
		newoffset = DIRADDHASH(newoffset);
		oldoffset = DIRADDHASH(oldoffset);
		if ((dap->da_state & ALLCOMPLETE) != ALLCOMPLETE &&
		    newoffset != oldoffset) {
			LIST_REMOVE(dap, da_pdlist);
			LIST_INSERT_HEAD(&pagedep->pd_diraddhd[newoffset],
			    dap, da_pdlist);
		}
	}
done:
	if (jmvref) {
		jmvref->jm_pagedep = pagedep;
		LIST_INSERT_HEAD(&pagedep->pd_jmvrefhd, jmvref, jm_deps);
		add_to_journal(&jmvref->jm_list);
	}
	bcopy(oldloc, newloc, entrysize);
	FREE_LOCK(ump);
}

/*
 * Move the mkdir dependencies and journal work from one diradd to another
 * when renaming a directory.  The new name must depend on the mkdir deps
 * completing as the old name did.  Directories can only have one valid link
 * at a time so one must be canonical.
 */
static void
merge_diradd(inodedep, newdap)
	struct inodedep *inodedep;
	struct diradd *newdap;
{
	struct diradd *olddap;
	struct mkdir *mkdir, *nextmd;
	struct ufsmount *ump;
	short state;

	olddap = inodedep->id_mkdiradd;
	inodedep->id_mkdiradd = newdap;
	if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
		newdap->da_state &= ~DEPCOMPLETE;
		ump = VFSTOUFS(inodedep->id_list.wk_mp);
		for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir;
		     mkdir = nextmd) {
			nextmd = LIST_NEXT(mkdir, md_mkdirs);
			if (mkdir->md_diradd != olddap)
				continue;
			mkdir->md_diradd = newdap;
			state = mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY);
			newdap->da_state |= state;
			olddap->da_state &= ~state;
			if ((olddap->da_state &
			    (MKDIR_PARENT | MKDIR_BODY)) == 0)
				break;
		}
		if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0)
			panic("merge_diradd: unfound ref");
	}
	/*
	 * Any mkdir related journal items are not safe to be freed until
	 * the new name is stable.
	 */
	jwork_move(&newdap->da_jwork, &olddap->da_jwork);
	olddap->da_state |= DEPCOMPLETE;
	complete_diradd(olddap);
}

/*
 * Move the diradd to the pending list when all diradd dependencies are
 * complete.
 */
static void
complete_diradd(dap)
	struct diradd *dap;
{
	struct pagedep *pagedep;

	if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
		if (dap->da_state & DIRCHG)
			pagedep = dap->da_previous->dm_pagedep;
		else
			pagedep = dap->da_pagedep;
		LIST_REMOVE(dap, da_pdlist);
		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
	}
}

/*
 * Cancel a diradd when a dirrem overlaps with it.  We must cancel the journal
 * add entries and conditionally journal the remove.
 */
static void
cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref)
	struct diradd *dap;
	struct dirrem *dirrem;
	struct jremref *jremref;
	struct jremref *dotremref;
	struct jremref *dotdotremref;
{
	struct inodedep *inodedep;
	struct jaddref *jaddref;
	struct inoref *inoref;
	struct ufsmount *ump;
	struct mkdir *mkdir;

	/*
	 * If no remove references were allocated we're on a non-journaled
	 * filesystem and can skip the cancel step.
	 */
	if (jremref == NULL) {
		free_diradd(dap, NULL);
		return;
	}
	/*
	 * Cancel the primary name and free it if it does not require
	 * journaling.
	 */
	if (inodedep_lookup(dap->da_list.wk_mp, dap->da_newinum,
	    0, &inodedep) != 0) {
		/* Abort the addref that references this diradd.
*/ TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) { if (inoref->if_list.wk_type != D_JADDREF) continue; jaddref = (struct jaddref *)inoref; if (jaddref->ja_diradd != dap) continue; if (cancel_jaddref(jaddref, inodedep, &dirrem->dm_jwork) == 0) { free_jremref(jremref); jremref = NULL; } break; } } /* * Cancel subordinate names and free them if they do not require * journaling. */ if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { ump = VFSTOUFS(dap->da_list.wk_mp); LIST_FOREACH(mkdir, &ump->softdep_mkdirlisthd, md_mkdirs) { if (mkdir->md_diradd != dap) continue; if ((jaddref = mkdir->md_jaddref) == NULL) continue; mkdir->md_jaddref = NULL; if (mkdir->md_state & MKDIR_PARENT) { if (cancel_jaddref(jaddref, NULL, &dirrem->dm_jwork) == 0) { free_jremref(dotdotremref); dotdotremref = NULL; } } else { if (cancel_jaddref(jaddref, inodedep, &dirrem->dm_jwork) == 0) { free_jremref(dotremref); dotremref = NULL; } } } } if (jremref) journal_jremref(dirrem, jremref, inodedep); if (dotremref) journal_jremref(dirrem, dotremref, inodedep); if (dotdotremref) journal_jremref(dirrem, dotdotremref, NULL); jwork_move(&dirrem->dm_jwork, &dap->da_jwork); free_diradd(dap, &dirrem->dm_jwork); } /* * Free a diradd dependency structure. This routine must be called * with splbio interrupts blocked. */ static void free_diradd(dap, wkhd) struct diradd *dap; struct workhead *wkhd; { struct dirrem *dirrem; struct pagedep *pagedep; struct inodedep *inodedep; struct mkdir *mkdir, *nextmd; struct ufsmount *ump; ump = VFSTOUFS(dap->da_list.wk_mp); LOCK_OWNED(ump); LIST_REMOVE(dap, da_pdlist); if (dap->da_state & ONWORKLIST) WORKLIST_REMOVE(&dap->da_list); if ((dap->da_state & DIRCHG) == 0) { pagedep = dap->da_pagedep; } else { dirrem = dap->da_previous; pagedep = dirrem->dm_pagedep; dirrem->dm_dirinum = pagedep->pd_ino; dirrem->dm_state |= COMPLETE; if (LIST_EMPTY(&dirrem->dm_jremrefhd)) add_to_worklist(&dirrem->dm_list, 0); } if (inodedep_lookup(pagedep->pd_list.wk_mp, dap->da_newinum, 0, &inodedep) != 0) if (inodedep->id_mkdiradd == dap) inodedep->id_mkdiradd = NULL; if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir; mkdir = nextmd) { nextmd = LIST_NEXT(mkdir, md_mkdirs); if (mkdir->md_diradd != dap) continue; dap->da_state &= ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)); LIST_REMOVE(mkdir, md_mkdirs); if (mkdir->md_state & ONWORKLIST) WORKLIST_REMOVE(&mkdir->md_list); if (mkdir->md_jaddref != NULL) panic("free_diradd: Unexpected jaddref"); WORKITEM_FREE(mkdir, D_MKDIR); if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) break; } if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) panic("free_diradd: unfound ref"); } if (inodedep) free_inodedep(inodedep); /* * Free any journal segments waiting for the directory write. */ handle_jwork(&dap->da_jwork); WORKITEM_FREE(dap, D_DIRADD); } /* * Directory entry removal dependencies. * * When removing a directory entry, the entry's inode pointer must be * zero'ed on disk before the corresponding inode's link count is decremented * (possibly freeing the inode for re-use). This dependency is handled by * updating the directory entry but delaying the inode count reduction until * after the directory block has been written to disk. After this point, the * inode count can be decremented whenever it is convenient. */ /* * This routine should be called immediately after removing * a directory entry. 
The inode's link count should not be
 * decremented by the calling procedure -- the soft updates
 * code will do this task when it is safe.
 */
void
softdep_setup_remove(bp, dp, ip, isrmdir)
	struct buf *bp;		/* buffer containing directory block */
	struct inode *dp;	/* inode for the directory being modified */
	struct inode *ip;	/* inode for directory entry being removed */
	int isrmdir;		/* indicates if doing RMDIR */
{
	struct dirrem *dirrem, *prevdirrem;
	struct inodedep *inodedep;
	struct ufsmount *ump;
	int direct;

	ump = ITOUMP(ip);
	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
	    ("softdep_setup_remove called on non-softdep filesystem"));
	/*
	 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK.  We want
	 * newdirrem() to setup the full directory remove which requires
	 * isrmdir > 1.
	 */
	dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
	/*
	 * Add the dirrem to the inodedep's pending remove list for quick
	 * discovery later.
	 */
	if (inodedep_lookup(UFSTOVFS(ump), ip->i_number, 0, &inodedep) == 0)
		panic("softdep_setup_remove: Lost inodedep.");
	KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked"));
	dirrem->dm_state |= ONDEPLIST;
	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);

	/*
	 * If the COMPLETE flag is clear, then there were no active
	 * entries and we want to roll back to a zeroed entry until
	 * the new inode is committed to disk.  If the COMPLETE flag is
	 * set then we have deleted an entry that never made it to
	 * disk.  If the entry we deleted resulted from a name change,
	 * then the old name still resides on disk.  We cannot delete
	 * its inode (returned to us in prevdirrem) until the zeroed
	 * directory entry gets to disk.  The new inode has never been
	 * referenced on the disk, so can be deleted immediately.
	 */
	if ((dirrem->dm_state & COMPLETE) == 0) {
		LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem,
		    dm_next);
		FREE_LOCK(ump);
	} else {
		if (prevdirrem != NULL)
			LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd,
			    prevdirrem, dm_next);
		dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino;
		direct = LIST_EMPTY(&dirrem->dm_jremrefhd);
		FREE_LOCK(ump);
		if (direct)
			handle_workitem_remove(dirrem, 0);
	}
}

/*
 * Check for an entry matching 'offset' on both the pd_diraddhd list and the
 * pd_pendinghd list of a pagedep.
 */
static struct diradd *
diradd_lookup(pagedep, offset)
	struct pagedep *pagedep;
	int offset;
{
	struct diradd *dap;

	LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist)
		if (dap->da_offset == offset)
			return (dap);
	LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
		if (dap->da_offset == offset)
			return (dap);
	return (NULL);
}

/*
 * Search for a .. diradd dependency in a directory that is being removed.
 * If the directory was renamed to a new parent we have a diradd rather
 * than a mkdir for the .. entry.  We need to cancel it now before
 * it is found in truncate().
 */
static struct jremref *
cancel_diradd_dotdot(ip, dirrem, jremref)
	struct inode *ip;
	struct dirrem *dirrem;
	struct jremref *jremref;
{
	struct pagedep *pagedep;
	struct diradd *dap;
	struct worklist *wk;

	if (pagedep_lookup(ITOVFS(ip), NULL, ip->i_number, 0, 0,
	    &pagedep) == 0)
		return (jremref);
	dap = diradd_lookup(pagedep, DOTDOT_OFFSET);
	if (dap == NULL)
		return (jremref);
	cancel_diradd(dap, dirrem, jremref, NULL, NULL);
	/*
	 * Mark any journal work as belonging to the parent so it is freed
	 * with the .. reference.
*/ LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list) wk->wk_state |= MKDIR_PARENT; return (NULL); } /* * Cancel the MKDIR_PARENT mkdir component of a diradd when we're going to * replace it with a dirrem/diradd pair as a result of re-parenting a * directory. This ensures that we don't simultaneously have a mkdir and * a diradd for the same .. entry. */ static struct jremref * cancel_mkdir_dotdot(ip, dirrem, jremref) struct inode *ip; struct dirrem *dirrem; struct jremref *jremref; { struct inodedep *inodedep; struct jaddref *jaddref; struct ufsmount *ump; struct mkdir *mkdir; struct diradd *dap; struct mount *mp; mp = ITOVFS(ip); if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) return (jremref); dap = inodedep->id_mkdiradd; if (dap == NULL || (dap->da_state & MKDIR_PARENT) == 0) return (jremref); ump = VFSTOUFS(inodedep->id_list.wk_mp); for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir; mkdir = LIST_NEXT(mkdir, md_mkdirs)) if (mkdir->md_diradd == dap && mkdir->md_state & MKDIR_PARENT) break; if (mkdir == NULL) panic("cancel_mkdir_dotdot: Unable to find mkdir\n"); if ((jaddref = mkdir->md_jaddref) != NULL) { mkdir->md_jaddref = NULL; jaddref->ja_state &= ~MKDIR_PARENT; if (inodedep_lookup(mp, jaddref->ja_ino, 0, &inodedep) == 0) panic("cancel_mkdir_dotdot: Lost parent inodedep"); if (cancel_jaddref(jaddref, inodedep, &dirrem->dm_jwork)) { journal_jremref(dirrem, jremref, inodedep); jremref = NULL; } } if (mkdir->md_state & ONWORKLIST) WORKLIST_REMOVE(&mkdir->md_list); mkdir->md_state |= ALLCOMPLETE; complete_mkdir(mkdir); return (jremref); } static void journal_jremref(dirrem, jremref, inodedep) struct dirrem *dirrem; struct jremref *jremref; struct inodedep *inodedep; { if (inodedep == NULL) if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 0, &inodedep) == 0) panic("journal_jremref: Lost inodedep"); LIST_INSERT_HEAD(&dirrem->dm_jremrefhd, jremref, jr_deps); TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps); add_to_journal(&jremref->jr_list); } static void dirrem_journal(dirrem, jremref, dotremref, dotdotremref) struct dirrem *dirrem; struct jremref *jremref; struct jremref *dotremref; struct jremref *dotdotremref; { struct inodedep *inodedep; if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 0, &inodedep) == 0) panic("dirrem_journal: Lost inodedep"); journal_jremref(dirrem, jremref, inodedep); if (dotremref) journal_jremref(dirrem, dotremref, inodedep); if (dotdotremref) journal_jremref(dirrem, dotdotremref, NULL); } /* * Allocate a new dirrem if appropriate and return it along with * its associated pagedep. Called without a lock, returns with lock. */ static struct dirrem * newdirrem(bp, dp, ip, isrmdir, prevdirremp) struct buf *bp; /* buffer containing directory block */ struct inode *dp; /* inode for the directory being modified */ struct inode *ip; /* inode for directory entry being removed */ int isrmdir; /* indicates if doing RMDIR */ struct dirrem **prevdirremp; /* previously referenced inode, if any */ { int offset; ufs_lbn_t lbn; struct diradd *dap; struct dirrem *dirrem; struct pagedep *pagedep; struct jremref *jremref; struct jremref *dotremref; struct jremref *dotdotremref; struct vnode *dvp; struct ufsmount *ump; /* * Whiteouts have no deletion dependencies. */ if (ip == NULL) panic("newdirrem: whiteout"); dvp = ITOV(dp); ump = ITOUMP(dp); /* * If the system is over its limit and our filesystem is * responsible for more than our share of that usage and * we are not a snapshot, request some inodedep cleanup. 
* Limiting the number of dirrem structures will also limit * the number of freefile and freeblks structures. */ ACQUIRE_LOCK(ump); if (!IS_SNAPSHOT(ip) && softdep_excess_items(ump, D_DIRREM)) schedule_cleanup(UFSTOVFS(ump)); else FREE_LOCK(ump); dirrem = malloc(sizeof(struct dirrem), M_DIRREM, M_SOFTDEP_FLAGS | M_ZERO); workitem_alloc(&dirrem->dm_list, D_DIRREM, dvp->v_mount); LIST_INIT(&dirrem->dm_jremrefhd); LIST_INIT(&dirrem->dm_jwork); dirrem->dm_state = isrmdir ? RMDIR : 0; dirrem->dm_oldinum = ip->i_number; *prevdirremp = NULL; /* * Allocate remove reference structures to track journal write * dependencies. We will always have one for the link and * when doing directories we will always have one more for dot. * When renaming a directory we skip the dotdot link change so * this is not needed. */ jremref = dotremref = dotdotremref = NULL; if (DOINGSUJ(dvp)) { if (isrmdir) { jremref = newjremref(dirrem, dp, ip, dp->i_offset, ip->i_effnlink + 2); dotremref = newjremref(dirrem, ip, ip, DOT_OFFSET, ip->i_effnlink + 1); dotdotremref = newjremref(dirrem, ip, dp, DOTDOT_OFFSET, dp->i_effnlink + 1); dotdotremref->jr_state |= MKDIR_PARENT; } else jremref = newjremref(dirrem, dp, ip, dp->i_offset, ip->i_effnlink + 1); } ACQUIRE_LOCK(ump); lbn = lblkno(ump->um_fs, dp->i_offset); offset = blkoff(ump->um_fs, dp->i_offset); pagedep_lookup(UFSTOVFS(ump), bp, dp->i_number, lbn, DEPALLOC, &pagedep); dirrem->dm_pagedep = pagedep; dirrem->dm_offset = offset; /* * If we're renaming a .. link to a new directory, cancel any * existing MKDIR_PARENT mkdir. If it has already been canceled * the jremref is preserved for any potential diradd in this * location. This can not coincide with a rmdir. */ if (dp->i_offset == DOTDOT_OFFSET) { if (isrmdir) panic("newdirrem: .. directory change during remove?"); jremref = cancel_mkdir_dotdot(dp, dirrem, jremref); } /* * If we're removing a directory search for the .. dependency now and * cancel it. Any pending journal work will be added to the dirrem * to be completed when the workitem remove completes. */ if (isrmdir) dotdotremref = cancel_diradd_dotdot(ip, dirrem, dotdotremref); /* * Check for a diradd dependency for the same directory entry. * If present, then both dependencies become obsolete and can * be de-allocated. */ dap = diradd_lookup(pagedep, offset); if (dap == NULL) { /* * Link the jremref structures into the dirrem so they are * written prior to the pagedep. */ if (jremref) dirrem_journal(dirrem, jremref, dotremref, dotdotremref); return (dirrem); } /* * Must be ATTACHED at this point. */ if ((dap->da_state & ATTACHED) == 0) panic("newdirrem: not ATTACHED"); if (dap->da_newinum != ip->i_number) panic("newdirrem: inum %ju should be %ju", (uintmax_t)ip->i_number, (uintmax_t)dap->da_newinum); /* * If we are deleting a changed name that never made it to disk, * then return the dirrem describing the previous inode (which * represents the inode currently referenced from this entry on disk). */ if ((dap->da_state & DIRCHG) != 0) { *prevdirremp = dap->da_previous; dap->da_state &= ~DIRCHG; dap->da_pagedep = pagedep; } /* * We are deleting an entry that never made it to disk. * Mark it COMPLETE so we can delete its inode immediately. 
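 *
 * Example (annotation): creating a name and removing it before the
 * directory block is ever written leaves a diradd with no on-disk
 * state.  The diradd and this dirrem cancel in memory via
 * cancel_diradd() below; nothing needs to be rolled back on disk and
 * the inode can be reclaimed without waiting for any write.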
*/ dirrem->dm_state |= COMPLETE; cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref); #ifdef SUJ_DEBUG if (isrmdir == 0) { struct worklist *wk; LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list) if (wk->wk_state & (MKDIR_BODY | MKDIR_PARENT)) panic("bad wk %p (0x%X)\n", wk, wk->wk_state); } #endif return (dirrem); } /* * Directory entry change dependencies. * * Changing an existing directory entry requires that an add operation * be completed first followed by a deletion. The semantics for the addition * are identical to the description of adding a new entry above except * that the rollback is to the old inode number rather than zero. Once * the addition dependency is completed, the removal is done as described * in the removal routine above. */ /* * This routine should be called immediately after changing * a directory entry. The inode's link count should not be * decremented by the calling procedure -- the soft updates * code will perform this task when it is safe. */ void softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir) struct buf *bp; /* buffer containing directory block */ struct inode *dp; /* inode for the directory being modified */ struct inode *ip; /* inode for directory entry being removed */ ino_t newinum; /* new inode number for changed entry */ int isrmdir; /* indicates if doing RMDIR */ { int offset; struct diradd *dap = NULL; struct dirrem *dirrem, *prevdirrem; struct pagedep *pagedep; struct inodedep *inodedep; struct jaddref *jaddref; struct mount *mp; struct ufsmount *ump; mp = ITOVFS(dp); ump = VFSTOUFS(mp); offset = blkoff(ump->um_fs, dp->i_offset); KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_setup_directory_change called on non-softdep filesystem")); /* * Whiteouts do not need diradd dependencies. */ if (newinum != UFS_WINO) { dap = malloc(sizeof(struct diradd), M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO); workitem_alloc(&dap->da_list, D_DIRADD, mp); dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE; dap->da_offset = offset; dap->da_newinum = newinum; LIST_INIT(&dap->da_jwork); } /* * Allocate a new dirrem and ACQUIRE_LOCK. */ dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); pagedep = dirrem->dm_pagedep; /* * The possible values for isrmdir: * 0 - non-directory file rename * 1 - directory rename within same directory * inum - directory rename to new directory of given inode number * When renaming to a new directory, we are both deleting and * creating a new directory entry, so the link count on the new * directory should not change. Thus we do not need the followup * dirrem which is usually done in handle_workitem_remove. We set * the DIRCHG flag to tell handle_workitem_remove to skip the * followup dirrem. */ if (isrmdir > 1) dirrem->dm_state |= DIRCHG; /* * Whiteouts have no additional dependencies, * so just put the dirrem on the correct list. */ if (newinum == UFS_WINO) { if ((dirrem->dm_state & COMPLETE) == 0) { LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem, dm_next); } else { dirrem->dm_dirinum = pagedep->pd_ino; if (LIST_EMPTY(&dirrem->dm_jremrefhd)) add_to_worklist(&dirrem->dm_list, 0); } FREE_LOCK(ump); return; } /* * Add the dirrem to the inodedep's pending remove list for quick * discovery later. A valid nlinkdelta ensures that this lookup * will not fail. 
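 *
 * Illustrative invariant (annotation, not in the original source):
 *
 *	inodedep->id_nlinkdelta == ip->i_nlink - ip->i_effnlink
 *
 * i.e. the number of link removals not yet safe to commit.  A file
 * whose last two names were just unlinked still has i_nlink == 2 but
 * i_effnlink == 0, giving a delta of 2 until the zeroed directory
 * entries reach the disk.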
 */
	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
		panic("softdep_setup_directory_change: Lost inodedep.");
	dirrem->dm_state |= ONDEPLIST;
	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
	/*
	 * If the COMPLETE flag is clear, then there were no active
	 * entries and we want to roll back to the previous inode until
	 * the new inode is committed to disk.  If the COMPLETE flag is
	 * set, then we have deleted an entry that never made it to disk.
	 * If the entry we deleted resulted from a name change, then the old
	 * inode reference still resides on disk.  Any rollback that we do
	 * needs to be to that old inode (returned to us in prevdirrem).  If
	 * the entry we deleted resulted from a create, then there is
	 * no entry on the disk, so we want to roll back to zero rather
	 * than the uncommitted inode.  In either of the COMPLETE cases we
	 * want to immediately free the unwritten and unreferenced inode.
	 */
	if ((dirrem->dm_state & COMPLETE) == 0) {
		dap->da_previous = dirrem;
	} else {
		if (prevdirrem != NULL) {
			dap->da_previous = prevdirrem;
		} else {
			dap->da_state &= ~DIRCHG;
			dap->da_pagedep = pagedep;
		}
		dirrem->dm_dirinum = pagedep->pd_ino;
		if (LIST_EMPTY(&dirrem->dm_jremrefhd))
			add_to_worklist(&dirrem->dm_list, 0);
	}
	/*
	 * Lookup the jaddref for this journal entry.  We must finish
	 * initializing it and make the diradd write dependent on it.
	 * If we're not journaling, put it on the id_bufwait list if the
	 * inode is not yet written.  If it is written, do the post-inode
	 * write processing to put it on the id_pendinghd list.
	 */
	inodedep_lookup(mp, newinum, DEPALLOC, &inodedep);
	if (MOUNTEDSUJ(mp)) {
		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
		    inoreflst);
		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
		    ("softdep_setup_directory_change: bad jaddref %p",
		    jaddref));
		jaddref->ja_diroff = dp->i_offset;
		jaddref->ja_diradd = dap;
		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
		    dap, da_pdlist);
		add_to_journal(&jaddref->ja_list);
	} else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
		dap->da_state |= COMPLETE;
		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
		WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
	} else {
		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
		    dap, da_pdlist);
		WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
	}
	/*
	 * If we're making a new name for a directory that has not been
	 * committed we need to move the dot and dotdot references to
	 * this new name.
	 */
	if (inodedep->id_mkdiradd && dp->i_offset != DOTDOT_OFFSET)
		merge_diradd(inodedep, dap);
	FREE_LOCK(ump);
}

/*
 * Called whenever the link count on an inode is changed.
 * It creates an inode dependency so that the new reference(s)
 * to the inode cannot be committed to disk until the updated
 * inode has been written.
 */
void
softdep_change_linkcnt(ip)
	struct inode *ip;	/* the inode with the increased link count */
{
	struct inodedep *inodedep;
	struct ufsmount *ump;

	ump = ITOUMP(ip);
	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
	    ("softdep_change_linkcnt called on non-softdep filesystem"));
	ACQUIRE_LOCK(ump);
	inodedep_lookup(UFSTOVFS(ump), ip->i_number, DEPALLOC, &inodedep);
	if (ip->i_nlink < ip->i_effnlink)
		panic("softdep_change_linkcnt: bad delta");
	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
	FREE_LOCK(ump);
}

/*
 * Attach a sbdep dependency to the superblock buf so that we can keep
 * track of the head of the linked list of referenced but unlinked inodes.
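 *
 * Illustrative layout (annotation, SUJ only): on disk the unlinked
 * but still referenced inodes form a singly linked list,
 *
 *	fs_sujfree -> ino A -> (A's di_freelink) -> ino B -> ... -> 0
 *
 * with the head in the superblock and each dinode's di_freelink
 * naming its successor.  Journal recovery walks this list to finish
 * removing inodes that were open at crash time.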
*/ void softdep_setup_sbupdate(ump, fs, bp) struct ufsmount *ump; struct fs *fs; struct buf *bp; { struct sbdep *sbdep; struct worklist *wk; KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, ("softdep_setup_sbupdate called on non-softdep filesystem")); LIST_FOREACH(wk, &bp->b_dep, wk_list) if (wk->wk_type == D_SBDEP) break; if (wk != NULL) return; sbdep = malloc(sizeof(struct sbdep), M_SBDEP, M_SOFTDEP_FLAGS); workitem_alloc(&sbdep->sb_list, D_SBDEP, UFSTOVFS(ump)); sbdep->sb_fs = fs; sbdep->sb_ump = ump; ACQUIRE_LOCK(ump); WORKLIST_INSERT(&bp->b_dep, &sbdep->sb_list); FREE_LOCK(ump); } /* * Return the first unlinked inodedep which is ready to be the head of the * list. The inodedep and all those after it must have valid next pointers. */ static struct inodedep * first_unlinked_inodedep(ump) struct ufsmount *ump; { struct inodedep *inodedep; struct inodedep *idp; LOCK_OWNED(ump); for (inodedep = TAILQ_LAST(&ump->softdep_unlinked, inodedeplst); inodedep; inodedep = idp) { if ((inodedep->id_state & UNLINKNEXT) == 0) return (NULL); idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked); if (idp == NULL || (idp->id_state & UNLINKNEXT) == 0) break; if ((inodedep->id_state & UNLINKPREV) == 0) break; } return (inodedep); } /* * Set the sujfree unlinked head pointer prior to writing a superblock. */ static void initiate_write_sbdep(sbdep) struct sbdep *sbdep; { struct inodedep *inodedep; struct fs *bpfs; struct fs *fs; bpfs = sbdep->sb_fs; fs = sbdep->sb_ump->um_fs; inodedep = first_unlinked_inodedep(sbdep->sb_ump); if (inodedep) { fs->fs_sujfree = inodedep->id_ino; inodedep->id_state |= UNLINKPREV; } else fs->fs_sujfree = 0; bpfs->fs_sujfree = fs->fs_sujfree; } /* * After a superblock is written determine whether it must be written again * due to a changing unlinked list head. */ static int handle_written_sbdep(sbdep, bp) struct sbdep *sbdep; struct buf *bp; { struct inodedep *inodedep; struct fs *fs; LOCK_OWNED(sbdep->sb_ump); fs = sbdep->sb_fs; /* * If the superblock doesn't match the in-memory list start over. */ inodedep = first_unlinked_inodedep(sbdep->sb_ump); if ((inodedep && fs->fs_sujfree != inodedep->id_ino) || (inodedep == NULL && fs->fs_sujfree != 0)) { bdirty(bp); return (1); } WORKITEM_FREE(sbdep, D_SBDEP); if (fs->fs_sujfree == 0) return (0); /* * Now that we have a record of this inode in stable store allow it * to be written to free up pending work. Inodes may see a lot of * write activity after they are unlinked which we must not hold up. */ for (; inodedep != NULL; inodedep = TAILQ_NEXT(inodedep, id_unlinked)) { if ((inodedep->id_state & UNLINKLINKS) != UNLINKLINKS) panic("handle_written_sbdep: Bad inodedep %p (0x%X)", inodedep, inodedep->id_state); if (inodedep->id_state & UNLINKONLIST) break; inodedep->id_state |= DEPCOMPLETE | UNLINKONLIST; } return (0); } /* * Mark an inodedep as unlinked and insert it into the in-memory unlinked list. */ static void unlinked_inodedep(mp, inodedep) struct mount *mp; struct inodedep *inodedep; { struct ufsmount *ump; ump = VFSTOUFS(mp); LOCK_OWNED(ump); if (MOUNTEDSUJ(mp) == 0) return; ump->um_fs->fs_fmod = 1; if (inodedep->id_state & UNLINKED) panic("unlinked_inodedep: %p already unlinked\n", inodedep); inodedep->id_state |= UNLINKED; TAILQ_INSERT_HEAD(&ump->softdep_unlinked, inodedep, id_unlinked); } /* * Remove an inodedep from the unlinked inodedep list. This may require * disk writes if the inode has made it that far. 
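 *
 * Illustrative note (annotation): unlinking B from the on-disk chain
 *
 *	sujfree -> A -> B -> C
 *
 * requires rewriting the predecessor's pointer (A's di_freelink, or
 * fs_sujfree itself when B is the head) to name C before B's slot may
 * be reused.  The loop below drops the lock to fetch the predecessor's
 * buffer and restarts whenever the list changed in the meantime.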
*/ static void clear_unlinked_inodedep(inodedep) struct inodedep *inodedep; { struct ufsmount *ump; struct inodedep *idp; struct inodedep *idn; struct fs *fs; struct buf *bp; ino_t ino; ino_t nino; ino_t pino; int error; ump = VFSTOUFS(inodedep->id_list.wk_mp); fs = ump->um_fs; ino = inodedep->id_ino; error = 0; for (;;) { LOCK_OWNED(ump); KASSERT((inodedep->id_state & UNLINKED) != 0, ("clear_unlinked_inodedep: inodedep %p not unlinked", inodedep)); /* * If nothing has yet been written simply remove us from * the in memory list and return. This is the most common * case where handle_workitem_remove() loses the final * reference. */ if ((inodedep->id_state & UNLINKLINKS) == 0) break; /* * If we have a NEXT pointer and no PREV pointer we can simply * clear NEXT's PREV and remove ourselves from the list. Be * careful not to clear PREV if the superblock points at * next as well. */ idn = TAILQ_NEXT(inodedep, id_unlinked); if ((inodedep->id_state & UNLINKLINKS) == UNLINKNEXT) { if (idn && fs->fs_sujfree != idn->id_ino) idn->id_state &= ~UNLINKPREV; break; } /* * Here we have an inodedep which is actually linked into * the list. We must remove it by forcing a write to the * link before us, whether it be the superblock or an inode. * Unfortunately the list may change while we're waiting * on the buf lock for either resource so we must loop until * we lock the right one. If both the superblock and an * inode point to this inode we must clear the inode first * followed by the superblock. */ idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked); pino = 0; if (idp && (idp->id_state & UNLINKNEXT)) pino = idp->id_ino; FREE_LOCK(ump); if (pino == 0) { bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc), (int)fs->fs_sbsize, 0, 0, 0); } else { error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, pino)), (int)fs->fs_bsize, NOCRED, &bp); if (error) brelse(bp); } ACQUIRE_LOCK(ump); if (error) break; /* If the list has changed restart the loop. */ idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked); nino = 0; if (idp && (idp->id_state & UNLINKNEXT)) nino = idp->id_ino; if (nino != pino || (inodedep->id_state & UNLINKPREV) != UNLINKPREV) { FREE_LOCK(ump); brelse(bp); ACQUIRE_LOCK(ump); continue; } nino = 0; idn = TAILQ_NEXT(inodedep, id_unlinked); if (idn) nino = idn->id_ino; /* * Remove us from the in memory list. After this we cannot * access the inodedep. */ KASSERT((inodedep->id_state & UNLINKED) != 0, ("clear_unlinked_inodedep: inodedep %p not unlinked", inodedep)); inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST); TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked); FREE_LOCK(ump); /* * The predecessor's next pointer is manually updated here * so that the NEXT flag is never cleared for an element * that is in the list. */ if (pino == 0) { bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize); ffs_oldfscompat_write((struct fs *)bp->b_data, ump); softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp); } else if (fs->fs_magic == FS_UFS1_MAGIC) ((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, pino))->di_freelink = nino; else ((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, pino))->di_freelink = nino; /* * If the bwrite fails we have no recourse to recover. The * filesystem is corrupted already. */ bwrite(bp); ACQUIRE_LOCK(ump); /* * If the superblock pointer still needs to be cleared force * a write here. 
 */
		if (fs->fs_sujfree == ino) {
			FREE_LOCK(ump);
			bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
			    (int)fs->fs_sbsize, 0, 0, 0);
			bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
			ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
			softdep_setup_sbupdate(ump, (struct fs *)bp->b_data,
			    bp);
			bwrite(bp);
			ACQUIRE_LOCK(ump);
		}
		if (fs->fs_sujfree != ino)
			return;
		panic("clear_unlinked_inodedep: Failed to clear free head");
	}
	if (inodedep->id_ino == fs->fs_sujfree)
		panic("clear_unlinked_inodedep: Freeing head of free list");
	inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
	TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
	return;
}

/*
 * This workitem decrements the inode's link count.
 * If the link count reaches zero, the file is removed.
 */
static int
handle_workitem_remove(dirrem, flags)
	struct dirrem *dirrem;
	int flags;
{
	struct inodedep *inodedep;
	struct workhead dotdotwk;
	struct worklist *wk;
	struct ufsmount *ump;
	struct mount *mp;
	struct vnode *vp;
	struct inode *ip;
	ino_t oldinum;

	if (dirrem->dm_state & ONWORKLIST)
		panic("handle_workitem_remove: dirrem %p still on worklist",
		    dirrem);
	oldinum = dirrem->dm_oldinum;
	mp = dirrem->dm_list.wk_mp;
	ump = VFSTOUFS(mp);
	flags |= LK_EXCLUSIVE;
	if (ffs_vgetf(mp, oldinum, flags, &vp, FFSV_FORCEINSMQ) != 0)
		return (EBUSY);
	ip = VTOI(vp);
	ACQUIRE_LOCK(ump);
	if ((inodedep_lookup(mp, oldinum, 0, &inodedep)) == 0)
		panic("handle_workitem_remove: lost inodedep");
	if (dirrem->dm_state & ONDEPLIST)
		LIST_REMOVE(dirrem, dm_inonext);
	KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd),
	    ("handle_workitem_remove: Journal entries not written."));

	/*
	 * Move all dependencies waiting on the remove to complete
	 * from the dirrem to the inode inowait list to be completed
	 * after the inode has been updated and written to disk.  Any
	 * marked MKDIR_PARENT are saved to be completed when the .. ref
	 * is removed.
	 */
	LIST_INIT(&dotdotwk);
	while ((wk = LIST_FIRST(&dirrem->dm_jwork)) != NULL) {
		WORKLIST_REMOVE(wk);
		if (wk->wk_state & MKDIR_PARENT) {
			wk->wk_state &= ~MKDIR_PARENT;
			WORKLIST_INSERT(&dotdotwk, wk);
			continue;
		}
		WORKLIST_INSERT(&inodedep->id_inowait, wk);
	}
	LIST_SWAP(&dirrem->dm_jwork, &dotdotwk, worklist, wk_list);
	/*
	 * Normal file deletion.
	 */
	if ((dirrem->dm_state & RMDIR) == 0) {
		ip->i_nlink--;
		DIP_SET(ip, i_nlink, ip->i_nlink);
		ip->i_flag |= IN_CHANGE;
		if (ip->i_nlink < ip->i_effnlink)
			panic("handle_workitem_remove: bad file delta");
		if (ip->i_nlink == 0)
			unlinked_inodedep(mp, inodedep);
		inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
		KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
		    ("handle_workitem_remove: worklist not empty. %s",
		    TYPENAME(LIST_FIRST(&dirrem->dm_jwork)->wk_type)));
		WORKITEM_FREE(dirrem, D_DIRREM);
		FREE_LOCK(ump);
		goto out;
	}
	/*
	 * Directory deletion.  Decrement reference count for both the
	 * just deleted parent directory entry and the reference for ".".
	 * Arrange to have the reference count on the parent decremented
	 * to account for the loss of "..".
	 */
	ip->i_nlink -= 2;
	DIP_SET(ip, i_nlink, ip->i_nlink);
	ip->i_flag |= IN_CHANGE;
	if (ip->i_nlink < ip->i_effnlink)
		panic("handle_workitem_remove: bad dir delta");
	if (ip->i_nlink == 0)
		unlinked_inodedep(mp, inodedep);
	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
	/*
	 * Rename a directory to a new parent.  Since we are both deleting
	 * and creating a new directory entry, the link count on the new
	 * directory should not change.  Thus we skip the followup dirrem.
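 *
 * Example (annotation, per the description above): when a directory is
 * renamed over an existing entry, the parent simultaneously loses one
 * ".." reference and gains another, so its net link count is
 * unchanged; running the usual followup dirrem here would decrement
 * it once too many.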
*/ if (dirrem->dm_state & DIRCHG) { KASSERT(LIST_EMPTY(&dirrem->dm_jwork), ("handle_workitem_remove: DIRCHG and worklist not empty.")); WORKITEM_FREE(dirrem, D_DIRREM); FREE_LOCK(ump); goto out; } dirrem->dm_state = ONDEPLIST; dirrem->dm_oldinum = dirrem->dm_dirinum; /* * Place the dirrem on the parent's diremhd list. */ if (inodedep_lookup(mp, dirrem->dm_oldinum, 0, &inodedep) == 0) panic("handle_workitem_remove: lost dir inodedep"); LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext); /* * If the allocated inode has never been written to disk, then * the on-disk inode is zero'ed and we can remove the file * immediately. When journaling if the inode has been marked * unlinked and not DEPCOMPLETE we know it can never be written. */ inodedep_lookup(mp, oldinum, 0, &inodedep); if (inodedep == NULL || (inodedep->id_state & (DEPCOMPLETE | UNLINKED)) == UNLINKED || check_inode_unwritten(inodedep)) { FREE_LOCK(ump); vput(vp); return handle_workitem_remove(dirrem, flags); } WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list); FREE_LOCK(ump); ip->i_flag |= IN_CHANGE; out: ffs_update(vp, 0); vput(vp); return (0); } /* * Inode de-allocation dependencies. * * When an inode's link count is reduced to zero, it can be de-allocated. We * found it convenient to postpone de-allocation until after the inode is * written to disk with its new link count (zero). At this point, all of the * on-disk inode's block pointers are nullified and, with careful dependency * list ordering, all dependencies related to the inode will be satisfied and * the corresponding dependency structures de-allocated. So, if/when the * inode is reused, there will be no mixing of old dependencies with new * ones. This artificial dependency is set up by the block de-allocation * procedure above (softdep_setup_freeblocks) and completed by the * following procedure. */ static void handle_workitem_freefile(freefile) struct freefile *freefile; { struct workhead wkhd; struct fs *fs; struct inodedep *idp; struct ufsmount *ump; int error; ump = VFSTOUFS(freefile->fx_list.wk_mp); fs = ump->um_fs; #ifdef DEBUG ACQUIRE_LOCK(ump); error = inodedep_lookup(UFSTOVFS(ump), freefile->fx_oldinum, 0, &idp); FREE_LOCK(ump); if (error) panic("handle_workitem_freefile: inodedep %p survived", idp); #endif UFS_LOCK(ump); fs->fs_pendinginodes -= 1; UFS_UNLOCK(ump); LIST_INIT(&wkhd); LIST_SWAP(&freefile->fx_jwork, &wkhd, worklist, wk_list); if ((error = ffs_freefile(ump, fs, freefile->fx_devvp, freefile->fx_oldinum, freefile->fx_mode, &wkhd)) != 0) softdep_error("handle_workitem_freefile", error); ACQUIRE_LOCK(ump); WORKITEM_FREE(freefile, D_FREEFILE); FREE_LOCK(ump); } /* * Helper function which unlinks marker element from work list and returns * the next element on the list. */ static __inline struct worklist * markernext(struct worklist *marker) { struct worklist *next; next = LIST_NEXT(marker, wk_list); LIST_REMOVE(marker, wk_list); return next; } /* * Disk writes. * * The dependency structures constructed above are most actively used when file * system blocks are written to disk. No constraints are placed on when a * block can be written, but unsatisfied update dependencies are made safe by * modifying (or replacing) the source memory for the duration of the disk * write. When the disk write completes, the memory block is again brought * up-to-date. * * In-core inode structure reclamation. * * Because there are a finite number of "in-core" inode structures, they are * reused regularly. 
By transferring all inode-related dependencies to the * in-memory inode block and indexing them separately (via "inodedep"s), we * can allow "in-core" inode structures to be reused at any time and avoid * any increase in contention. * * Called just before entering the device driver to initiate a new disk I/O. * The buffer must be locked, thus, no I/O completion operations can occur * while we are manipulating its associated dependencies. */ static void softdep_disk_io_initiation(bp) struct buf *bp; /* structure describing disk write to occur */ { struct worklist *wk; struct worklist marker; struct inodedep *inodedep; struct freeblks *freeblks; struct jblkdep *jblkdep; struct newblk *newblk; struct ufsmount *ump; /* * We only care about write operations. There should never * be dependencies for reads. */ if (bp->b_iocmd != BIO_WRITE) panic("softdep_disk_io_initiation: not write"); if (bp->b_vflags & BV_BKGRDINPROG) panic("softdep_disk_io_initiation: Writing buffer with " "background write in progress: %p", bp); if ((wk = LIST_FIRST(&bp->b_dep)) == NULL) return; ump = VFSTOUFS(wk->wk_mp); marker.wk_type = D_LAST + 1; /* Not a normal workitem */ PHOLD(curproc); /* Don't swap out kernel stack */ ACQUIRE_LOCK(ump); /* * Do any necessary pre-I/O processing. */ for (wk = LIST_FIRST(&bp->b_dep); wk != NULL; wk = markernext(&marker)) { LIST_INSERT_AFTER(wk, &marker, wk_list); switch (wk->wk_type) { case D_PAGEDEP: initiate_write_filepage(WK_PAGEDEP(wk), bp); continue; case D_INODEDEP: inodedep = WK_INODEDEP(wk); if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) initiate_write_inodeblock_ufs1(inodedep, bp); else initiate_write_inodeblock_ufs2(inodedep, bp); continue; case D_INDIRDEP: initiate_write_indirdep(WK_INDIRDEP(wk), bp); continue; case D_BMSAFEMAP: initiate_write_bmsafemap(WK_BMSAFEMAP(wk), bp); continue; case D_JSEG: WK_JSEG(wk)->js_buf = NULL; continue; case D_FREEBLKS: freeblks = WK_FREEBLKS(wk); jblkdep = LIST_FIRST(&freeblks->fb_jblkdephd); /* * We have to wait for the freeblks to be journaled * before we can write an inodeblock with updated * pointers. Be careful to arrange the marker so * we revisit the freeblks if it's not removed by * the first jwait(). */ if (jblkdep != NULL) { LIST_REMOVE(&marker, wk_list); LIST_INSERT_BEFORE(wk, &marker, wk_list); jwait(&jblkdep->jb_list, MNT_WAIT); } continue; case D_ALLOCDIRECT: case D_ALLOCINDIR: /* * We have to wait for the jnewblk to be journaled * before we can write to a block if the contents * may be confused with an earlier file's indirect * at recovery time. Handle the marker as described * above. */ newblk = WK_NEWBLK(wk); if (newblk->nb_jnewblk != NULL && indirblk_lookup(newblk->nb_list.wk_mp, newblk->nb_newblkno)) { LIST_REMOVE(&marker, wk_list); LIST_INSERT_BEFORE(wk, &marker, wk_list); jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT); } continue; case D_SBDEP: initiate_write_sbdep(WK_SBDEP(wk)); continue; case D_MKDIR: case D_FREEWORK: case D_FREEDEP: case D_JSEGDEP: continue; default: panic("handle_disk_io_initiation: Unexpected type %s", TYPENAME(wk->wk_type)); /* NOTREACHED */ } } FREE_LOCK(ump); PRELE(curproc); /* Allow swapout of kernel stack */ } /* * Called from within the procedure above to deal with unsatisfied * allocation dependencies in a directory. The buffer must be locked, * thus, no I/O completion operations can occur while we are * manipulating its associated dependencies. 
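 *
 * Illustrative rollback (annotation): if an entry for inode 1234 has
 * a pending diradd, the bytes handed to the driver carry d_ino == 0
 * (or the prior inode number for a DIRCHG entry); the completion path
 * (handle_written_filepage()) rolls d_ino forward to 1234 in the
 * in-core buffer, so the on-disk directory never names an inode that
 * is not yet initialized on disk.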
*/ static void initiate_write_filepage(pagedep, bp) struct pagedep *pagedep; struct buf *bp; { struct jremref *jremref; struct jmvref *jmvref; struct dirrem *dirrem; struct diradd *dap; struct direct *ep; int i; if (pagedep->pd_state & IOSTARTED) { /* * This can only happen if there is a driver that does not * understand chaining. Here biodone will reissue the call * to strategy for the incomplete buffers. */ printf("initiate_write_filepage: already started\n"); return; } pagedep->pd_state |= IOSTARTED; /* * Wait for all journal remove dependencies to hit the disk. * We can not allow any potentially conflicting directory adds * to be visible before removes and rollback is too difficult. * The per-filesystem lock may be dropped and re-acquired, however * we hold the buf locked so the dependency can not go away. */ LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL) jwait(&jremref->jr_list, MNT_WAIT); while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL) jwait(&jmvref->jm_list, MNT_WAIT); for (i = 0; i < DAHASHSZ; i++) { LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { ep = (struct direct *) ((char *)bp->b_data + dap->da_offset); if (ep->d_ino != dap->da_newinum) panic("%s: dir inum %ju != new %ju", "initiate_write_filepage", (uintmax_t)ep->d_ino, (uintmax_t)dap->da_newinum); if (dap->da_state & DIRCHG) ep->d_ino = dap->da_previous->dm_oldinum; else ep->d_ino = 0; dap->da_state &= ~ATTACHED; dap->da_state |= UNDONE; } } } /* * Version of initiate_write_inodeblock that handles UFS1 dinodes. * Note that any bug fixes made to this routine must be done in the * version found below. * * Called from within the procedure above to deal with unsatisfied * allocation dependencies in an inodeblock. The buffer must be * locked, thus, no I/O completion operations can occur while we * are manipulating its associated dependencies. */ static void initiate_write_inodeblock_ufs1(inodedep, bp) struct inodedep *inodedep; struct buf *bp; /* The inode block */ { struct allocdirect *adp, *lastadp; struct ufs1_dinode *dp; struct ufs1_dinode *sip; struct inoref *inoref; struct ufsmount *ump; struct fs *fs; ufs_lbn_t i; #ifdef INVARIANTS ufs_lbn_t prevlbn = 0; #endif int deplist; if (inodedep->id_state & IOSTARTED) panic("initiate_write_inodeblock_ufs1: already started"); inodedep->id_state |= IOSTARTED; fs = inodedep->id_fs; ump = VFSTOUFS(inodedep->id_list.wk_mp); LOCK_OWNED(ump); dp = (struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, inodedep->id_ino); /* * If we're on the unlinked list but have not yet written our * next pointer initialize it here. */ if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) { struct inodedep *inon; inon = TAILQ_NEXT(inodedep, id_unlinked); dp->di_freelink = inon ? inon->id_ino : 0; } /* * If the bitmap is not yet written, then the allocated * inode cannot be written to disk. */ if ((inodedep->id_state & DEPCOMPLETE) == 0) { if (inodedep->id_savedino1 != NULL) panic("initiate_write_inodeblock_ufs1: I/O underway"); FREE_LOCK(ump); sip = malloc(sizeof(struct ufs1_dinode), M_SAVEDINO, M_SOFTDEP_FLAGS); ACQUIRE_LOCK(ump); inodedep->id_savedino1 = sip; *inodedep->id_savedino1 = *dp; bzero((caddr_t)dp, sizeof(struct ufs1_dinode)); dp->di_gen = inodedep->id_savedino1->di_gen; dp->di_freelink = inodedep->id_savedino1->di_freelink; return; } /* * If no dependencies, then there is nothing to roll back. 
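 *
 * Illustrative rollback (annotation): the true values are saved in
 * id_savedsize/id_savednlink below, then di_size and di_nlink are
 * trimmed back to what is already stable (the last safely written
 * block, and the link count covered by written journal entries).  The
 * completion handler restores the saved values once this inode block
 * is on disk.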
 */
	inodedep->id_savedsize = dp->di_size;
	inodedep->id_savedextsize = 0;
	inodedep->id_savednlink = dp->di_nlink;
	if (TAILQ_EMPTY(&inodedep->id_inoupdt) &&
	    TAILQ_EMPTY(&inodedep->id_inoreflst))
		return;
	/*
	 * Revert the link count to that of the first unwritten journal entry.
	 */
	inoref = TAILQ_FIRST(&inodedep->id_inoreflst);
	if (inoref)
		dp->di_nlink = inoref->if_nlink;
	/*
	 * Set the dependencies to busy.
	 */
	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
	     adp = TAILQ_NEXT(adp, ad_next)) {
#ifdef INVARIANTS
		if (deplist != 0 && prevlbn >= adp->ad_offset)
			panic("softdep_write_inodeblock: lbn order");
		prevlbn = adp->ad_offset;
		if (adp->ad_offset < UFS_NDADDR &&
		    dp->di_db[adp->ad_offset] != adp->ad_newblkno)
			panic("%s: direct pointer #%jd mismatch %d != %jd",
			    "softdep_write_inodeblock",
			    (intmax_t)adp->ad_offset,
			    dp->di_db[adp->ad_offset],
			    (intmax_t)adp->ad_newblkno);
		if (adp->ad_offset >= UFS_NDADDR &&
		    dp->di_ib[adp->ad_offset - UFS_NDADDR] != adp->ad_newblkno)
			panic("%s: indirect pointer #%jd mismatch %d != %jd",
			    "softdep_write_inodeblock",
			    (intmax_t)adp->ad_offset - UFS_NDADDR,
			    dp->di_ib[adp->ad_offset - UFS_NDADDR],
			    (intmax_t)adp->ad_newblkno);
		deplist |= 1 << adp->ad_offset;
		if ((adp->ad_state & ATTACHED) == 0)
			panic("softdep_write_inodeblock: Unknown state 0x%x",
			    adp->ad_state);
#endif /* INVARIANTS */
		adp->ad_state &= ~ATTACHED;
		adp->ad_state |= UNDONE;
	}
	/*
	 * The on-disk inode cannot claim to be any larger than the last
	 * fragment that has been written. Otherwise, the on-disk inode
	 * might have fragments that were not the last block in the file
	 * which would corrupt the filesystem.
	 */
	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
		if (adp->ad_offset >= UFS_NDADDR)
			break;
		dp->di_db[adp->ad_offset] = adp->ad_oldblkno;
		/* keep going until hitting a rollback to a frag */
		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
			continue;
		dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
		for (i = adp->ad_offset + 1; i < UFS_NDADDR; i++) {
#ifdef INVARIANTS
			if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
				panic("softdep_write_inodeblock: lost dep1");
#endif /* INVARIANTS */
			dp->di_db[i] = 0;
		}
		for (i = 0; i < UFS_NIADDR; i++) {
#ifdef INVARIANTS
			if (dp->di_ib[i] != 0 &&
			    (deplist & ((1 << UFS_NDADDR) << i)) == 0)
				panic("softdep_write_inodeblock: lost dep2");
#endif /* INVARIANTS */
			dp->di_ib[i] = 0;
		}
		return;
	}
	/*
	 * If we have zeroed out the last allocated block of the file,
	 * roll back the size to the last currently allocated block.
	 * We know that this last allocated block is full-sized, as
	 * we already checked for fragments in the loop above.
	 */
	if (lastadp != NULL &&
	    dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
		for (i = lastadp->ad_offset; i >= 0; i--)
			if (dp->di_db[i] != 0)
				break;
		dp->di_size = (i + 1) * fs->fs_bsize;
	}
	/*
	 * The only dependencies are for indirect blocks.
	 *
	 * The file size for indirect block additions is not guaranteed.
	 * Such a guarantee would be non-trivial to achieve. The conventional
	 * synchronous write implementation also does not make this guarantee.
	 * Fsck should catch and fix discrepancies. Arguably, the file size
	 * can be over-estimated without destroying integrity when the file
	 * moves into the indirect blocks (i.e., is large). If we want to
	 * postpone fsck, we are stuck with this argument.
	 */
	for (; adp; adp = TAILQ_NEXT(adp, ad_next))
		dp->di_ib[adp->ad_offset - UFS_NDADDR] = 0;
}

/*
 * Version of initiate_write_inodeblock that handles UFS2 dinodes.
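 * In addition to the direct and indirect block pointers handled by the
 * UFS1 version, this one must also roll back the external attribute
 * block pointers (di_extb) and di_extsize.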
* Note that any bug fixes made to this routine must be done in the * version found above. * * Called from within the procedure above to deal with unsatisfied * allocation dependencies in an inodeblock. The buffer must be * locked, thus, no I/O completion operations can occur while we * are manipulating its associated dependencies. */ static void initiate_write_inodeblock_ufs2(inodedep, bp) struct inodedep *inodedep; struct buf *bp; /* The inode block */ { struct allocdirect *adp, *lastadp; struct ufs2_dinode *dp; struct ufs2_dinode *sip; struct inoref *inoref; struct ufsmount *ump; struct fs *fs; ufs_lbn_t i; #ifdef INVARIANTS ufs_lbn_t prevlbn = 0; #endif int deplist; if (inodedep->id_state & IOSTARTED) panic("initiate_write_inodeblock_ufs2: already started"); inodedep->id_state |= IOSTARTED; fs = inodedep->id_fs; ump = VFSTOUFS(inodedep->id_list.wk_mp); LOCK_OWNED(ump); dp = (struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, inodedep->id_ino); /* * If we're on the unlinked list but have not yet written our * next pointer initialize it here. */ if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) { struct inodedep *inon; inon = TAILQ_NEXT(inodedep, id_unlinked); dp->di_freelink = inon ? inon->id_ino : 0; } /* * If the bitmap is not yet written, then the allocated * inode cannot be written to disk. */ if ((inodedep->id_state & DEPCOMPLETE) == 0) { if (inodedep->id_savedino2 != NULL) panic("initiate_write_inodeblock_ufs2: I/O underway"); FREE_LOCK(ump); sip = malloc(sizeof(struct ufs2_dinode), M_SAVEDINO, M_SOFTDEP_FLAGS); ACQUIRE_LOCK(ump); inodedep->id_savedino2 = sip; *inodedep->id_savedino2 = *dp; bzero((caddr_t)dp, sizeof(struct ufs2_dinode)); dp->di_gen = inodedep->id_savedino2->di_gen; dp->di_freelink = inodedep->id_savedino2->di_freelink; return; } /* * If no dependencies, then there is nothing to roll back. */ inodedep->id_savedsize = dp->di_size; inodedep->id_savedextsize = dp->di_extsize; inodedep->id_savednlink = dp->di_nlink; if (TAILQ_EMPTY(&inodedep->id_inoupdt) && TAILQ_EMPTY(&inodedep->id_extupdt) && TAILQ_EMPTY(&inodedep->id_inoreflst)) return; /* * Revert the link count to that of the first unwritten journal entry. */ inoref = TAILQ_FIRST(&inodedep->id_inoreflst); if (inoref) dp->di_nlink = inoref->if_nlink; /* * Set the ext data dependencies to busy. */ for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; adp = TAILQ_NEXT(adp, ad_next)) { #ifdef INVARIANTS if (deplist != 0 && prevlbn >= adp->ad_offset) panic("softdep_write_inodeblock: lbn order"); prevlbn = adp->ad_offset; if (dp->di_extb[adp->ad_offset] != adp->ad_newblkno) panic("%s: direct pointer #%jd mismatch %jd != %jd", "softdep_write_inodeblock", (intmax_t)adp->ad_offset, (intmax_t)dp->di_extb[adp->ad_offset], (intmax_t)adp->ad_newblkno); deplist |= 1 << adp->ad_offset; if ((adp->ad_state & ATTACHED) == 0) panic("softdep_write_inodeblock: Unknown state 0x%x", adp->ad_state); #endif /* INVARIANTS */ adp->ad_state &= ~ATTACHED; adp->ad_state |= UNDONE; } /* * The on-disk inode cannot claim to be any larger than the last * fragment that has been written. Otherwise, the on-disk inode * might have fragments that were not the last block in the ext * data which would corrupt the filesystem. 
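 * For example (hypothetical sizes): with fs_bsize of 8192 and a rollback
 * of the pointer at offset 3 whose old allocation was a 2048-byte
 * fragment, the loop below reverts di_extsize to 3 * 8192 + 2048 and
 * zeroes the ext pointers past that offset.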
 */
	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
		dp->di_extb[adp->ad_offset] = adp->ad_oldblkno;
		/* keep going until hitting a rollback to a frag */
		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
			continue;
		dp->di_extsize = fs->fs_bsize * adp->ad_offset +
		    adp->ad_oldsize;
		for (i = adp->ad_offset + 1; i < UFS_NXADDR; i++) {
#ifdef INVARIANTS
			if (dp->di_extb[i] != 0 && (deplist & (1 << i)) == 0)
				panic("softdep_write_inodeblock: lost dep1");
#endif /* INVARIANTS */
			dp->di_extb[i] = 0;
		}
		lastadp = NULL;
		break;
	}
	/*
	 * If we have zeroed out the last allocated block of the ext
	 * data, roll back the size to the last currently allocated block.
	 * We know that this last allocated block is full-sized, as
	 * we already checked for fragments in the loop above.
	 */
	if (lastadp != NULL &&
	    dp->di_extsize <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
		for (i = lastadp->ad_offset; i >= 0; i--)
			if (dp->di_extb[i] != 0)
				break;
		dp->di_extsize = (i + 1) * fs->fs_bsize;
	}
	/*
	 * Set the file data dependencies to busy.
	 */
	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
	     adp = TAILQ_NEXT(adp, ad_next)) {
#ifdef INVARIANTS
		if (deplist != 0 && prevlbn >= adp->ad_offset)
			panic("softdep_write_inodeblock: lbn order");
		if ((adp->ad_state & ATTACHED) == 0)
			panic("inodedep %p and adp %p not attached", inodedep,
			    adp);
		prevlbn = adp->ad_offset;
		if (adp->ad_offset < UFS_NDADDR &&
		    dp->di_db[adp->ad_offset] != adp->ad_newblkno)
			panic("%s: direct pointer #%jd mismatch %jd != %jd",
			    "softdep_write_inodeblock",
			    (intmax_t)adp->ad_offset,
			    (intmax_t)dp->di_db[adp->ad_offset],
			    (intmax_t)adp->ad_newblkno);
		if (adp->ad_offset >= UFS_NDADDR &&
		    dp->di_ib[adp->ad_offset - UFS_NDADDR] != adp->ad_newblkno)
			panic("%s: indirect pointer #%jd mismatch %jd != %jd",
			    "softdep_write_inodeblock",
			    (intmax_t)adp->ad_offset - UFS_NDADDR,
			    (intmax_t)dp->di_ib[adp->ad_offset - UFS_NDADDR],
			    (intmax_t)adp->ad_newblkno);
		deplist |= 1 << adp->ad_offset;
		if ((adp->ad_state & ATTACHED) == 0)
			panic("softdep_write_inodeblock: Unknown state 0x%x",
			    adp->ad_state);
#endif /* INVARIANTS */
		adp->ad_state &= ~ATTACHED;
		adp->ad_state |= UNDONE;
	}
	/*
	 * The on-disk inode cannot claim to be any larger than the last
	 * fragment that has been written. Otherwise, the on-disk inode
	 * might have fragments that were not the last block in the file
	 * which would corrupt the filesystem.
	 */
	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
		if (adp->ad_offset >= UFS_NDADDR)
			break;
		dp->di_db[adp->ad_offset] = adp->ad_oldblkno;
		/* keep going until hitting a rollback to a frag */
		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
			continue;
		dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
		for (i = adp->ad_offset + 1; i < UFS_NDADDR; i++) {
#ifdef INVARIANTS
			if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
				panic("softdep_write_inodeblock: lost dep2");
#endif /* INVARIANTS */
			dp->di_db[i] = 0;
		}
		for (i = 0; i < UFS_NIADDR; i++) {
#ifdef INVARIANTS
			if (dp->di_ib[i] != 0 &&
			    (deplist & ((1 << UFS_NDADDR) << i)) == 0)
				panic("softdep_write_inodeblock: lost dep3");
#endif /* INVARIANTS */
			dp->di_ib[i] = 0;
		}
		return;
	}
	/*
	 * If we have zeroed out the last allocated block of the file,
	 * roll back the size to the last currently allocated block.
	 * We know that this last allocated block is full-sized, as
	 * we already checked for fragments in the loop above.
 */
	if (lastadp != NULL &&
	    dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
		for (i = lastadp->ad_offset; i >= 0; i--)
			if (dp->di_db[i] != 0)
				break;
		dp->di_size = (i + 1) * fs->fs_bsize;
	}
	/*
	 * The only dependencies are for indirect blocks.
	 *
	 * The file size for indirect block additions is not guaranteed.
	 * Such a guarantee would be non-trivial to achieve. The conventional
	 * synchronous write implementation also does not make this guarantee.
	 * Fsck should catch and fix discrepancies. Arguably, the file size
	 * can be over-estimated without destroying integrity when the file
	 * moves into the indirect blocks (i.e., is large). If we want to
	 * postpone fsck, we are stuck with this argument.
	 */
	for (; adp; adp = TAILQ_NEXT(adp, ad_next))
		dp->di_ib[adp->ad_offset - UFS_NDADDR] = 0;
}

/*
 * Cancel an indirdep as a result of truncation. Release all of the
 * children allocindirs and place their journal work on the appropriate
 * list.
 */
static void
cancel_indirdep(indirdep, bp, freeblks)
	struct indirdep *indirdep;
	struct buf *bp;
	struct freeblks *freeblks;
{
	struct allocindir *aip;

	/*
	 * None of the indirect pointers will ever be visible,
	 * so they can simply be tossed. GOINGAWAY ensures
	 * that allocated pointers will be saved in the buffer
	 * cache until they are freed. Note that they will
	 * only be able to be found by their physical address
	 * since the inode mapping the logical address will
	 * be gone. The save buffer used for the safe copy
	 * was allocated in setup_allocindir_phase2 using
	 * the physical address so it could be used for this
	 * purpose. Hence we swap the safe copy with the real
	 * copy, allowing the safe copy to be freed and holding
	 * on to the real copy for later use in indir_trunc.
	 */
	if (indirdep->ir_state & GOINGAWAY)
		panic("cancel_indirdep: already gone");
	if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
		indirdep->ir_state |= DEPCOMPLETE;
		LIST_REMOVE(indirdep, ir_next);
	}
	indirdep->ir_state |= GOINGAWAY;
	/*
	 * Pass in bp for blocks that still have journal writes
	 * pending so we can cancel them on their own.
	 */
	while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != NULL)
		cancel_allocindir(aip, bp, freeblks, 0);
	while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != NULL)
		cancel_allocindir(aip, NULL, freeblks, 0);
	while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != NULL)
		cancel_allocindir(aip, NULL, freeblks, 0);
	while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL)
		cancel_allocindir(aip, NULL, freeblks, 0);
	/*
	 * If there are pending partial truncations we need to keep the
	 * old block copy around until they complete. This is because
	 * the current b_data is not a perfect superset of the available
	 * blocks.
	 */
	if (TAILQ_EMPTY(&indirdep->ir_trunc))
		bcopy(bp->b_data, indirdep->ir_savebp->b_data, bp->b_bcount);
	else
		bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
	WORKLIST_REMOVE(&indirdep->ir_list);
	WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, &indirdep->ir_list);
	indirdep->ir_bp = NULL;
	indirdep->ir_freeblks = freeblks;
}

/*
 * Free an indirdep once it no longer has new pointers to track.
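 * By this point every allocindir list must be empty and the journal
 * dependencies satisfied; the KASSERTs below enforce this before the
 * work item is freed.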
*/ static void free_indirdep(indirdep) struct indirdep *indirdep; { KASSERT(TAILQ_EMPTY(&indirdep->ir_trunc), ("free_indirdep: Indir trunc list not empty.")); KASSERT(LIST_EMPTY(&indirdep->ir_completehd), ("free_indirdep: Complete head not empty.")); KASSERT(LIST_EMPTY(&indirdep->ir_writehd), ("free_indirdep: write head not empty.")); KASSERT(LIST_EMPTY(&indirdep->ir_donehd), ("free_indirdep: done head not empty.")); KASSERT(LIST_EMPTY(&indirdep->ir_deplisthd), ("free_indirdep: deplist head not empty.")); KASSERT((indirdep->ir_state & DEPCOMPLETE), ("free_indirdep: %p still on newblk list.", indirdep)); KASSERT(indirdep->ir_saveddata == NULL, ("free_indirdep: %p still has saved data.", indirdep)); if (indirdep->ir_state & ONWORKLIST) WORKLIST_REMOVE(&indirdep->ir_list); WORKITEM_FREE(indirdep, D_INDIRDEP); } /* * Called before a write to an indirdep. This routine is responsible for * rolling back pointers to a safe state which includes only those * allocindirs which have been completed. */ static void initiate_write_indirdep(indirdep, bp) struct indirdep *indirdep; struct buf *bp; { struct ufsmount *ump; indirdep->ir_state |= IOSTARTED; if (indirdep->ir_state & GOINGAWAY) panic("disk_io_initiation: indirdep gone"); /* * If there are no remaining dependencies, this will be writing * the real pointers. */ if (LIST_EMPTY(&indirdep->ir_deplisthd) && TAILQ_EMPTY(&indirdep->ir_trunc)) return; /* * Replace up-to-date version with safe version. */ if (indirdep->ir_saveddata == NULL) { ump = VFSTOUFS(indirdep->ir_list.wk_mp); LOCK_OWNED(ump); FREE_LOCK(ump); indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP, M_SOFTDEP_FLAGS); ACQUIRE_LOCK(ump); } indirdep->ir_state &= ~ATTACHED; indirdep->ir_state |= UNDONE; bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount); bcopy(indirdep->ir_savebp->b_data, bp->b_data, bp->b_bcount); } /* * Called when an inode has been cleared in a cg bitmap. This finally * eliminates any canceled jaddrefs */ void softdep_setup_inofree(mp, bp, ino, wkhd) struct mount *mp; struct buf *bp; ino_t ino; struct workhead *wkhd; { struct worklist *wk, *wkn; struct inodedep *inodedep; struct ufsmount *ump; uint8_t *inosused; struct cg *cgp; struct fs *fs; KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_setup_inofree called on non-softdep filesystem")); ump = VFSTOUFS(mp); ACQUIRE_LOCK(ump); fs = ump->um_fs; cgp = (struct cg *)bp->b_data; inosused = cg_inosused(cgp); if (isset(inosused, ino % fs->fs_ipg)) panic("softdep_setup_inofree: inode %ju not freed.", (uintmax_t)ino); if (inodedep_lookup(mp, ino, 0, &inodedep)) panic("softdep_setup_inofree: ino %ju has existing inodedep %p", (uintmax_t)ino, inodedep); if (wkhd) { LIST_FOREACH_SAFE(wk, wkhd, wk_list, wkn) { if (wk->wk_type != D_JADDREF) continue; WORKLIST_REMOVE(wk); /* * We can free immediately even if the jaddref * isn't attached in a background write as now * the bitmaps are reconciled. */ wk->wk_state |= COMPLETE | ATTACHED; free_jaddref(WK_JADDREF(wk)); } jwork_move(&bp->b_dep, wkhd); } FREE_LOCK(ump); } /* * Called via ffs_blkfree() after a set of frags has been cleared from a cg * map. Any dependencies waiting for the write to clear are added to the * buf's list and any jnewblks that are being canceled are discarded * immediately. 
*/ void softdep_setup_blkfree(mp, bp, blkno, frags, wkhd) struct mount *mp; struct buf *bp; ufs2_daddr_t blkno; int frags; struct workhead *wkhd; { struct bmsafemap *bmsafemap; struct jnewblk *jnewblk; struct ufsmount *ump; struct worklist *wk; struct fs *fs; #ifdef SUJ_DEBUG uint8_t *blksfree; struct cg *cgp; ufs2_daddr_t jstart; ufs2_daddr_t jend; ufs2_daddr_t end; long bno; int i; #endif CTR3(KTR_SUJ, "softdep_setup_blkfree: blkno %jd frags %d wk head %p", blkno, frags, wkhd); ump = VFSTOUFS(mp); KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, ("softdep_setup_blkfree called on non-softdep filesystem")); ACQUIRE_LOCK(ump); /* Lookup the bmsafemap so we track when it is dirty. */ fs = ump->um_fs; bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL); /* * Detach any jnewblks which have been canceled. They must linger * until the bitmap is cleared again by ffs_blkfree() to prevent * an unjournaled allocation from hitting the disk. */ if (wkhd) { while ((wk = LIST_FIRST(wkhd)) != NULL) { CTR2(KTR_SUJ, "softdep_setup_blkfree: blkno %jd wk type %d", blkno, wk->wk_type); WORKLIST_REMOVE(wk); if (wk->wk_type != D_JNEWBLK) { WORKLIST_INSERT(&bmsafemap->sm_freehd, wk); continue; } jnewblk = WK_JNEWBLK(wk); KASSERT(jnewblk->jn_state & GOINGAWAY, ("softdep_setup_blkfree: jnewblk not canceled.")); #ifdef SUJ_DEBUG /* * Assert that this block is free in the bitmap * before we discard the jnewblk. */ cgp = (struct cg *)bp->b_data; blksfree = cg_blksfree(cgp); bno = dtogd(fs, jnewblk->jn_blkno); for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) { if (isset(blksfree, bno + i)) continue; panic("softdep_setup_blkfree: not free"); } #endif /* * Even if it's not attached we can free immediately * as the new bitmap is correct. */ wk->wk_state |= COMPLETE | ATTACHED; free_jnewblk(jnewblk); } } #ifdef SUJ_DEBUG /* * Assert that we are not freeing a block which has an outstanding * allocation dependency. */ fs = VFSTOUFS(mp)->um_fs; bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL); end = blkno + frags; LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) { /* * Don't match against blocks that will be freed when the * background write is done. */ if ((jnewblk->jn_state & (ATTACHED | COMPLETE | DEPCOMPLETE)) == (COMPLETE | DEPCOMPLETE)) continue; jstart = jnewblk->jn_blkno + jnewblk->jn_oldfrags; jend = jnewblk->jn_blkno + jnewblk->jn_frags; if ((blkno >= jstart && blkno < jend) || (end > jstart && end <= jend)) { printf("state 0x%X %jd - %d %d dep %p\n", jnewblk->jn_state, jnewblk->jn_blkno, jnewblk->jn_oldfrags, jnewblk->jn_frags, jnewblk->jn_dep); panic("softdep_setup_blkfree: " "%jd-%jd(%d) overlaps with %jd-%jd", blkno, end, frags, jstart, jend); } } #endif FREE_LOCK(ump); } /* * Revert a block allocation when the journal record that describes it * is not yet written. */ static int jnewblk_rollback(jnewblk, fs, cgp, blksfree) struct jnewblk *jnewblk; struct fs *fs; struct cg *cgp; uint8_t *blksfree; { ufs1_daddr_t fragno; long cgbno, bbase; int frags, blk; int i; frags = 0; cgbno = dtogd(fs, jnewblk->jn_blkno); /* * We have to test which frags need to be rolled back. We may * be operating on a stale copy when doing background writes. */ for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) if (isclr(blksfree, cgbno + i)) frags++; if (frags == 0) return (0); /* * This is mostly ffs_blkfree() sans some validation and * superblock updates. 
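 * Example (hypothetical 8 frags/block): if the journaled allocation
 * covered frags 2-7 of a block and all six are still marked allocated
 * in this (possibly stale) copy, those six bits are set free again and
 * cg_cs.cs_nffree is credited accordingly.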
*/ if (frags == fs->fs_frag) { fragno = fragstoblks(fs, cgbno); ffs_setblock(fs, blksfree, fragno); ffs_clusteracct(fs, cgp, fragno, 1); cgp->cg_cs.cs_nbfree++; } else { cgbno += jnewblk->jn_oldfrags; bbase = cgbno - fragnum(fs, cgbno); /* Decrement the old frags. */ blk = blkmap(fs, blksfree, bbase); ffs_fragacct(fs, blk, cgp->cg_frsum, -1); /* Deallocate the fragment */ for (i = 0; i < frags; i++) setbit(blksfree, cgbno + i); cgp->cg_cs.cs_nffree += frags; /* Add back in counts associated with the new frags */ blk = blkmap(fs, blksfree, bbase); ffs_fragacct(fs, blk, cgp->cg_frsum, 1); /* If a complete block has been reassembled, account for it. */ fragno = fragstoblks(fs, bbase); if (ffs_isblock(fs, blksfree, fragno)) { cgp->cg_cs.cs_nffree -= fs->fs_frag; ffs_clusteracct(fs, cgp, fragno, 1); cgp->cg_cs.cs_nbfree++; } } stat_jnewblk++; jnewblk->jn_state &= ~ATTACHED; jnewblk->jn_state |= UNDONE; return (frags); } static void initiate_write_bmsafemap(bmsafemap, bp) struct bmsafemap *bmsafemap; struct buf *bp; /* The cg block. */ { struct jaddref *jaddref; struct jnewblk *jnewblk; uint8_t *inosused; uint8_t *blksfree; struct cg *cgp; struct fs *fs; ino_t ino; /* * If this is a background write, we did this at the time that * the copy was made, so do not need to do it again. */ if (bmsafemap->sm_state & IOSTARTED) return; bmsafemap->sm_state |= IOSTARTED; /* * Clear any inode allocations which are pending journal writes. */ if (LIST_FIRST(&bmsafemap->sm_jaddrefhd) != NULL) { cgp = (struct cg *)bp->b_data; fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; inosused = cg_inosused(cgp); LIST_FOREACH(jaddref, &bmsafemap->sm_jaddrefhd, ja_bmdeps) { ino = jaddref->ja_ino % fs->fs_ipg; if (isset(inosused, ino)) { if ((jaddref->ja_mode & IFMT) == IFDIR) cgp->cg_cs.cs_ndir--; cgp->cg_cs.cs_nifree++; clrbit(inosused, ino); jaddref->ja_state &= ~ATTACHED; jaddref->ja_state |= UNDONE; stat_jaddref++; } else panic("initiate_write_bmsafemap: inode %ju " "marked free", (uintmax_t)jaddref->ja_ino); } } /* * Clear any block allocations which are pending journal writes. */ if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) { cgp = (struct cg *)bp->b_data; fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; blksfree = cg_blksfree(cgp); LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) { if (jnewblk_rollback(jnewblk, fs, cgp, blksfree)) continue; panic("initiate_write_bmsafemap: block %jd " "marked free", jnewblk->jn_blkno); } } /* * Move allocation lists to the written lists so they can be * cleared once the block write is complete. */ LIST_SWAP(&bmsafemap->sm_inodedephd, &bmsafemap->sm_inodedepwr, inodedep, id_deps); LIST_SWAP(&bmsafemap->sm_newblkhd, &bmsafemap->sm_newblkwr, newblk, nb_deps); LIST_SWAP(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr, worklist, wk_list); } /* * This routine is called during the completion interrupt * service routine for a disk write (from the procedure called * by the device driver to inform the filesystem caches of * a request completion). It should be called early in this * procedure, before the block is made available to other * processes or other routines are called. * */ static void softdep_disk_write_complete(bp) struct buf *bp; /* describes the completed disk write */ { struct worklist *wk; struct worklist *owk; struct ufsmount *ump; struct workhead reattach; struct freeblks *freeblks; struct buf *sbp; /* * If an error occurred while doing the write, then the data * has not hit the disk and the dependencies cannot be processed. 
 * But we do have to go through and roll forward any dependencies
 * that were rolled back before the disk write.
 */
	if ((bp->b_ioflags & BIO_ERROR) != 0 && (bp->b_flags & B_INVAL) == 0) {
		LIST_FOREACH(wk, &bp->b_dep, wk_list) {
			switch (wk->wk_type) {
			case D_PAGEDEP:
				handle_written_filepage(WK_PAGEDEP(wk), bp, 0);
				continue;
			case D_INODEDEP:
				handle_written_inodeblock(WK_INODEDEP(wk),
				    bp, 0);
				continue;
			case D_BMSAFEMAP:
				handle_written_bmsafemap(WK_BMSAFEMAP(wk),
				    bp, 0);
				continue;
			case D_INDIRDEP:
				handle_written_indirdep(WK_INDIRDEP(wk), bp,
				    &sbp, 0);
				continue;
			default:
				/* nothing to roll forward */
				continue;
			}
		}
		return;
	}
	if ((wk = LIST_FIRST(&bp->b_dep)) == NULL)
		return;
	ump = VFSTOUFS(wk->wk_mp);
	LIST_INIT(&reattach);
	/*
	 * This lock must not be released anywhere in this code segment.
	 */
	sbp = NULL;
	owk = NULL;
	ACQUIRE_LOCK(ump);
	while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
		WORKLIST_REMOVE(wk);
		atomic_add_long(&dep_write[wk->wk_type], 1);
		if (wk == owk)
			panic("duplicate worklist: %p\n", wk);
		owk = wk;
		switch (wk->wk_type) {
		case D_PAGEDEP:
			if (handle_written_filepage(WK_PAGEDEP(wk), bp,
			    WRITESUCCEEDED))
				WORKLIST_INSERT(&reattach, wk);
			continue;
		case D_INODEDEP:
			if (handle_written_inodeblock(WK_INODEDEP(wk), bp,
			    WRITESUCCEEDED))
				WORKLIST_INSERT(&reattach, wk);
			continue;
		case D_BMSAFEMAP:
			if (handle_written_bmsafemap(WK_BMSAFEMAP(wk), bp,
			    WRITESUCCEEDED))
				WORKLIST_INSERT(&reattach, wk);
			continue;
		case D_MKDIR:
			handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
			continue;
		case D_ALLOCDIRECT:
			wk->wk_state |= COMPLETE;
			handle_allocdirect_partdone(WK_ALLOCDIRECT(wk), NULL);
			continue;
		case D_ALLOCINDIR:
			wk->wk_state |= COMPLETE;
			handle_allocindir_partdone(WK_ALLOCINDIR(wk));
			continue;
		case D_INDIRDEP:
			if (handle_written_indirdep(WK_INDIRDEP(wk), bp, &sbp,
			    WRITESUCCEEDED))
				WORKLIST_INSERT(&reattach, wk);
			continue;
		case D_FREEBLKS:
			wk->wk_state |= COMPLETE;
			freeblks = WK_FREEBLKS(wk);
			if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE &&
			    LIST_EMPTY(&freeblks->fb_jblkdephd))
				add_to_worklist(wk, WK_NODELAY);
			continue;
		case D_FREEWORK:
			handle_written_freework(WK_FREEWORK(wk));
			break;
		case D_JSEGDEP:
			free_jsegdep(WK_JSEGDEP(wk));
			continue;
		case D_JSEG:
			handle_written_jseg(WK_JSEG(wk), bp);
			continue;
		case D_SBDEP:
			if (handle_written_sbdep(WK_SBDEP(wk), bp))
				WORKLIST_INSERT(&reattach, wk);
			continue;
		case D_FREEDEP:
			free_freedep(WK_FREEDEP(wk));
			continue;
		default:
			panic("handle_disk_write_complete: Unknown type %s",
			    TYPENAME(wk->wk_type));
			/* NOTREACHED */
		}
	}
	/*
	 * Reattach any requests that must be redone.
	 */
	while ((wk = LIST_FIRST(&reattach)) != NULL) {
		WORKLIST_REMOVE(wk);
		WORKLIST_INSERT(&bp->b_dep, wk);
	}
	FREE_LOCK(ump);
	if (sbp)
		brelse(sbp);
}

/*
 * Called from within softdep_disk_write_complete above. Note that
 * this routine is always called from interrupt level with further
 * splbio interrupts blocked.
 */
static void
handle_allocdirect_partdone(adp, wkhd)
	struct allocdirect *adp;	/* the completed allocdirect */
	struct workhead *wkhd;		/* Work to do when inode is written. */
{
	struct allocdirectlst *listhead;
	struct allocdirect *listadp;
	struct inodedep *inodedep;
	long bsize;

	if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
		return;
	/*
	 * The on-disk inode cannot claim to be any larger than the last
	 * fragment that has been written. Otherwise, the on-disk inode
	 * might have fragments that were not the last block in the file
	 * which would corrupt the filesystem.
Thus, we cannot free any * allocdirects after one whose ad_oldblkno claims a fragment as * these blocks must be rolled back to zero before writing the inode. * We check the currently active set of allocdirects in id_inoupdt * or id_extupdt as appropriate. */ inodedep = adp->ad_inodedep; bsize = inodedep->id_fs->fs_bsize; if (adp->ad_state & EXTDATA) listhead = &inodedep->id_extupdt; else listhead = &inodedep->id_inoupdt; TAILQ_FOREACH(listadp, listhead, ad_next) { /* found our block */ if (listadp == adp) break; /* continue if ad_oldlbn is not a fragment */ if (listadp->ad_oldsize == 0 || listadp->ad_oldsize == bsize) continue; /* hit a fragment */ return; } /* * If we have reached the end of the current list without * finding the just finished dependency, then it must be * on the future dependency list. Future dependencies cannot * be freed until they are moved to the current list. */ if (listadp == NULL) { #ifdef DEBUG if (adp->ad_state & EXTDATA) listhead = &inodedep->id_newextupdt; else listhead = &inodedep->id_newinoupdt; TAILQ_FOREACH(listadp, listhead, ad_next) /* found our block */ if (listadp == adp) break; if (listadp == NULL) panic("handle_allocdirect_partdone: lost dep"); #endif /* DEBUG */ return; } /* * If we have found the just finished dependency, then queue * it along with anything that follows it that is complete. * Since the pointer has not yet been written in the inode * as the dependency prevents it, place the allocdirect on the * bufwait list where it will be freed once the pointer is * valid. */ if (wkhd == NULL) wkhd = &inodedep->id_bufwait; for (; adp; adp = listadp) { listadp = TAILQ_NEXT(adp, ad_next); if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) return; TAILQ_REMOVE(listhead, adp, ad_next); WORKLIST_INSERT(wkhd, &adp->ad_block.nb_list); } } /* * Called from within softdep_disk_write_complete above. This routine * completes successfully written allocindirs. */ static void handle_allocindir_partdone(aip) struct allocindir *aip; /* the completed allocindir */ { struct indirdep *indirdep; if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE) return; indirdep = aip->ai_indirdep; LIST_REMOVE(aip, ai_next); /* * Don't set a pointer while the buffer is undergoing IO or while * we have active truncations. */ if (indirdep->ir_state & UNDONE || !TAILQ_EMPTY(&indirdep->ir_trunc)) { LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next); return; } if (indirdep->ir_state & UFS1FMT) ((ufs1_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = aip->ai_newblkno; else ((ufs2_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = aip->ai_newblkno; /* * Await the pointer write before freeing the allocindir. */ LIST_INSERT_HEAD(&indirdep->ir_writehd, aip, ai_next); } /* * Release segments held on a jwork list. */ static void handle_jwork(wkhd) struct workhead *wkhd; { struct worklist *wk; while ((wk = LIST_FIRST(wkhd)) != NULL) { WORKLIST_REMOVE(wk); switch (wk->wk_type) { case D_JSEGDEP: free_jsegdep(WK_JSEGDEP(wk)); continue; case D_FREEDEP: free_freedep(WK_FREEDEP(wk)); continue; case D_FREEFRAG: rele_jseg(WK_JSEG(WK_FREEFRAG(wk)->ff_jdep)); WORKITEM_FREE(wk, D_FREEFRAG); continue; case D_FREEWORK: handle_written_freework(WK_FREEWORK(wk)); continue; default: panic("handle_jwork: Unknown type %s\n", TYPENAME(wk->wk_type)); } } } /* * Handle the bufwait list on an inode when it is safe to release items * held there. 
This normally happens after an inode block is written but * may be delayed and handled later if there are pending journal items that * are not yet safe to be released. */ static struct freefile * handle_bufwait(inodedep, refhd) struct inodedep *inodedep; struct workhead *refhd; { struct jaddref *jaddref; struct freefile *freefile; struct worklist *wk; freefile = NULL; while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) { WORKLIST_REMOVE(wk); switch (wk->wk_type) { case D_FREEFILE: /* * We defer adding freefile to the worklist * until all other additions have been made to * ensure that it will be done after all the * old blocks have been freed. */ if (freefile != NULL) panic("handle_bufwait: freefile"); freefile = WK_FREEFILE(wk); continue; case D_MKDIR: handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT); continue; case D_DIRADD: diradd_inode_written(WK_DIRADD(wk), inodedep); continue; case D_FREEFRAG: wk->wk_state |= COMPLETE; if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE) add_to_worklist(wk, 0); continue; case D_DIRREM: wk->wk_state |= COMPLETE; add_to_worklist(wk, 0); continue; case D_ALLOCDIRECT: case D_ALLOCINDIR: free_newblk(WK_NEWBLK(wk)); continue; case D_JNEWBLK: wk->wk_state |= COMPLETE; free_jnewblk(WK_JNEWBLK(wk)); continue; /* * Save freed journal segments and add references on * the supplied list which will delay their release * until the cg bitmap is cleared on disk. */ case D_JSEGDEP: if (refhd == NULL) free_jsegdep(WK_JSEGDEP(wk)); else WORKLIST_INSERT(refhd, wk); continue; case D_JADDREF: jaddref = WK_JADDREF(wk); TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps); /* * Transfer any jaddrefs to the list to be freed with * the bitmap if we're handling a removed file. */ if (refhd == NULL) { wk->wk_state |= COMPLETE; free_jaddref(jaddref); } else WORKLIST_INSERT(refhd, wk); continue; default: panic("handle_bufwait: Unknown type %p(%s)", wk, TYPENAME(wk->wk_type)); /* NOTREACHED */ } } return (freefile); } /* * Called from within softdep_disk_write_complete above to restore * in-memory inode block contents to their most up-to-date state. Note * that this routine is always called from interrupt level with further * interrupts from this device blocked. * * If the write did not succeed, we will do all the roll-forward * operations, but we will not take the actions that will allow its * dependencies to be processed. */ static int handle_written_inodeblock(inodedep, bp, flags) struct inodedep *inodedep; struct buf *bp; /* buffer containing the inode block */ int flags; { struct freefile *freefile; struct allocdirect *adp, *nextadp; struct ufs1_dinode *dp1 = NULL; struct ufs2_dinode *dp2 = NULL; struct workhead wkhd; int hadchanges, fstype; ino_t freelink; LIST_INIT(&wkhd); hadchanges = 0; freefile = NULL; if ((inodedep->id_state & IOSTARTED) == 0) panic("handle_written_inodeblock: not started"); inodedep->id_state &= ~IOSTARTED; if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) { fstype = UFS1; dp1 = (struct ufs1_dinode *)bp->b_data + ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); freelink = dp1->di_freelink; } else { fstype = UFS2; dp2 = (struct ufs2_dinode *)bp->b_data + ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); freelink = dp2->di_freelink; } /* * Leave this inodeblock dirty until it's in the list. 
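 * That is, while the inode is UNLINKED but its place in the on-disk
 * unlinked-inode chain has not yet been confirmed by a successful
 * write of di_freelink (UNLINKNEXT not yet set).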
*/ if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) == UNLINKED && (flags & WRITESUCCEEDED)) { struct inodedep *inon; inon = TAILQ_NEXT(inodedep, id_unlinked); if ((inon == NULL && freelink == 0) || (inon && inon->id_ino == freelink)) { if (inon) inon->id_state |= UNLINKPREV; inodedep->id_state |= UNLINKNEXT; } hadchanges = 1; } /* * If we had to rollback the inode allocation because of * bitmaps being incomplete, then simply restore it. * Keep the block dirty so that it will not be reclaimed until * all associated dependencies have been cleared and the * corresponding updates written to disk. */ if (inodedep->id_savedino1 != NULL) { hadchanges = 1; if (fstype == UFS1) *dp1 = *inodedep->id_savedino1; else *dp2 = *inodedep->id_savedino2; free(inodedep->id_savedino1, M_SAVEDINO); inodedep->id_savedino1 = NULL; if ((bp->b_flags & B_DELWRI) == 0) stat_inode_bitmap++; bdirty(bp); /* * If the inode is clear here and GOINGAWAY it will never * be written. Process the bufwait and clear any pending * work which may include the freefile. */ if (inodedep->id_state & GOINGAWAY) goto bufwait; return (1); } if (flags & WRITESUCCEEDED) inodedep->id_state |= COMPLETE; /* * Roll forward anything that had to be rolled back before * the inode could be updated. */ for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) { nextadp = TAILQ_NEXT(adp, ad_next); if (adp->ad_state & ATTACHED) panic("handle_written_inodeblock: new entry"); if (fstype == UFS1) { if (adp->ad_offset < UFS_NDADDR) { if (dp1->di_db[adp->ad_offset]!=adp->ad_oldblkno) panic("%s %s #%jd mismatch %d != %jd", "handle_written_inodeblock:", "direct pointer", (intmax_t)adp->ad_offset, dp1->di_db[adp->ad_offset], (intmax_t)adp->ad_oldblkno); dp1->di_db[adp->ad_offset] = adp->ad_newblkno; } else { if (dp1->di_ib[adp->ad_offset - UFS_NDADDR] != 0) panic("%s: %s #%jd allocated as %d", "handle_written_inodeblock", "indirect pointer", (intmax_t)adp->ad_offset - UFS_NDADDR, dp1->di_ib[adp->ad_offset - UFS_NDADDR]); dp1->di_ib[adp->ad_offset - UFS_NDADDR] = adp->ad_newblkno; } } else { if (adp->ad_offset < UFS_NDADDR) { if (dp2->di_db[adp->ad_offset]!=adp->ad_oldblkno) panic("%s: %s #%jd %s %jd != %jd", "handle_written_inodeblock", "direct pointer", (intmax_t)adp->ad_offset, "mismatch", (intmax_t)dp2->di_db[adp->ad_offset], (intmax_t)adp->ad_oldblkno); dp2->di_db[adp->ad_offset] = adp->ad_newblkno; } else { if (dp2->di_ib[adp->ad_offset - UFS_NDADDR] != 0) panic("%s: %s #%jd allocated as %jd", "handle_written_inodeblock", "indirect pointer", (intmax_t)adp->ad_offset - UFS_NDADDR, (intmax_t) dp2->di_ib[adp->ad_offset - UFS_NDADDR]); dp2->di_ib[adp->ad_offset - UFS_NDADDR] = adp->ad_newblkno; } } adp->ad_state &= ~UNDONE; adp->ad_state |= ATTACHED; hadchanges = 1; } for (adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; adp = nextadp) { nextadp = TAILQ_NEXT(adp, ad_next); if (adp->ad_state & ATTACHED) panic("handle_written_inodeblock: new entry"); if (dp2->di_extb[adp->ad_offset] != adp->ad_oldblkno) panic("%s: direct pointers #%jd %s %jd != %jd", "handle_written_inodeblock", (intmax_t)adp->ad_offset, "mismatch", (intmax_t)dp2->di_extb[adp->ad_offset], (intmax_t)adp->ad_oldblkno); dp2->di_extb[adp->ad_offset] = adp->ad_newblkno; adp->ad_state &= ~UNDONE; adp->ad_state |= ATTACHED; hadchanges = 1; } if (hadchanges && (bp->b_flags & B_DELWRI) == 0) stat_direct_blk_ptrs++; /* * Reset the file size to its most up-to-date value. 
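 * The sizes and link count saved at I/O initiation are copied back into
 * the dinode, and the id_saved* fields are then reset to -1 so that a
 * completion without a matching initiation trips the panic below.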
 */
	if (inodedep->id_savedsize == -1 || inodedep->id_savedextsize == -1)
		panic("handle_written_inodeblock: bad size");
	if (inodedep->id_savednlink > LINK_MAX)
		panic("handle_written_inodeblock: Invalid link count "
		    "%jd for inodedep %p", (uintmax_t)inodedep->id_savednlink,
		    inodedep);
	if (fstype == UFS1) {
		if (dp1->di_nlink != inodedep->id_savednlink) {
			dp1->di_nlink = inodedep->id_savednlink;
			hadchanges = 1;
		}
		if (dp1->di_size != inodedep->id_savedsize) {
			dp1->di_size = inodedep->id_savedsize;
			hadchanges = 1;
		}
	} else {
		if (dp2->di_nlink != inodedep->id_savednlink) {
			dp2->di_nlink = inodedep->id_savednlink;
			hadchanges = 1;
		}
		if (dp2->di_size != inodedep->id_savedsize) {
			dp2->di_size = inodedep->id_savedsize;
			hadchanges = 1;
		}
		if (dp2->di_extsize != inodedep->id_savedextsize) {
			dp2->di_extsize = inodedep->id_savedextsize;
			hadchanges = 1;
		}
	}
	inodedep->id_savedsize = -1;
	inodedep->id_savedextsize = -1;
	inodedep->id_savednlink = -1;
	/*
	 * If there were any rollbacks in the inode block, then it must be
	 * marked dirty so that it will eventually get written back in
	 * its correct form.
	 */
	if (hadchanges)
		bdirty(bp);
bufwait:
	/*
	 * If the write did not succeed, we have done all the roll-forward
	 * operations, but we cannot take the actions that will allow its
	 * dependencies to be processed.
	 */
	if ((flags & WRITESUCCEEDED) == 0)
		return (hadchanges);
	/*
	 * Process any allocdirects that completed during the update.
	 */
	if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
		handle_allocdirect_partdone(adp, &wkhd);
	if ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL)
		handle_allocdirect_partdone(adp, &wkhd);
	/*
	 * Process deallocations that were held pending until the
	 * inode had been written to disk. Freeing of the inode
	 * is delayed until after all blocks have been freed to
	 * avoid creation of new <vfsid, inum, lbn> triples
	 * before the old ones have been deleted. Completely
	 * unlinked inodes are not processed until the unlinked
	 * inode list is written or the last reference is removed.
	 */
	if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) != UNLINKED) {
		freefile = handle_bufwait(inodedep, NULL);
		if (freefile && !LIST_EMPTY(&wkhd)) {
			WORKLIST_INSERT(&wkhd, &freefile->fx_list);
			freefile = NULL;
		}
	}
	/*
	 * Move rolled forward dependency completions to the bufwait list
	 * now that those that were already written have been processed.
	 */
	if (!LIST_EMPTY(&wkhd) && hadchanges == 0)
		panic("handle_written_inodeblock: bufwait but no changes");
	jwork_move(&inodedep->id_bufwait, &wkhd);

	if (freefile != NULL) {
		/*
		 * If the inode is goingaway it was never written. Fake up
		 * the state here so free_inodedep() can succeed.
		 */
		if (inodedep->id_state & GOINGAWAY)
			inodedep->id_state |= COMPLETE | DEPCOMPLETE;
		if (free_inodedep(inodedep) == 0)
			panic("handle_written_inodeblock: live inodedep %p",
			    inodedep);
		add_to_worklist(&freefile->fx_list, 0);
		return (0);
	}

	/*
	 * If no outstanding dependencies, free it.
	 */
	if (free_inodedep(inodedep) ||
	    (TAILQ_FIRST(&inodedep->id_inoreflst) == 0 &&
	     TAILQ_FIRST(&inodedep->id_inoupdt) == 0 &&
	     TAILQ_FIRST(&inodedep->id_extupdt) == 0 &&
	     LIST_FIRST(&inodedep->id_bufwait) == 0))
		return (0);
	return (hadchanges);
}

/*
 * Perform needed roll-forwards and kick off any dependencies that
 * can now be processed.
 *
 * If the write did not succeed, we will do all the roll-forward
 * operations, but we will not take the actions that will allow its
 * dependencies to be processed.
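 *
 * Returns non-zero if the indirdep still has rollbacks to track and
 * must be reattached to the buffer by the caller.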
*/ static int handle_written_indirdep(indirdep, bp, bpp, flags) struct indirdep *indirdep; struct buf *bp; struct buf **bpp; int flags; { struct allocindir *aip; struct buf *sbp; int chgs; if (indirdep->ir_state & GOINGAWAY) panic("handle_written_indirdep: indirdep gone"); if ((indirdep->ir_state & IOSTARTED) == 0) panic("handle_written_indirdep: IO not started"); chgs = 0; /* * If there were rollbacks revert them here. */ if (indirdep->ir_saveddata) { bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount); if (TAILQ_EMPTY(&indirdep->ir_trunc)) { free(indirdep->ir_saveddata, M_INDIRDEP); indirdep->ir_saveddata = NULL; } chgs = 1; } indirdep->ir_state &= ~(UNDONE | IOSTARTED); indirdep->ir_state |= ATTACHED; /* * If the write did not succeed, we have done all the roll-forward * operations, but we cannot take the actions that will allow its * dependencies to be processed. */ if ((flags & WRITESUCCEEDED) == 0) { stat_indir_blk_ptrs++; bdirty(bp); return (1); } /* * Move allocindirs with written pointers to the completehd if * the indirdep's pointer is not yet written. Otherwise * free them here. */ while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != NULL) { LIST_REMOVE(aip, ai_next); if ((indirdep->ir_state & DEPCOMPLETE) == 0) { LIST_INSERT_HEAD(&indirdep->ir_completehd, aip, ai_next); newblk_freefrag(&aip->ai_block); continue; } free_newblk(&aip->ai_block); } /* * Move allocindirs that have finished dependency processing from * the done list to the write list after updating the pointers. */ if (TAILQ_EMPTY(&indirdep->ir_trunc)) { while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != NULL) { handle_allocindir_partdone(aip); if (aip == LIST_FIRST(&indirdep->ir_donehd)) panic("disk_write_complete: not gone"); chgs = 1; } } /* * Preserve the indirdep if there were any changes or if it is not * yet valid on disk. */ if (chgs) { stat_indir_blk_ptrs++; bdirty(bp); return (1); } /* * If there were no changes we can discard the savedbp and detach * ourselves from the buf. We are only carrying completed pointers * in this case. */ sbp = indirdep->ir_savebp; sbp->b_flags |= B_INVAL | B_NOCACHE; indirdep->ir_savebp = NULL; indirdep->ir_bp = NULL; if (*bpp != NULL) panic("handle_written_indirdep: bp already exists."); *bpp = sbp; /* * The indirdep may not be freed until its parent points at it. */ if (indirdep->ir_state & DEPCOMPLETE) free_indirdep(indirdep); return (0); } /* * Process a diradd entry after its dependent inode has been written. * This routine must be called with splbio interrupts blocked. */ static void diradd_inode_written(dap, inodedep) struct diradd *dap; struct inodedep *inodedep; { dap->da_state |= COMPLETE; complete_diradd(dap); WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); } /* * Returns true if the bmsafemap will have rollbacks when written. Must only * be called with the per-filesystem lock and the buf lock on the cg held. */ static int bmsafemap_backgroundwrite(bmsafemap, bp) struct bmsafemap *bmsafemap; struct buf *bp; { int dirty; LOCK_OWNED(VFSTOUFS(bmsafemap->sm_list.wk_mp)); dirty = !LIST_EMPTY(&bmsafemap->sm_jaddrefhd) | !LIST_EMPTY(&bmsafemap->sm_jnewblkhd); /* * If we're initiating a background write we need to process the * rollbacks as they exist now, not as they exist when IO starts. * No other consumers will look at the contents of the shadowed * buf so this is safe to do here. */ if (bp->b_xflags & BX_BKGRDMARKER) initiate_write_bmsafemap(bmsafemap, bp); return (dirty); } /* * Re-apply an allocation when a cg write is complete. 
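 * This is the inverse of jnewblk_rollback(): the frags are marked
 * allocated again and the cluster and fragment summaries are
 * recharged.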
*/ static int jnewblk_rollforward(jnewblk, fs, cgp, blksfree) struct jnewblk *jnewblk; struct fs *fs; struct cg *cgp; uint8_t *blksfree; { ufs1_daddr_t fragno; ufs2_daddr_t blkno; long cgbno, bbase; int frags, blk; int i; frags = 0; cgbno = dtogd(fs, jnewblk->jn_blkno); for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) { if (isclr(blksfree, cgbno + i)) panic("jnewblk_rollforward: re-allocated fragment"); frags++; } if (frags == fs->fs_frag) { blkno = fragstoblks(fs, cgbno); ffs_clrblock(fs, blksfree, (long)blkno); ffs_clusteracct(fs, cgp, blkno, -1); cgp->cg_cs.cs_nbfree--; } else { bbase = cgbno - fragnum(fs, cgbno); cgbno += jnewblk->jn_oldfrags; /* If a complete block had been reassembled, account for it. */ fragno = fragstoblks(fs, bbase); if (ffs_isblock(fs, blksfree, fragno)) { cgp->cg_cs.cs_nffree += fs->fs_frag; ffs_clusteracct(fs, cgp, fragno, -1); cgp->cg_cs.cs_nbfree--; } /* Decrement the old frags. */ blk = blkmap(fs, blksfree, bbase); ffs_fragacct(fs, blk, cgp->cg_frsum, -1); /* Allocate the fragment */ for (i = 0; i < frags; i++) clrbit(blksfree, cgbno + i); cgp->cg_cs.cs_nffree -= frags; /* Add back in counts associated with the new frags */ blk = blkmap(fs, blksfree, bbase); ffs_fragacct(fs, blk, cgp->cg_frsum, 1); } return (frags); } /* * Complete a write to a bmsafemap structure. Roll forward any bitmap * changes if it's not a background write. Set all written dependencies * to DEPCOMPLETE and free the structure if possible. * * If the write did not succeed, we will do all the roll-forward * operations, but we will not take the actions that will allow its * dependencies to be processed. */ static int handle_written_bmsafemap(bmsafemap, bp, flags) struct bmsafemap *bmsafemap; struct buf *bp; int flags; { struct newblk *newblk; struct inodedep *inodedep; struct jaddref *jaddref, *jatmp; struct jnewblk *jnewblk, *jntmp; struct ufsmount *ump; uint8_t *inosused; uint8_t *blksfree; struct cg *cgp; struct fs *fs; ino_t ino; int foreground; int chgs; if ((bmsafemap->sm_state & IOSTARTED) == 0) panic("handle_written_bmsafemap: Not started\n"); ump = VFSTOUFS(bmsafemap->sm_list.wk_mp); chgs = 0; bmsafemap->sm_state &= ~IOSTARTED; foreground = (bp->b_xflags & BX_BKGRDMARKER) == 0; /* * If write was successful, release journal work that was waiting * on the write. Otherwise move the work back. */ if (flags & WRITESUCCEEDED) handle_jwork(&bmsafemap->sm_freewr); else LIST_CONCAT(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr, worklist, wk_list); /* * Restore unwritten inode allocation pending jaddref writes. */ if (!LIST_EMPTY(&bmsafemap->sm_jaddrefhd)) { cgp = (struct cg *)bp->b_data; fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; inosused = cg_inosused(cgp); LIST_FOREACH_SAFE(jaddref, &bmsafemap->sm_jaddrefhd, ja_bmdeps, jatmp) { if ((jaddref->ja_state & UNDONE) == 0) continue; ino = jaddref->ja_ino % fs->fs_ipg; if (isset(inosused, ino)) panic("handle_written_bmsafemap: " "re-allocated inode"); /* Do the roll-forward only if it's a real copy. */ if (foreground) { if ((jaddref->ja_mode & IFMT) == IFDIR) cgp->cg_cs.cs_ndir++; cgp->cg_cs.cs_nifree--; setbit(inosused, ino); chgs = 1; } jaddref->ja_state &= ~UNDONE; jaddref->ja_state |= ATTACHED; free_jaddref(jaddref); } } /* * Restore any block allocations which are pending journal writes. 
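 * As with the inode allocations above, the roll-forward is applied
 * only to a foreground buffer, not to a background shadow copy.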
*/ if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) { cgp = (struct cg *)bp->b_data; fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; blksfree = cg_blksfree(cgp); LIST_FOREACH_SAFE(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps, jntmp) { if ((jnewblk->jn_state & UNDONE) == 0) continue; /* Do the roll-forward only if it's a real copy. */ if (foreground && jnewblk_rollforward(jnewblk, fs, cgp, blksfree)) chgs = 1; jnewblk->jn_state &= ~(UNDONE | NEWBLOCK); jnewblk->jn_state |= ATTACHED; free_jnewblk(jnewblk); } } /* * If the write did not succeed, we have done all the roll-forward * operations, but we cannot take the actions that will allow its * dependencies to be processed. */ if ((flags & WRITESUCCEEDED) == 0) { LIST_CONCAT(&bmsafemap->sm_newblkhd, &bmsafemap->sm_newblkwr, newblk, nb_deps); LIST_CONCAT(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr, worklist, wk_list); if (foreground) bdirty(bp); return (1); } while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkwr))) { newblk->nb_state |= DEPCOMPLETE; newblk->nb_state &= ~ONDEPLIST; newblk->nb_bmsafemap = NULL; LIST_REMOVE(newblk, nb_deps); if (newblk->nb_list.wk_type == D_ALLOCDIRECT) handle_allocdirect_partdone( WK_ALLOCDIRECT(&newblk->nb_list), NULL); else if (newblk->nb_list.wk_type == D_ALLOCINDIR) handle_allocindir_partdone( WK_ALLOCINDIR(&newblk->nb_list)); else if (newblk->nb_list.wk_type != D_NEWBLK) panic("handle_written_bmsafemap: Unexpected type: %s", TYPENAME(newblk->nb_list.wk_type)); } while ((inodedep = LIST_FIRST(&bmsafemap->sm_inodedepwr)) != NULL) { inodedep->id_state |= DEPCOMPLETE; inodedep->id_state &= ~ONDEPLIST; LIST_REMOVE(inodedep, id_deps); inodedep->id_bmsafemap = NULL; } LIST_REMOVE(bmsafemap, sm_next); if (chgs == 0 && LIST_EMPTY(&bmsafemap->sm_jaddrefhd) && LIST_EMPTY(&bmsafemap->sm_jnewblkhd) && LIST_EMPTY(&bmsafemap->sm_newblkhd) && LIST_EMPTY(&bmsafemap->sm_inodedephd) && LIST_EMPTY(&bmsafemap->sm_freehd)) { LIST_REMOVE(bmsafemap, sm_hash); WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); return (0); } LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next); if (foreground) bdirty(bp); return (1); } /* * Try to free a mkdir dependency. */ static void complete_mkdir(mkdir) struct mkdir *mkdir; { struct diradd *dap; if ((mkdir->md_state & ALLCOMPLETE) != ALLCOMPLETE) return; LIST_REMOVE(mkdir, md_mkdirs); dap = mkdir->md_diradd; dap->da_state &= ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)); if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) { dap->da_state |= DEPCOMPLETE; complete_diradd(dap); } WORKITEM_FREE(mkdir, D_MKDIR); } /* * Handle the completion of a mkdir dependency. */ static void handle_written_mkdir(mkdir, type) struct mkdir *mkdir; int type; { if ((mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)) != type) panic("handle_written_mkdir: bad type"); mkdir->md_state |= COMPLETE; complete_mkdir(mkdir); } static int free_pagedep(pagedep) struct pagedep *pagedep; { int i; if (pagedep->pd_state & NEWBLOCK) return (0); if (!LIST_EMPTY(&pagedep->pd_dirremhd)) return (0); for (i = 0; i < DAHASHSZ; i++) if (!LIST_EMPTY(&pagedep->pd_diraddhd[i])) return (0); if (!LIST_EMPTY(&pagedep->pd_pendinghd)) return (0); if (!LIST_EMPTY(&pagedep->pd_jmvrefhd)) return (0); if (pagedep->pd_state & ONWORKLIST) WORKLIST_REMOVE(&pagedep->pd_list); LIST_REMOVE(pagedep, pd_hash); WORKITEM_FREE(pagedep, D_PAGEDEP); return (1); } /* * Called from within softdep_disk_write_complete above. * A write operation was just completed. Removed inodes can * now be freed and associated block pointers may be committed. 
 * Note that this routine is always called from interrupt level
 * with further interrupts from this device blocked.
 *
 * If the write did not succeed, we will do all the roll-forward
 * operations, but we will not take the actions that will allow its
 * dependencies to be processed.
 */
static int
handle_written_filepage(pagedep, bp, flags)
	struct pagedep *pagedep;
	struct buf *bp;		/* buffer containing the written page */
	int flags;
{
	struct dirrem *dirrem;
	struct diradd *dap, *nextdap;
	struct direct *ep;
	int i, chgs;

	if ((pagedep->pd_state & IOSTARTED) == 0)
		panic("handle_written_filepage: not started");
	pagedep->pd_state &= ~IOSTARTED;
	if ((flags & WRITESUCCEEDED) == 0)
		goto rollforward;
	/*
	 * Process any directory removals that have been committed.
	 */
	while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) {
		LIST_REMOVE(dirrem, dm_next);
		dirrem->dm_state |= COMPLETE;
		dirrem->dm_dirinum = pagedep->pd_ino;
		KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd),
		    ("handle_written_filepage: Journal entries not written."));
		add_to_worklist(&dirrem->dm_list, 0);
	}
	/*
	 * Free any directory additions that have been committed.
	 * If it is a newly allocated block, we have to wait until
	 * the on-disk directory inode claims the new block.
	 */
	if ((pagedep->pd_state & NEWBLOCK) == 0)
		while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
			free_diradd(dap, NULL);
rollforward:
	/*
	 * Uncommitted directory entries must be restored.
	 */
	for (chgs = 0, i = 0; i < DAHASHSZ; i++) {
		for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap;
		     dap = nextdap) {
			nextdap = LIST_NEXT(dap, da_pdlist);
			if (dap->da_state & ATTACHED)
				panic("handle_written_filepage: attached");
			ep = (struct direct *)
			    ((char *)bp->b_data + dap->da_offset);
			ep->d_ino = dap->da_newinum;
			dap->da_state &= ~UNDONE;
			dap->da_state |= ATTACHED;
			chgs = 1;
			/*
			 * If the inode referenced by the directory has
			 * been written out, then the dependency can be
			 * moved to the pending list.
			 */
			if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
				LIST_REMOVE(dap, da_pdlist);
				LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap,
				    da_pdlist);
			}
		}
	}
	/*
	 * If there were any rollbacks in the directory, then it must be
	 * marked dirty so that it will eventually get written back in
	 * its correct form.
	 */
	if (chgs || (flags & WRITESUCCEEDED) == 0) {
		if ((bp->b_flags & B_DELWRI) == 0)
			stat_dir_entry++;
		bdirty(bp);
		return (1);
	}
	/*
	 * If we are not waiting for a new directory block to be
	 * claimed by its inode, then the pagedep will be freed.
	 * Otherwise it will remain to track any new entries on
	 * the page in case they are fsync'ed.
	 */
	free_pagedep(pagedep);
	return (0);
}

/*
 * Writing back in-core inode structures.
 *
 * The filesystem only accesses an inode's contents when it occupies an
 * "in-core" inode structure. These "in-core" structures are separate from
 * the page frames used to cache inode blocks. Only the latter are
 * transferred to/from the disk. So, when the updated contents of the
 * "in-core" inode structure are copied to the corresponding in-memory inode
 * block, the dependencies are also transferred. The following procedure is
 * called when copying a dirty "in-core" inode to a cached inode block.
 */

/*
 * Called when an inode is loaded from disk. If the effective link count
 * differed from the actual link count when it was last flushed, then we
 * need to ensure that the correct effective link count is put back.
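 * The difference is tracked in the inodedep's id_nlinkdelta and is
 * subtracted from i_nlink to recover i_effnlink below.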
*/ void softdep_load_inodeblock(ip) struct inode *ip; /* the "in_core" copy of the inode */ { struct inodedep *inodedep; struct ufsmount *ump; ump = ITOUMP(ip); KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, ("softdep_load_inodeblock called on non-softdep filesystem")); /* * Check for alternate nlink count. */ ip->i_effnlink = ip->i_nlink; ACQUIRE_LOCK(ump); if (inodedep_lookup(UFSTOVFS(ump), ip->i_number, 0, &inodedep) == 0) { FREE_LOCK(ump); return; } ip->i_effnlink -= inodedep->id_nlinkdelta; FREE_LOCK(ump); } /* * This routine is called just before the "in-core" inode * information is to be copied to the in-memory inode block. * Recall that an inode block contains several inodes. If * the force flag is set, then the dependencies will be * cleared so that the update can always be made. Note that * the buffer is locked when this routine is called, so we * will never be in the middle of writing the inode block * to disk. */ void softdep_update_inodeblock(ip, bp, waitfor) struct inode *ip; /* the "in_core" copy of the inode */ struct buf *bp; /* the buffer containing the inode block */ int waitfor; /* nonzero => update must be allowed */ { struct inodedep *inodedep; struct inoref *inoref; struct ufsmount *ump; struct worklist *wk; struct mount *mp; struct buf *ibp; struct fs *fs; int error; ump = ITOUMP(ip); mp = UFSTOVFS(ump); KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_update_inodeblock called on non-softdep filesystem")); fs = ump->um_fs; /* * Preserve the freelink that is on disk. clear_unlinked_inodedep() * does not have access to the in-core ip so must write directly into * the inode block buffer when setting freelink. */ if (fs->fs_magic == FS_UFS1_MAGIC) DIP_SET(ip, i_freelink, ((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number))->di_freelink); else DIP_SET(ip, i_freelink, ((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number))->di_freelink); /* * If the effective link count is not equal to the actual link * count, then we must track the difference in an inodedep while * the inode is (potentially) tossed out of the cache. Otherwise, * if there is no existing inodedep, then there are no dependencies * to track. */ ACQUIRE_LOCK(ump); again: if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) { FREE_LOCK(ump); if (ip->i_effnlink != ip->i_nlink) panic("softdep_update_inodeblock: bad link count"); return; } if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink) panic("softdep_update_inodeblock: bad delta"); /* * If we're flushing all dependencies we must also move any waiting * for journal writes onto the bufwait list prior to I/O. */ if (waitfor) { TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) { if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY)) == DEPCOMPLETE) { jwait(&inoref->if_list, MNT_WAIT); goto again; } } } /* * Changes have been initiated. Anything depending on these * changes cannot occur until this inode has been written. */ inodedep->id_state &= ~COMPLETE; if ((inodedep->id_state & ONWORKLIST) == 0) WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list); /* * Any new dependencies associated with the incore inode must * now be moved to the list associated with the buffer holding * the in-memory copy of the inode. Once merged process any * allocdirects that are completed by the merger. 
*/ merge_inode_lists(&inodedep->id_newinoupdt, &inodedep->id_inoupdt); if (!TAILQ_EMPTY(&inodedep->id_inoupdt)) handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt), NULL); merge_inode_lists(&inodedep->id_newextupdt, &inodedep->id_extupdt); if (!TAILQ_EMPTY(&inodedep->id_extupdt)) handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_extupdt), NULL); /* * Now that the inode has been pushed into the buffer, the * operations dependent on the inode being written to disk * can be moved to the id_bufwait so that they will be * processed when the buffer I/O completes. */ while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) { WORKLIST_REMOVE(wk); WORKLIST_INSERT(&inodedep->id_bufwait, wk); } /* * Newly allocated inodes cannot be written until the bitmap * that allocates them has been written (indicated by * DEPCOMPLETE being set in id_state). If we are doing a * forced sync (e.g., an fsync on a file), we force the bitmap * to be written so that the update can be done. */ if (waitfor == 0) { FREE_LOCK(ump); return; } retry: if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) != 0) { FREE_LOCK(ump); return; } ibp = inodedep->id_bmsafemap->sm_buf; ibp = getdirtybuf(ibp, LOCK_PTR(ump), MNT_WAIT); if (ibp == NULL) { /* * If ibp came back as NULL, the dependency could have been * freed while we slept. Look it up again, and check to see * that it has completed. */ if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) goto retry; FREE_LOCK(ump); return; } FREE_LOCK(ump); if ((error = bwrite(ibp)) != 0) softdep_error("softdep_update_inodeblock: bwrite", error); } /* * Merge a new inode dependency list (such as id_newinoupdt) into an * old inode dependency list (such as id_inoupdt). This routine must be * called with splbio interrupts blocked. */ static void merge_inode_lists(newlisthead, oldlisthead) struct allocdirectlst *newlisthead; struct allocdirectlst *oldlisthead; { struct allocdirect *listadp, *newadp; newadp = TAILQ_FIRST(newlisthead); for (listadp = TAILQ_FIRST(oldlisthead); listadp && newadp;) { if (listadp->ad_offset < newadp->ad_offset) { listadp = TAILQ_NEXT(listadp, ad_next); continue; } TAILQ_REMOVE(newlisthead, newadp, ad_next); TAILQ_INSERT_BEFORE(listadp, newadp, ad_next); if (listadp->ad_offset == newadp->ad_offset) { allocdirect_merge(oldlisthead, newadp, listadp); listadp = newadp; } newadp = TAILQ_FIRST(newlisthead); } while ((newadp = TAILQ_FIRST(newlisthead)) != NULL) { TAILQ_REMOVE(newlisthead, newadp, ad_next); TAILQ_INSERT_TAIL(oldlisthead, newadp, ad_next); } } /* * If we are doing an fsync, then we must ensure that any directory * entries for the inode have been written after the inode gets to disk.
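 *
 * For example, after creat("d/f"); fsync(fd), the loop below walks the
 * diradd records on id_pendinghd, brings in the parent directory "d",
 * and writes the directory page holding the entry for "f", so that the
 * fsync'ed inode is reachable by name after a crash. (A sketch of the
 * contract; the MKDIR_PARENT and MKDIR_BODY cases add further steps.)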
*/ int softdep_fsync(vp) struct vnode *vp; /* the "in_core" copy of the inode */ { struct inodedep *inodedep; struct pagedep *pagedep; struct inoref *inoref; struct ufsmount *ump; struct worklist *wk; struct diradd *dap; struct mount *mp; struct vnode *pvp; struct inode *ip; struct buf *bp; struct fs *fs; struct thread *td = curthread; int error, flushparent, pagedep_new_block; ino_t parentino; ufs_lbn_t lbn; ip = VTOI(vp); mp = vp->v_mount; ump = VFSTOUFS(mp); fs = ump->um_fs; if (MOUNTEDSOFTDEP(mp) == 0) return (0); ACQUIRE_LOCK(ump); restart: if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) { FREE_LOCK(ump); return (0); } TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) { if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY)) == DEPCOMPLETE) { jwait(&inoref->if_list, MNT_WAIT); goto restart; } } if (!LIST_EMPTY(&inodedep->id_inowait) || !TAILQ_EMPTY(&inodedep->id_extupdt) || !TAILQ_EMPTY(&inodedep->id_newextupdt) || !TAILQ_EMPTY(&inodedep->id_inoupdt) || !TAILQ_EMPTY(&inodedep->id_newinoupdt)) panic("softdep_fsync: pending ops %p", inodedep); for (error = 0, flushparent = 0; ; ) { if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL) break; if (wk->wk_type != D_DIRADD) panic("softdep_fsync: Unexpected type %s", TYPENAME(wk->wk_type)); dap = WK_DIRADD(wk); /* * Flush our parent if this directory entry has a MKDIR_PARENT * dependency or is contained in a newly allocated block. */ if (dap->da_state & DIRCHG) pagedep = dap->da_previous->dm_pagedep; else pagedep = dap->da_pagedep; parentino = pagedep->pd_ino; lbn = pagedep->pd_lbn; if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE) panic("softdep_fsync: dirty"); if ((dap->da_state & MKDIR_PARENT) || (pagedep->pd_state & NEWBLOCK)) flushparent = 1; else flushparent = 0; /* * If we are being fsync'ed as part of vgone'ing this vnode, * then we will not be able to release and recover the * vnode below, so we just have to give up on writing its * directory entry out. It will eventually be written, just * not now, but then the user was not asking to have it * written, so we are not breaking any promises. */ if (vp->v_iflag & VI_DOOMED) break; /* * We prevent deadlock by always fetching inodes from the * root, moving down the directory tree. Thus, when fetching * our parent directory, we first try to get the lock. If * that fails, we must unlock ourselves before requesting * the lock on our parent. See the comment in ufs_lookup * for details on possible races. */ FREE_LOCK(ump); if (ffs_vgetf(mp, parentino, LK_NOWAIT | LK_EXCLUSIVE, &pvp, FFSV_FORCEINSMQ)) { error = vfs_busy(mp, MBF_NOWAIT); if (error != 0) { vfs_ref(mp); VOP_UNLOCK(vp, 0); error = vfs_busy(mp, 0); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); vfs_rel(mp); if (error != 0) return (ENOENT); if (vp->v_iflag & VI_DOOMED) { vfs_unbusy(mp); return (ENOENT); } } VOP_UNLOCK(vp, 0); error = ffs_vgetf(mp, parentino, LK_EXCLUSIVE, &pvp, FFSV_FORCEINSMQ); vfs_unbusy(mp); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); if (vp->v_iflag & VI_DOOMED) { if (error == 0) vput(pvp); error = ENOENT; } if (error != 0) return (error); } /* * All MKDIR_PARENT dependencies and all the NEWBLOCK pagedeps * that are contained in direct blocks will be resolved by * doing a ffs_update. Pagedeps contained in indirect blocks * may require a complete sync'ing of the directory. So, we * try the cheap and fast ffs_update first, and if that fails, * then we do the slower ffs_syncvnode of the directory. 
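 *
 * In outline, the code below is
 *
 *	error = ffs_update(pvp, 1);		/* cheap: flush the inode */
 *	if (error == 0 && pagedep_new_block)	/* dep survived the update */
 *		error = ffs_syncvnode(pvp, MNT_WAIT, 0);	/* slow path */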
*/ if (flushparent) { int locked; if ((error = ffs_update(pvp, 1)) != 0) { vput(pvp); return (error); } ACQUIRE_LOCK(ump); locked = 1; if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) { if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) != NULL) { if (wk->wk_type != D_DIRADD) panic("softdep_fsync: Unexpected type %s", TYPENAME(wk->wk_type)); dap = WK_DIRADD(wk); if (dap->da_state & DIRCHG) pagedep = dap->da_previous->dm_pagedep; else pagedep = dap->da_pagedep; pagedep_new_block = pagedep->pd_state & NEWBLOCK; FREE_LOCK(ump); locked = 0; if (pagedep_new_block && (error = ffs_syncvnode(pvp, MNT_WAIT, 0))) { vput(pvp); return (error); } } } if (locked) FREE_LOCK(ump); } /* * Flush directory page containing the inode's name. */ error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), td->td_ucred, &bp); if (error == 0) error = bwrite(bp); else brelse(bp); vput(pvp); if (error != 0) return (error); ACQUIRE_LOCK(ump); if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) break; } FREE_LOCK(ump); return (0); } /* * Flush all the dirty bitmaps associated with the block device * before flushing the rest of the dirty blocks so as to reduce * the number of dependencies that will have to be rolled back. * * XXX Unused? */ void softdep_fsync_mountdev(vp) struct vnode *vp; { struct buf *bp, *nbp; struct worklist *wk; struct bufobj *bo; if (!vn_isdisk(vp, NULL)) panic("softdep_fsync_mountdev: vnode not a disk"); bo = &vp->v_bufobj; restart: BO_LOCK(bo); TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { /* * If it is already scheduled, skip to the next buffer. */ if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) continue; if ((bp->b_flags & B_DELWRI) == 0) panic("softdep_fsync_mountdev: not dirty"); /* * We are only interested in bitmaps with outstanding * dependencies. */ if ((wk = LIST_FIRST(&bp->b_dep)) == NULL || wk->wk_type != D_BMSAFEMAP || (bp->b_vflags & BV_BKGRDINPROG)) { BUF_UNLOCK(bp); continue; } BO_UNLOCK(bo); bremfree(bp); (void) bawrite(bp); goto restart; } drain_output(vp); BO_UNLOCK(bo); } /* * Sync all cylinder groups that were dirty at the time this function is * called. Newly dirtied cgs will be inserted before the sentinel. This * is used to flush freedep activity that may be holding up writes to an * indirect block. */ static int sync_cgs(mp, waitfor) struct mount *mp; int waitfor; { struct bmsafemap *bmsafemap; struct bmsafemap *sentinel; struct ufsmount *ump; struct buf *bp; int error; sentinel = malloc(sizeof(*sentinel), M_BMSAFEMAP, M_ZERO | M_WAITOK); sentinel->sm_cg = -1; ump = VFSTOUFS(mp); error = 0; ACQUIRE_LOCK(ump); LIST_INSERT_HEAD(&ump->softdep_dirtycg, sentinel, sm_next); for (bmsafemap = LIST_NEXT(sentinel, sm_next); bmsafemap != NULL; bmsafemap = LIST_NEXT(sentinel, sm_next)) { /* Skip sentinels and cgs with no work to release. */ if (bmsafemap->sm_cg == -1 || (LIST_EMPTY(&bmsafemap->sm_freehd) && LIST_EMPTY(&bmsafemap->sm_freewr))) { LIST_REMOVE(sentinel, sm_next); LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next); continue; } /* * If we don't get the lock and we're waiting, try again; if * not, move on to the next buf and try to sync it.
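 *
 * The sentinel keeps our place in softdep_dirtycg across the lock
 * drops: each iteration fetches LIST_NEXT(sentinel, sm_next) and then
 * reinserts the sentinel after the cg just handled, so cgs dirtied
 * during the walk (which land before the sentinel) are not revisited.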
*/ bp = getdirtybuf(bmsafemap->sm_buf, LOCK_PTR(ump), waitfor); if (bp == NULL && waitfor == MNT_WAIT) continue; LIST_REMOVE(sentinel, sm_next); LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next); if (bp == NULL) continue; FREE_LOCK(ump); if (waitfor == MNT_NOWAIT) bawrite(bp); else error = bwrite(bp); ACQUIRE_LOCK(ump); if (error) break; } LIST_REMOVE(sentinel, sm_next); FREE_LOCK(ump); free(sentinel, M_BMSAFEMAP); return (error); } /* * This routine is called when we are trying to synchronously flush a * file. This routine must eliminate any filesystem metadata dependencies * so that the syncing routine can succeed. */ int softdep_sync_metadata(struct vnode *vp) { struct inode *ip; int error; ip = VTOI(vp); KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0, ("softdep_sync_metadata called on non-softdep filesystem")); /* * Ensure that any direct block dependencies have been cleared, * truncations are started, and inode references are journaled. */ ACQUIRE_LOCK(VFSTOUFS(vp->v_mount)); /* * Write all journal records to prevent rollbacks on devvp. */ if (vp->v_type == VCHR) softdep_flushjournal(vp->v_mount); error = flush_inodedep_deps(vp, vp->v_mount, ip->i_number); /* * Ensure that all truncates are written so we won't find deps on * indirect blocks. */ process_truncates(vp); FREE_LOCK(VFSTOUFS(vp->v_mount)); return (error); } /* * This routine is called when we are attempting to sync a buf with * dependencies. If waitfor is MNT_NOWAIT it attempts to schedule any * other IO it can but returns EBUSY if the buffer is not yet able to * be written. Dependencies which will not cause rollbacks will always * return 0. */ int softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor) { struct indirdep *indirdep; struct pagedep *pagedep; struct allocindir *aip; struct newblk *newblk; struct ufsmount *ump; struct buf *nbp; struct worklist *wk; int i, error; KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0, ("softdep_sync_buf called on non-softdep filesystem")); /* * For VCHR we just don't want to force flush any dependencies that * will cause rollbacks. */ if (vp->v_type == VCHR) { if (waitfor == MNT_NOWAIT && softdep_count_dependencies(bp, 0)) return (EBUSY); return (0); } ump = VFSTOUFS(vp->v_mount); ACQUIRE_LOCK(ump); /* * As we hold the buffer locked, none of its dependencies * will disappear. 
*/ error = 0; top: LIST_FOREACH(wk, &bp->b_dep, wk_list) { switch (wk->wk_type) { case D_ALLOCDIRECT: case D_ALLOCINDIR: newblk = WK_NEWBLK(wk); if (newblk->nb_jnewblk != NULL) { if (waitfor == MNT_NOWAIT) { error = EBUSY; goto out_unlock; } jwait(&newblk->nb_jnewblk->jn_list, waitfor); goto top; } if (newblk->nb_state & DEPCOMPLETE || waitfor == MNT_NOWAIT) continue; nbp = newblk->nb_bmsafemap->sm_buf; nbp = getdirtybuf(nbp, LOCK_PTR(ump), waitfor); if (nbp == NULL) goto top; FREE_LOCK(ump); if ((error = bwrite(nbp)) != 0) goto out; ACQUIRE_LOCK(ump); continue; case D_INDIRDEP: indirdep = WK_INDIRDEP(wk); if (waitfor == MNT_NOWAIT) { if (!TAILQ_EMPTY(&indirdep->ir_trunc) || !LIST_EMPTY(&indirdep->ir_deplisthd)) { error = EBUSY; goto out_unlock; } } if (!TAILQ_EMPTY(&indirdep->ir_trunc)) panic("softdep_sync_buf: truncation pending."); restart: LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) { newblk = (struct newblk *)aip; if (newblk->nb_jnewblk != NULL) { jwait(&newblk->nb_jnewblk->jn_list, waitfor); goto restart; } if (newblk->nb_state & DEPCOMPLETE) continue; nbp = newblk->nb_bmsafemap->sm_buf; nbp = getdirtybuf(nbp, LOCK_PTR(ump), waitfor); if (nbp == NULL) goto restart; FREE_LOCK(ump); if ((error = bwrite(nbp)) != 0) goto out; ACQUIRE_LOCK(ump); goto restart; } continue; case D_PAGEDEP: /* * Only flush directory entries in synchronous passes. */ if (waitfor != MNT_WAIT) { error = EBUSY; goto out_unlock; } /* * While syncing snapshots, we must allow recursive * lookups. */ BUF_AREC(bp); /* * We are trying to sync a directory that may * have dependencies on both its own metadata * and/or dependencies on the inodes of any * recently allocated files. We walk its diradd * lists pushing out the associated inode. */ pagedep = WK_PAGEDEP(wk); for (i = 0; i < DAHASHSZ; i++) { if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == 0) continue; if ((error = flush_pagedep_deps(vp, wk->wk_mp, &pagedep->pd_diraddhd[i]))) { BUF_NOREC(bp); goto out_unlock; } } BUF_NOREC(bp); continue; case D_FREEWORK: case D_FREEDEP: case D_JSEGDEP: case D_JNEWBLK: continue; default: panic("softdep_sync_buf: Unknown type %s", TYPENAME(wk->wk_type)); /* NOTREACHED */ } } out_unlock: FREE_LOCK(ump); out: return (error); } /* * Flush the dependencies associated with an inodedep. * Called with splbio blocked. */ static int flush_inodedep_deps(vp, mp, ino) struct vnode *vp; struct mount *mp; ino_t ino; { struct inodedep *inodedep; struct inoref *inoref; struct ufsmount *ump; int error, waitfor; /* * This work is done in two passes. The first pass grabs most * of the buffers and begins asynchronously writing them. The * only way to wait for these asynchronous writes is to sleep * on the filesystem vnode which may stay busy for a long time * if the filesystem is active. So, instead, we make a second * pass over the dependencies blocking on each write. In the * usual case we will be blocking against a write that we * initiated, so when it is done the dependency will have been * resolved. Thus the second pass is expected to end quickly. * We give a brief window at the top of the loop to allow * any pending I/O to complete. 
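 *
 * Schematically:
 *
 *	waitfor = MNT_NOWAIT;	/* pass 1: bawrite() what we can */
 *	...
 *	waitfor = MNT_WAIT;	/* pass 2: bwrite() and block */
 *
 * with flush_deplist() returning nonzero to restart the scan whenever
 * it initiates a write, since the lists may change while the lock is
 * dropped.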
*/ ump = VFSTOUFS(mp); LOCK_OWNED(ump); for (error = 0, waitfor = MNT_NOWAIT; ; ) { if (error) return (error); FREE_LOCK(ump); ACQUIRE_LOCK(ump); restart: if (inodedep_lookup(mp, ino, 0, &inodedep) == 0) return (0); TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) { if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY)) == DEPCOMPLETE) { jwait(&inoref->if_list, MNT_WAIT); goto restart; } } if (flush_deplist(&inodedep->id_inoupdt, waitfor, &error) || flush_deplist(&inodedep->id_newinoupdt, waitfor, &error) || flush_deplist(&inodedep->id_extupdt, waitfor, &error) || flush_deplist(&inodedep->id_newextupdt, waitfor, &error)) continue; /* * If this was pass 2, we are done; otherwise start pass 2. */ if (waitfor == MNT_WAIT) break; waitfor = MNT_WAIT; } /* * Try freeing inodedep in case all dependencies have been removed. */ if (inodedep_lookup(mp, ino, 0, &inodedep) != 0) (void) free_inodedep(inodedep); return (0); } /* * Flush an inode dependency list. * Called with splbio blocked. */ static int flush_deplist(listhead, waitfor, errorp) struct allocdirectlst *listhead; int waitfor; int *errorp; { struct allocdirect *adp; struct newblk *newblk; struct ufsmount *ump; struct buf *bp; if ((adp = TAILQ_FIRST(listhead)) == NULL) return (0); ump = VFSTOUFS(adp->ad_list.wk_mp); LOCK_OWNED(ump); TAILQ_FOREACH(adp, listhead, ad_next) { newblk = (struct newblk *)adp; if (newblk->nb_jnewblk != NULL) { jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT); return (1); } if (newblk->nb_state & DEPCOMPLETE) continue; bp = newblk->nb_bmsafemap->sm_buf; bp = getdirtybuf(bp, LOCK_PTR(ump), waitfor); if (bp == NULL) { if (waitfor == MNT_NOWAIT) continue; return (1); } FREE_LOCK(ump); if (waitfor == MNT_NOWAIT) bawrite(bp); else *errorp = bwrite(bp); ACQUIRE_LOCK(ump); return (1); } return (0); } /* * Flush dependencies associated with an allocdirect block. */ static int flush_newblk_dep(vp, mp, lbn) struct vnode *vp; struct mount *mp; ufs_lbn_t lbn; { struct newblk *newblk; struct ufsmount *ump; struct bufobj *bo; struct inode *ip; struct buf *bp; ufs2_daddr_t blkno; int error; error = 0; bo = &vp->v_bufobj; ip = VTOI(vp); blkno = DIP(ip, i_db[lbn]); if (blkno == 0) panic("flush_newblk_dep: Missing block"); ump = VFSTOUFS(mp); ACQUIRE_LOCK(ump); /* * Loop until all dependencies related to this block are satisfied. * We must be careful to restart after each sleep in case a write * completes some part of this process for us. */ for (;;) { if (newblk_lookup(mp, blkno, 0, &newblk) == 0) { FREE_LOCK(ump); break; } if (newblk->nb_list.wk_type != D_ALLOCDIRECT) panic("flush_newblk_dep: Bad newblk %p", newblk); /* * Flush the journal. */ if (newblk->nb_jnewblk != NULL) { jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT); continue; } /* * Write the bitmap dependency. */ if ((newblk->nb_state & DEPCOMPLETE) == 0) { bp = newblk->nb_bmsafemap->sm_buf; bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT); if (bp == NULL) continue; FREE_LOCK(ump); error = bwrite(bp); if (error) break; ACQUIRE_LOCK(ump); continue; } /* * Write the buffer. */ FREE_LOCK(ump); BO_LOCK(bo); bp = gbincore(bo, lbn); if (bp != NULL) { error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo)); if (error == ENOLCK) { ACQUIRE_LOCK(ump); error = 0; continue; /* Slept, retry */ } if (error != 0) break; /* Failed */ if (bp->b_flags & B_DELWRI) { bremfree(bp); error = bwrite(bp); if (error) break; } else BUF_UNLOCK(bp); } else BO_UNLOCK(bo); /* * We have to wait for the direct pointers to * point at the newdirblk before the dependency * will go away.
*/ error = ffs_update(vp, 1); if (error) break; ACQUIRE_LOCK(ump); } return (error); } /* * Eliminate a pagedep dependency by flushing out all its diradd dependencies. * Called with splbio blocked. */ static int flush_pagedep_deps(pvp, mp, diraddhdp) struct vnode *pvp; struct mount *mp; struct diraddhd *diraddhdp; { struct inodedep *inodedep; struct inoref *inoref; struct ufsmount *ump; struct diradd *dap; struct vnode *vp; int error = 0; struct buf *bp; ino_t inum; struct diraddhd unfinished; LIST_INIT(&unfinished); ump = VFSTOUFS(mp); LOCK_OWNED(ump); restart: while ((dap = LIST_FIRST(diraddhdp)) != NULL) { /* * Flush ourselves if this directory entry * has a MKDIR_PARENT dependency. */ if (dap->da_state & MKDIR_PARENT) { FREE_LOCK(ump); if ((error = ffs_update(pvp, 1)) != 0) break; ACQUIRE_LOCK(ump); /* * If that cleared dependencies, go on to next. */ if (dap != LIST_FIRST(diraddhdp)) continue; /* * All MKDIR_PARENT dependencies and all the * NEWBLOCK pagedeps that are contained in direct * blocks were resolved by doing above ffs_update. * Pagedeps contained in indirect blocks may * require a complete sync'ing of the directory. * We are in the midst of doing a complete sync, * so if they are not resolved in this pass we * defer them for now as they will be sync'ed by * our caller shortly. */ LIST_REMOVE(dap, da_pdlist); LIST_INSERT_HEAD(&unfinished, dap, da_pdlist); continue; } /* * A newly allocated directory must have its "." and * ".." entries written out before its name can be * committed in its parent. */ inum = dap->da_newinum; if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0) panic("flush_pagedep_deps: lost inode1"); /* * Wait for any pending journal adds to complete so we don't * cause rollbacks while syncing. */ TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) { if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY)) == DEPCOMPLETE) { jwait(&inoref->if_list, MNT_WAIT); goto restart; } } if (dap->da_state & MKDIR_BODY) { FREE_LOCK(ump); if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp, FFSV_FORCEINSMQ))) break; error = flush_newblk_dep(vp, mp, 0); /* * If we still have the dependency we might need to * update the vnode to sync the new link count to * disk. */ if (error == 0 && dap == LIST_FIRST(diraddhdp)) error = ffs_update(vp, 1); vput(vp); if (error != 0) break; ACQUIRE_LOCK(ump); /* * If that cleared dependencies, go on to next. */ if (dap != LIST_FIRST(diraddhdp)) continue; if (dap->da_state & MKDIR_BODY) { inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep); panic("flush_pagedep_deps: MKDIR_BODY " "inodedep %p dap %p vp %p", inodedep, dap, vp); } } /* * Flush the inode on which the directory entry depends. * Having accounted for MKDIR_PARENT and MKDIR_BODY above, * the only remaining dependency is that the updated inode * count must get pushed to disk. The inode has already * been pushed into its inode buffer (via VOP_UPDATE) at * the time of the reference count change. So we need only * locate that buffer, ensure that there will be no rollback * caused by a bitmap dependency, then write the inode buffer. */ retry: if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0) panic("flush_pagedep_deps: lost inode"); /* * If the inode still has bitmap dependencies, * push them to disk. 
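 *
 * This is the usual getdirtybuf() retry idiom: the call may sleep and
 * return NULL if the buffer went away, in which case the inodedep must
 * be looked up afresh (the "retry" label above) before trying again:
 *
 *	bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT);
 *	if (bp == NULL)
 *		goto retry;	/* slept; state may have changed */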
*/ if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) == 0) { bp = inodedep->id_bmsafemap->sm_buf; bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT); if (bp == NULL) goto retry; FREE_LOCK(ump); if ((error = bwrite(bp)) != 0) break; ACQUIRE_LOCK(ump); if (dap != LIST_FIRST(diraddhdp)) continue; } /* * If the inode is still sitting in a buffer waiting * to be written or waiting for the link count to be * adjusted update it here to flush it to disk. */ if (dap == LIST_FIRST(diraddhdp)) { FREE_LOCK(ump); if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp, FFSV_FORCEINSMQ))) break; error = ffs_update(vp, 1); vput(vp); if (error) break; ACQUIRE_LOCK(ump); } /* * If we have failed to get rid of all the dependencies * then something is seriously wrong. */ if (dap == LIST_FIRST(diraddhdp)) { inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep); panic("flush_pagedep_deps: failed to flush " "inodedep %p ino %ju dap %p", inodedep, (uintmax_t)inum, dap); } } if (error) ACQUIRE_LOCK(ump); while ((dap = LIST_FIRST(&unfinished)) != NULL) { LIST_REMOVE(dap, da_pdlist); LIST_INSERT_HEAD(diraddhdp, dap, da_pdlist); } return (error); } /* * A large burst of file addition or deletion activity can drive the * memory load excessively high. First attempt to slow things down * using the techniques below. If that fails, this routine requests * the offending operations to fall back to running synchronously * until the memory load returns to a reasonable level. */ int softdep_slowdown(vp) struct vnode *vp; { struct ufsmount *ump; int jlow; int max_softdeps_hard; KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0, ("softdep_slowdown called on non-softdep filesystem")); ump = VFSTOUFS(vp->v_mount); ACQUIRE_LOCK(ump); jlow = 0; /* * Check for journal space if needed. */ if (DOINGSUJ(vp)) { if (journal_space(ump, 0) == 0) jlow = 1; } /* * If the system is under its limits and our filesystem is * not responsible for more than our share of the usage and * we are not low on journal space, then no need to slow down. */ max_softdeps_hard = max_softdeps * 11 / 10; if (dep_current[D_DIRREM] < max_softdeps_hard / 2 && dep_current[D_INODEDEP] < max_softdeps_hard && dep_current[D_INDIRDEP] < max_softdeps_hard / 1000 && dep_current[D_FREEBLKS] < max_softdeps_hard && jlow == 0 && ump->softdep_curdeps[D_DIRREM] < (max_softdeps_hard / 2) / stat_flush_threads && ump->softdep_curdeps[D_INODEDEP] < max_softdeps_hard / stat_flush_threads && ump->softdep_curdeps[D_INDIRDEP] < (max_softdeps_hard / 1000) / stat_flush_threads && ump->softdep_curdeps[D_FREEBLKS] < max_softdeps_hard / stat_flush_threads) { FREE_LOCK(ump); return (0); } /* * If the journal is low or our filesystem is over its limit * then speedup the cleanup. */ if (ump->softdep_curdeps[D_INDIRDEP] < (max_softdeps_hard / 1000) / stat_flush_threads || jlow) softdep_speedup(ump); stat_sync_limit_hit += 1; FREE_LOCK(ump); /* * We only slow down the rate at which new dependencies are * generated if we are not using journaling. With journaling, * the cleanup should always be sufficient to keep things * under control. */ if (DOINGSUJ(vp)) return (0); return (1); } /* * Called by the allocation routines when they are about to fail * in the hope that we can free up the requested resource (inodes * or disk space). * * First check to see if the work list has anything on it. If it has, * clean up entries until we successfully free the requested resource. * Because this process holds inodes locked, we cannot handle any remove * requests that might block on a locked inode as that could lead to * deadlock. 
If the worklist yields none of the requested resource, * start syncing out vnodes to free up the needed space. */ int softdep_request_cleanup(fs, vp, cred, resource) struct fs *fs; struct vnode *vp; struct ucred *cred; int resource; { struct ufsmount *ump; struct mount *mp; - struct vnode *lvp, *mvp; long starttime; ufs2_daddr_t needed; - int error; + int error, failed_vnode; /* * If we are being called because of a process doing a * copy-on-write, then it is not safe to process any * worklist items as we will recurse into the copyonwrite * routine. This will result in an incoherent snapshot. * If the vnode that we hold is a snapshot, we must avoid * handling other resources that could cause deadlock. */ if ((curthread->td_pflags & TDP_COWINPROGRESS) || IS_SNAPSHOT(VTOI(vp))) return (0); if (resource == FLUSH_BLOCKS_WAIT) stat_cleanup_blkrequests += 1; else stat_cleanup_inorequests += 1; mp = vp->v_mount; ump = VFSTOUFS(mp); mtx_assert(UFS_MTX(ump), MA_OWNED); UFS_UNLOCK(ump); error = ffs_update(vp, 1); if (error != 0 || MOUNTEDSOFTDEP(mp) == 0) { UFS_LOCK(ump); return (0); } /* * If we are in need of resources, start by cleaning up * any block removals associated with our inode. */ ACQUIRE_LOCK(ump); process_removes(vp); process_truncates(vp); FREE_LOCK(ump); /* * Now clean up at least as many resources as we will need. * * When requested to clean up inodes, the number that are needed * is set by the number of simultaneous writers (mnt_writeopcount) * plus a bit of slop (2) in case some more writers show up while * we are cleaning. * * When requested to free up space, the amount of space that * we need is enough blocks to allocate a full-sized segment * (fs_contigsumsize). The number of such segments that will * be needed is set by the number of simultaneous writers * (mnt_writeopcount) plus a bit of slop (2) in case some more * writers show up while we are cleaning. * * Additionally, if we are unprivileged and allocating space, * we need to ensure that we clean up enough blocks to get the * needed number of blocks over the threshold of the minimum * number of blocks required to be kept free by the filesystem * (fs_minfree). */ if (resource == FLUSH_INODES_WAIT) { needed = vp->v_mount->mnt_writeopcount + 2; } else if (resource == FLUSH_BLOCKS_WAIT) { needed = (vp->v_mount->mnt_writeopcount + 2) * fs->fs_contigsumsize; if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0)) needed += fragstoblks(fs, roundup((fs->fs_dsize * fs->fs_minfree / 100) - fs->fs_cstotal.cs_nffree, fs->fs_frag)); } else { UFS_LOCK(ump); printf("softdep_request_cleanup: Unknown resource type %d\n", resource); return (0); } starttime = time_second; retry: if ((resource == FLUSH_BLOCKS_WAIT && ump->softdep_on_worklist > 0 && fs->fs_cstotal.cs_nbfree <= needed) || (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 && fs->fs_cstotal.cs_nifree <= needed)) { ACQUIRE_LOCK(ump); if (ump->softdep_on_worklist > 0 && process_worklist_item(UFSTOVFS(ump), ump->softdep_on_worklist, LK_NOWAIT) != 0) stat_worklist_push += 1; FREE_LOCK(ump); } /* * If we still need resources and there are no more worklist * entries to process to obtain them, we have to start flushing * the dirty vnodes to force the release of additional requests * to the worklist that we can then process to reap additional * resources. We walk the vnodes associated with the mount point * until we get the needed worklist requests that we can reap.
+ * + * If there are several threads all needing to clean the same + * mount point, only one is allowed to walk the mount list. + * When several threads all try to walk the same mount list, + * they end up competing with each other and often end up in + * livelock. This approach ensures that forward progress is + * made at the cost of occasional ENOSPC errors being returned + * that might otherwise have been avoided. */ + error = 1; if ((resource == FLUSH_BLOCKS_WAIT && fs->fs_cstotal.cs_nbfree <= needed) || (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 && fs->fs_cstotal.cs_nifree <= needed)) { - MNT_VNODE_FOREACH_ALL(lvp, mp, mvp) { - if (TAILQ_FIRST(&lvp->v_bufobj.bo_dirty.bv_hd) == 0) { - VI_UNLOCK(lvp); - continue; + ACQUIRE_LOCK(ump); + if ((ump->um_softdep->sd_flags & FLUSH_RC_ACTIVE) == 0) { + ump->um_softdep->sd_flags |= FLUSH_RC_ACTIVE; + FREE_LOCK(ump); + failed_vnode = softdep_request_cleanup_flush(mp, ump); + ACQUIRE_LOCK(ump); + ump->um_softdep->sd_flags &= ~FLUSH_RC_ACTIVE; + FREE_LOCK(ump); + if (ump->softdep_on_worklist > 0) { + stat_cleanup_retries += 1; + if (!failed_vnode) + goto retry; } - if (vget(lvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT, - curthread)) - continue; - if (lvp->v_vflag & VV_NOSYNC) { /* unlinked */ - vput(lvp); - continue; - } - (void) ffs_syncvnode(lvp, MNT_NOWAIT, 0); - vput(lvp); + } else { + FREE_LOCK(ump); + error = 0; } - lvp = ump->um_devvp; - if (vn_lock(lvp, LK_EXCLUSIVE | LK_NOWAIT) == 0) { - VOP_FSYNC(lvp, MNT_NOWAIT, curthread); - VOP_UNLOCK(lvp, 0); - } - if (ump->softdep_on_worklist > 0) { - stat_cleanup_retries += 1; - goto retry; - } stat_cleanup_failures += 1; } if (time_second - starttime > stat_cleanup_high_delay) stat_cleanup_high_delay = time_second - starttime; UFS_LOCK(ump); - return (1); + return (error); +} + +/* + * Scan the vnodes for the specified mount point flushing out any + * vnodes that can be locked without waiting. Finally, try to flush + * the device associated with the mount point if it can be locked + * without waiting. + * + * We return 0 if we were able to lock every vnode in our scan. + * If we had to skip one or more vnodes, we return 1. + */ +static int +softdep_request_cleanup_flush(mp, ump) + struct mount *mp; + struct ufsmount *ump; +{ + struct thread *td; + struct vnode *lvp, *mvp; + int failed_vnode; + + failed_vnode = 0; + td = curthread; + MNT_VNODE_FOREACH_ALL(lvp, mp, mvp) { + if (TAILQ_FIRST(&lvp->v_bufobj.bo_dirty.bv_hd) == 0) { + VI_UNLOCK(lvp); + continue; + } + if (vget(lvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT, + td) != 0) { + failed_vnode = 1; + continue; + } + if (lvp->v_vflag & VV_NOSYNC) { /* unlinked */ + vput(lvp); + continue; + } + (void) ffs_syncvnode(lvp, MNT_NOWAIT, 0); + vput(lvp); + } + lvp = ump->um_devvp; + if (vn_lock(lvp, LK_EXCLUSIVE | LK_NOWAIT) == 0) { + VOP_FSYNC(lvp, MNT_NOWAIT, td); + VOP_UNLOCK(lvp, 0); + } + return (failed_vnode); } static bool softdep_excess_items(struct ufsmount *ump, int item) { KASSERT(item >= 0 && item < D_LAST, ("item %d", item)); return (dep_current[item] > max_softdeps && ump->softdep_curdeps[item] > max_softdeps / stat_flush_threads); } static void schedule_cleanup(struct mount *mp) { struct ufsmount *ump; struct thread *td; ump = VFSTOUFS(mp); LOCK_OWNED(ump); FREE_LOCK(ump); td = curthread; if ((td->td_pflags & TDP_KTHREAD) != 0 && (td->td_proc->p_flag2 & P2_AST_SU) == 0) { /* * No ast is delivered to kernel threads, so nobody * would deref the mp. Some kernel threads * explicitly check for AST, e.g.
NFS daemon does * this in the serving loop. */ return; } if (td->td_su != NULL) vfs_rel(td->td_su); vfs_ref(mp); td->td_su = mp; thread_lock(td); td->td_flags |= TDF_ASTPENDING; thread_unlock(td); } static void softdep_ast_cleanup_proc(struct thread *td) { struct mount *mp; struct ufsmount *ump; int error; bool req; while ((mp = td->td_su) != NULL) { td->td_su = NULL; error = vfs_busy(mp, MBF_NOWAIT); vfs_rel(mp); if (error != 0) return; if (ffs_own_mount(mp) && MOUNTEDSOFTDEP(mp)) { ump = VFSTOUFS(mp); for (;;) { req = false; ACQUIRE_LOCK(ump); if (softdep_excess_items(ump, D_INODEDEP)) { req = true; request_cleanup(mp, FLUSH_INODES); } if (softdep_excess_items(ump, D_DIRREM)) { req = true; request_cleanup(mp, FLUSH_BLOCKS); } FREE_LOCK(ump); if (softdep_excess_items(ump, D_NEWBLK) || softdep_excess_items(ump, D_ALLOCDIRECT) || softdep_excess_items(ump, D_ALLOCINDIR)) { error = vn_start_write(NULL, &mp, V_WAIT); if (error == 0) { req = true; VFS_SYNC(mp, MNT_WAIT); vn_finished_write(mp); } } if ((td->td_pflags & TDP_KTHREAD) != 0 || !req) break; } } vfs_unbusy(mp); } if ((mp = td->td_su) != NULL) { td->td_su = NULL; vfs_rel(mp); } } /* * If memory utilization has gotten too high, deliberately slow things * down and speed up the I/O processing. */ static int request_cleanup(mp, resource) struct mount *mp; int resource; { struct thread *td = curthread; struct ufsmount *ump; ump = VFSTOUFS(mp); LOCK_OWNED(ump); /* * We never hold up the filesystem syncer or buf daemon. */ if (td->td_pflags & (TDP_SOFTDEP|TDP_NORUNNINGBUF)) return (0); /* * First check to see if the work list has gotten backlogged. * If it has, co-opt this process to help clean up two entries. * Because this process may hold inodes locked, we cannot * handle any remove requests that might block on a locked * inode as that could lead to deadlock. We set TDP_SOFTDEP * to avoid recursively processing the worklist. */ if (ump->softdep_on_worklist > max_softdeps / 10) { td->td_pflags |= TDP_SOFTDEP; process_worklist_item(mp, 2, LK_NOWAIT); td->td_pflags &= ~TDP_SOFTDEP; stat_worklist_push += 2; return(1); } /* * Next, we attempt to speed up the syncer process. If that * is successful, then we allow the process to continue. */ if (softdep_speedup(ump) && resource != FLUSH_BLOCKS_WAIT && resource != FLUSH_INODES_WAIT) return(0); /* * If we are resource constrained on inode dependencies, try * flushing some dirty inodes. Otherwise, we are constrained * by file deletions, so try accelerating flushes of directories * with removal dependencies. We would like to do the cleanup * here, but we probably hold an inode locked at this point and * that might deadlock against one that we try to clean. So, * the best that we can do is request the syncer daemon to do * the cleanup for us. */ switch (resource) { case FLUSH_INODES: case FLUSH_INODES_WAIT: ACQUIRE_GBLLOCK(&lk); stat_ino_limit_push += 1; req_clear_inodedeps += 1; FREE_GBLLOCK(&lk); stat_countp = &stat_ino_limit_hit; break; case FLUSH_BLOCKS: case FLUSH_BLOCKS_WAIT: ACQUIRE_GBLLOCK(&lk); stat_blk_limit_push += 1; req_clear_remove += 1; FREE_GBLLOCK(&lk); stat_countp = &stat_blk_limit_hit; break; default: panic("request_cleanup: unknown type"); } /* * Hopefully the syncer daemon will catch up and awaken us. * We wait at most tickdelay before proceeding in any case. */ ACQUIRE_GBLLOCK(&lk); FREE_LOCK(ump); proc_waiting += 1; if (callout_pending(&softdep_callout) == FALSE) callout_reset(&softdep_callout, tickdelay > 2 ? 
tickdelay : 2, pause_timer, 0); if ((td->td_pflags & TDP_KTHREAD) == 0) msleep((caddr_t)&proc_waiting, &lk, PPAUSE, "softupdate", 0); proc_waiting -= 1; FREE_GBLLOCK(&lk); ACQUIRE_LOCK(ump); return (1); } /* * Awaken processes pausing in request_cleanup and clear proc_waiting * to indicate that there is no longer a timer running. Pause_timer * will be called with the global softdep mutex (&lk) locked. */ static void pause_timer(arg) void *arg; { GBLLOCK_OWNED(&lk); /* * The callout_ API has acquired mtx and will hold it around this * function call. */ *stat_countp += proc_waiting; wakeup(&proc_waiting); } /* * If requested, try removing inode or removal dependencies. */ static void check_clear_deps(mp) struct mount *mp; { /* * If we are suspended, it may be because of our using * too many inodedeps, so help clear them out. */ if (MOUNTEDSUJ(mp) && VFSTOUFS(mp)->softdep_jblocks->jb_suspended) clear_inodedeps(mp); /* * General requests for cleanup of backed up dependencies */ ACQUIRE_GBLLOCK(&lk); if (req_clear_inodedeps) { req_clear_inodedeps -= 1; FREE_GBLLOCK(&lk); clear_inodedeps(mp); ACQUIRE_GBLLOCK(&lk); wakeup(&proc_waiting); } if (req_clear_remove) { req_clear_remove -= 1; FREE_GBLLOCK(&lk); clear_remove(mp); ACQUIRE_GBLLOCK(&lk); wakeup(&proc_waiting); } FREE_GBLLOCK(&lk); } /* * Flush out a directory with at least one removal dependency in an effort to * reduce the number of dirrem, freefile, and freeblks dependency structures. */ static void clear_remove(mp) struct mount *mp; { struct pagedep_hashhead *pagedephd; struct pagedep *pagedep; struct ufsmount *ump; struct vnode *vp; struct bufobj *bo; int error, cnt; ino_t ino; ump = VFSTOUFS(mp); LOCK_OWNED(ump); for (cnt = 0; cnt <= ump->pagedep_hash_size; cnt++) { pagedephd = &ump->pagedep_hashtbl[ump->pagedep_nextclean++]; if (ump->pagedep_nextclean > ump->pagedep_hash_size) ump->pagedep_nextclean = 0; LIST_FOREACH(pagedep, pagedephd, pd_hash) { if (LIST_EMPTY(&pagedep->pd_dirremhd)) continue; ino = pagedep->pd_ino; if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) continue; FREE_LOCK(ump); /* * Let unmount clear deps */ error = vfs_busy(mp, MBF_NOWAIT); if (error != 0) goto finish_write; error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp, FFSV_FORCEINSMQ); vfs_unbusy(mp); if (error != 0) { softdep_error("clear_remove: vget", error); goto finish_write; } if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0))) softdep_error("clear_remove: fsync", error); bo = &vp->v_bufobj; BO_LOCK(bo); drain_output(vp); BO_UNLOCK(bo); vput(vp); finish_write: vn_finished_write(mp); ACQUIRE_LOCK(ump); return; } } } /* * Clear out a block of dirty inodes in an effort to reduce * the number of inodedep dependency structures. */ static void clear_inodedeps(mp) struct mount *mp; { struct inodedep_hashhead *inodedephd; struct inodedep *inodedep; struct ufsmount *ump; struct vnode *vp; struct fs *fs; int error, cnt; ino_t firstino, lastino, ino; ump = VFSTOUFS(mp); fs = ump->um_fs; LOCK_OWNED(ump); /* * Pick a random inode dependency to be cleared. * We will then gather up all the inodes in its block * that have dependencies and flush them out. */ for (cnt = 0; cnt <= ump->inodedep_hash_size; cnt++) { inodedephd = &ump->inodedep_hashtbl[ump->inodedep_nextclean++]; if (ump->inodedep_nextclean > ump->inodedep_hash_size) ump->inodedep_nextclean = 0; if ((inodedep = LIST_FIRST(inodedephd)) != NULL) break; } if (inodedep == NULL) return; /* * Find the last inode in the block with dependencies. 
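 *
 * For example, with 64 inodes per block (INOPB) and id_ino == 100,
 * rounddown2(100, 64) yields firstino == 64, and the scan below walks
 * down from lastino == 127 until an inode with dependencies is found.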
*/ firstino = rounddown2(inodedep->id_ino, INOPB(fs)); for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--) if (inodedep_lookup(mp, lastino, 0, &inodedep) != 0) break; /* * Asynchronously push all but the last inode with dependencies. * Synchronously push the last inode with dependencies to ensure * that the inode block gets written to free up the inodedeps. */ for (ino = firstino; ino <= lastino; ino++) { if (inodedep_lookup(mp, ino, 0, &inodedep) == 0) continue; if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) continue; FREE_LOCK(ump); error = vfs_busy(mp, MBF_NOWAIT); /* Let unmount clear deps */ if (error != 0) { vn_finished_write(mp); ACQUIRE_LOCK(ump); return; } if ((error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp, FFSV_FORCEINSMQ)) != 0) { softdep_error("clear_inodedeps: vget", error); vfs_unbusy(mp); vn_finished_write(mp); ACQUIRE_LOCK(ump); return; } vfs_unbusy(mp); if (ino == lastino) { if ((error = ffs_syncvnode(vp, MNT_WAIT, 0))) softdep_error("clear_inodedeps: fsync1", error); } else { if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0))) softdep_error("clear_inodedeps: fsync2", error); BO_LOCK(&vp->v_bufobj); drain_output(vp); BO_UNLOCK(&vp->v_bufobj); } vput(vp); vn_finished_write(mp); ACQUIRE_LOCK(ump); } } void softdep_buf_append(bp, wkhd) struct buf *bp; struct workhead *wkhd; { struct worklist *wk; struct ufsmount *ump; if ((wk = LIST_FIRST(wkhd)) == NULL) return; KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0, ("softdep_buf_append called on non-softdep filesystem")); ump = VFSTOUFS(wk->wk_mp); ACQUIRE_LOCK(ump); while ((wk = LIST_FIRST(wkhd)) != NULL) { WORKLIST_REMOVE(wk); WORKLIST_INSERT(&bp->b_dep, wk); } FREE_LOCK(ump); } void softdep_inode_append(ip, cred, wkhd) struct inode *ip; struct ucred *cred; struct workhead *wkhd; { struct buf *bp; struct fs *fs; struct ufsmount *ump; int error; ump = ITOUMP(ip); KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, ("softdep_inode_append called on non-softdep filesystem")); fs = ump->um_fs; error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int)fs->fs_bsize, cred, &bp); if (error) { bqrelse(bp); softdep_freework(wkhd); return; } softdep_buf_append(bp, wkhd); bqrelse(bp); } void softdep_freework(wkhd) struct workhead *wkhd; { struct worklist *wk; struct ufsmount *ump; if ((wk = LIST_FIRST(wkhd)) == NULL) return; KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0, ("softdep_freework called on non-softdep filesystem")); ump = VFSTOUFS(wk->wk_mp); ACQUIRE_LOCK(ump); handle_jwork(wkhd); FREE_LOCK(ump); } /* * Function to determine if the buffer has outstanding dependencies * that will cause a roll-back if the buffer is written. If wantcount * is set, return number of dependencies, otherwise just yes or no. 
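 *
 * Typical use is the boolean form, as in softdep_sync_buf() above:
 *
 *	if (waitfor == MNT_NOWAIT && softdep_count_dependencies(bp, 0))
 *		return (EBUSY);
 *
 * which asks only whether writing bp now would trigger any rollback.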
*/ static int softdep_count_dependencies(bp, wantcount) struct buf *bp; int wantcount; { struct worklist *wk; struct ufsmount *ump; struct bmsafemap *bmsafemap; struct freework *freework; struct inodedep *inodedep; struct indirdep *indirdep; struct freeblks *freeblks; struct allocindir *aip; struct pagedep *pagedep; struct dirrem *dirrem; struct newblk *newblk; struct mkdir *mkdir; struct diradd *dap; int i, retval; retval = 0; if ((wk = LIST_FIRST(&bp->b_dep)) == NULL) return (0); ump = VFSTOUFS(wk->wk_mp); ACQUIRE_LOCK(ump); LIST_FOREACH(wk, &bp->b_dep, wk_list) { switch (wk->wk_type) { case D_INODEDEP: inodedep = WK_INODEDEP(wk); if ((inodedep->id_state & DEPCOMPLETE) == 0) { /* bitmap allocation dependency */ retval += 1; if (!wantcount) goto out; } if (TAILQ_FIRST(&inodedep->id_inoupdt)) { /* direct block pointer dependency */ retval += 1; if (!wantcount) goto out; } if (TAILQ_FIRST(&inodedep->id_extupdt)) { /* direct block pointer dependency */ retval += 1; if (!wantcount) goto out; } if (TAILQ_FIRST(&inodedep->id_inoreflst)) { /* Add reference dependency. */ retval += 1; if (!wantcount) goto out; } continue; case D_INDIRDEP: indirdep = WK_INDIRDEP(wk); TAILQ_FOREACH(freework, &indirdep->ir_trunc, fw_next) { /* indirect truncation dependency */ retval += 1; if (!wantcount) goto out; } LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) { /* indirect block pointer dependency */ retval += 1; if (!wantcount) goto out; } continue; case D_PAGEDEP: pagedep = WK_PAGEDEP(wk); LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) { if (LIST_FIRST(&dirrem->dm_jremrefhd)) { /* Journal remove ref dependency. */ retval += 1; if (!wantcount) goto out; } } for (i = 0; i < DAHASHSZ; i++) { LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { /* directory entry dependency */ retval += 1; if (!wantcount) goto out; } } continue; case D_BMSAFEMAP: bmsafemap = WK_BMSAFEMAP(wk); if (LIST_FIRST(&bmsafemap->sm_jaddrefhd)) { /* Add reference dependency. */ retval += 1; if (!wantcount) goto out; } if (LIST_FIRST(&bmsafemap->sm_jnewblkhd)) { /* Allocate block dependency. */ retval += 1; if (!wantcount) goto out; } continue; case D_FREEBLKS: freeblks = WK_FREEBLKS(wk); if (LIST_FIRST(&freeblks->fb_jblkdephd)) { /* Freeblk journal dependency. */ retval += 1; if (!wantcount) goto out; } continue; case D_ALLOCDIRECT: case D_ALLOCINDIR: newblk = WK_NEWBLK(wk); if (newblk->nb_jnewblk) { /* Journal allocate dependency. */ retval += 1; if (!wantcount) goto out; } continue; case D_MKDIR: mkdir = WK_MKDIR(wk); if (mkdir->md_jaddref) { /* Journal reference dependency. */ retval += 1; if (!wantcount) goto out; } continue; case D_FREEWORK: case D_FREEDEP: case D_JSEGDEP: case D_JSEG: case D_SBDEP: /* never a dependency on these blocks */ continue; default: panic("softdep_count_dependencies: Unexpected type %s", TYPENAME(wk->wk_type)); /* NOTREACHED */ } } out: FREE_LOCK(ump); return retval; } /* * Acquire exclusive access to a buffer. * Must be called with a locked mtx parameter. * Return acquired buffer or NULL on failure. */ static struct buf * getdirtybuf(bp, lock, waitfor) struct buf *bp; struct rwlock *lock; int waitfor; { int error; if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) { if (waitfor != MNT_WAIT) return (NULL); error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, lock); /* * Even if we successfully acquire bp here, we have dropped * the lock, which may violate our guarantee.
*/ if (error == 0) BUF_UNLOCK(bp); else if (error != ENOLCK) panic("getdirtybuf: inconsistent lock: %d", error); rw_wlock(lock); return (NULL); } if ((bp->b_vflags & BV_BKGRDINPROG) != 0) { if (lock != BO_LOCKPTR(bp->b_bufobj) && waitfor == MNT_WAIT) { rw_wunlock(lock); BO_LOCK(bp->b_bufobj); BUF_UNLOCK(bp); if ((bp->b_vflags & BV_BKGRDINPROG) != 0) { bp->b_vflags |= BV_BKGRDWAIT; msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO | PDROP, "getbuf", 0); } else BO_UNLOCK(bp->b_bufobj); rw_wlock(lock); return (NULL); } BUF_UNLOCK(bp); if (waitfor != MNT_WAIT) return (NULL); /* * The lock argument must be bp->b_vp's mutex in * this case. */ #ifdef DEBUG_VFS_LOCKS if (bp->b_vp->v_type != VCHR) ASSERT_BO_WLOCKED(bp->b_bufobj); #endif bp->b_vflags |= BV_BKGRDWAIT; rw_sleep(&bp->b_xflags, lock, PRIBIO, "getbuf", 0); return (NULL); } if ((bp->b_flags & B_DELWRI) == 0) { BUF_UNLOCK(bp); return (NULL); } bremfree(bp); return (bp); } /* * Check if it is safe to suspend the file system now. On entry, * the vnode interlock for devvp should be held. Return 0 with * the mount interlock held if the file system can be suspended now, * otherwise return EAGAIN with the mount interlock held. */ int softdep_check_suspend(struct mount *mp, struct vnode *devvp, int softdep_depcnt, int softdep_accdepcnt, int secondary_writes, int secondary_accwrites) { struct bufobj *bo; struct ufsmount *ump; struct inodedep *inodedep; int error, unlinked; bo = &devvp->v_bufobj; ASSERT_BO_WLOCKED(bo); /* * If we are not running with soft updates, then we need only * deal with secondary writes as we try to suspend. */ if (MOUNTEDSOFTDEP(mp) == 0) { MNT_ILOCK(mp); while (mp->mnt_secondary_writes != 0) { BO_UNLOCK(bo); msleep(&mp->mnt_secondary_writes, MNT_MTX(mp), (PUSER - 1) | PDROP, "secwr", 0); BO_LOCK(bo); MNT_ILOCK(mp); } /* * Reasons for needing more work before suspend: * - Dirty buffers on devvp. * - Secondary writes occurred after start of vnode sync loop */ error = 0; if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0 || secondary_writes != 0 || mp->mnt_secondary_writes != 0 || secondary_accwrites != mp->mnt_secondary_accwrites) error = EAGAIN; BO_UNLOCK(bo); return (error); } /* * If we are running with soft updates, then we need to coordinate * with them as we try to suspend. */ ump = VFSTOUFS(mp); for (;;) { if (!TRY_ACQUIRE_LOCK(ump)) { BO_UNLOCK(bo); ACQUIRE_LOCK(ump); FREE_LOCK(ump); BO_LOCK(bo); continue; } MNT_ILOCK(mp); if (mp->mnt_secondary_writes != 0) { FREE_LOCK(ump); BO_UNLOCK(bo); msleep(&mp->mnt_secondary_writes, MNT_MTX(mp), (PUSER - 1) | PDROP, "secwr", 0); BO_LOCK(bo); continue; } break; } unlinked = 0; if (MOUNTEDSUJ(mp)) { for (inodedep = TAILQ_FIRST(&ump->softdep_unlinked); inodedep != NULL; inodedep = TAILQ_NEXT(inodedep, id_unlinked)) { if ((inodedep->id_state & (UNLINKED | UNLINKLINKS | UNLINKONLIST)) != (UNLINKED | UNLINKLINKS | UNLINKONLIST) || !check_inodedep_free(inodedep)) continue; unlinked++; } } /* * Reasons for needing more work before suspend: * - Dirty buffers on devvp. 
* - Softdep activity occurred after start of vnode sync loop * - Secondary writes occurred after start of vnode sync loop */ error = 0; if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0 || softdep_depcnt != unlinked || ump->softdep_deps != unlinked || softdep_accdepcnt != ump->softdep_accdeps || secondary_writes != 0 || mp->mnt_secondary_writes != 0 || secondary_accwrites != mp->mnt_secondary_accwrites) error = EAGAIN; FREE_LOCK(ump); BO_UNLOCK(bo); return (error); } /* * Get the number of dependency structures for the file system, both * the current number and the total number allocated. These will * later be used to detect that softdep processing has occurred. */ void softdep_get_depcounts(struct mount *mp, int *softdep_depsp, int *softdep_accdepsp) { struct ufsmount *ump; if (MOUNTEDSOFTDEP(mp) == 0) { *softdep_depsp = 0; *softdep_accdepsp = 0; return; } ump = VFSTOUFS(mp); ACQUIRE_LOCK(ump); *softdep_depsp = ump->softdep_deps; *softdep_accdepsp = ump->softdep_accdeps; FREE_LOCK(ump); } /* * Wait for pending output on a vnode to complete. * Must be called with vnode lock and interlock locked. * * XXX: Should just be a call to bufobj_wwait(). */ static void drain_output(vp) struct vnode *vp; { struct bufobj *bo; bo = &vp->v_bufobj; ASSERT_VOP_LOCKED(vp, "drain_output"); ASSERT_BO_WLOCKED(bo); while (bo->bo_numoutput) { bo->bo_flag |= BO_WWAIT; msleep((caddr_t)&bo->bo_numoutput, BO_LOCKPTR(bo), PRIBIO + 1, "drainvp", 0); } } /* * Called whenever a buffer that is being invalidated or reallocated * contains dependencies. This should only happen if an I/O error has * occurred. The routine is called with the buffer locked. */ static void softdep_deallocate_dependencies(bp) struct buf *bp; { if ((bp->b_ioflags & BIO_ERROR) == 0) panic("softdep_deallocate_dependencies: dangling deps"); if (bp->b_vp != NULL && bp->b_vp->v_mount != NULL) softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname, bp->b_error); else printf("softdep_deallocate_dependencies: " "got error %d while accessing filesystem\n", bp->b_error); if (bp->b_error != ENXIO) panic("softdep_deallocate_dependencies: unrecovered I/O error"); } /* * Function to handle asynchronous write errors in the filesystem. */ static void softdep_error(func, error) char *func; int error; { /* XXX should do something better! 
*/ printf("%s: got error %d while accessing filesystem\n", func, error); } #ifdef DDB static void inodedep_print(struct inodedep *inodedep, int verbose) { db_printf("%p fs %p st %x ino %jd inoblk %jd delta %jd nlink %jd" " saveino %p\n", inodedep, inodedep->id_fs, inodedep->id_state, (intmax_t)inodedep->id_ino, (intmax_t)fsbtodb(inodedep->id_fs, ino_to_fsba(inodedep->id_fs, inodedep->id_ino)), (intmax_t)inodedep->id_nlinkdelta, (intmax_t)inodedep->id_savednlink, inodedep->id_savedino1); if (verbose == 0) return; db_printf("\tpendinghd %p, bufwait %p, inowait %p, inoreflst %p, " "mkdiradd %p\n", LIST_FIRST(&inodedep->id_pendinghd), LIST_FIRST(&inodedep->id_bufwait), LIST_FIRST(&inodedep->id_inowait), TAILQ_FIRST(&inodedep->id_inoreflst), inodedep->id_mkdiradd); db_printf("\tinoupdt %p, newinoupdt %p, extupdt %p, newextupdt %p\n", TAILQ_FIRST(&inodedep->id_inoupdt), TAILQ_FIRST(&inodedep->id_newinoupdt), TAILQ_FIRST(&inodedep->id_extupdt), TAILQ_FIRST(&inodedep->id_newextupdt)); } DB_SHOW_COMMAND(inodedep, db_show_inodedep) { if (have_addr == 0) { db_printf("Address required\n"); return; } inodedep_print((struct inodedep*)addr, 1); } DB_SHOW_COMMAND(inodedeps, db_show_inodedeps) { struct inodedep_hashhead *inodedephd; struct inodedep *inodedep; struct ufsmount *ump; int cnt; if (have_addr == 0) { db_printf("Address required\n"); return; } ump = (struct ufsmount *)addr; for (cnt = 0; cnt < ump->inodedep_hash_size; cnt++) { inodedephd = &ump->inodedep_hashtbl[cnt]; LIST_FOREACH(inodedep, inodedephd, id_hash) { inodedep_print(inodedep, 0); } } } DB_SHOW_COMMAND(worklist, db_show_worklist) { struct worklist *wk; if (have_addr == 0) { db_printf("Address required\n"); return; } wk = (struct worklist *)addr; printf("worklist: %p type %s state 0x%X\n", wk, TYPENAME(wk->wk_type), wk->wk_state); } DB_SHOW_COMMAND(workhead, db_show_workhead) { struct workhead *wkhd; struct worklist *wk; int i; if (have_addr == 0) { db_printf("Address required\n"); return; } wkhd = (struct workhead *)addr; wk = LIST_FIRST(wkhd); for (i = 0; i < 100 && wk != NULL; i++, wk = LIST_NEXT(wk, wk_list)) db_printf("worklist: %p type %s state 0x%X", wk, TYPENAME(wk->wk_type), wk->wk_state); if (i == 100) db_printf("workhead overflow"); printf("\n"); } DB_SHOW_COMMAND(mkdirs, db_show_mkdirs) { struct mkdirlist *mkdirlisthd; struct jaddref *jaddref; struct diradd *diradd; struct mkdir *mkdir; if (have_addr == 0) { db_printf("Address required\n"); return; } mkdirlisthd = (struct mkdirlist *)addr; LIST_FOREACH(mkdir, mkdirlisthd, md_mkdirs) { diradd = mkdir->md_diradd; db_printf("mkdir: %p state 0x%X dap %p state 0x%X", mkdir, mkdir->md_state, diradd, diradd->da_state); if ((jaddref = mkdir->md_jaddref) != NULL) db_printf(" jaddref %p jaddref state 0x%X", jaddref, jaddref->ja_state); db_printf("\n"); } } /* exported to ffs_vfsops.c */ extern void db_print_ffs(struct ufsmount *ump); void db_print_ffs(struct ufsmount *ump) { db_printf("mp %p %s devvp %p fs %p su_wl %d su_deps %d su_req %d\n", ump->um_mountp, ump->um_mountp->mnt_stat.f_mntonname, ump->um_devvp, ump->um_fs, ump->softdep_on_worklist, ump->softdep_deps, ump->softdep_req); } #endif /* DDB */ #endif /* SOFTUPDATES */ Index: projects/clang500-import/sys/ufs/ffs/ffs_vfsops.c =================================================================== --- projects/clang500-import/sys/ufs/ffs/ffs_vfsops.c (revision 319548) +++ projects/clang500-import/sys/ufs/ffs/ffs_vfsops.c (revision 319549) @@ -1,2286 +1,2290 @@ /*- * Copyright (c) 1989, 1991, 1993, 1994 * The Regents of the 
University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95 */ #include __FBSDID("$FreeBSD$"); #include "opt_quota.h" #include "opt_ufs.h" #include "opt_ffs.h" #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static uma_zone_t uma_inode, uma_ufs1, uma_ufs2; static int ffs_mountfs(struct vnode *, struct mount *, struct thread *); static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, ufs2_daddr_t); static void ffs_ifree(struct ufsmount *ump, struct inode *ip); static int ffs_sync_lazy(struct mount *mp); static vfs_init_t ffs_init; static vfs_uninit_t ffs_uninit; static vfs_extattrctl_t ffs_extattrctl; static vfs_cmount_t ffs_cmount; static vfs_unmount_t ffs_unmount; static vfs_mount_t ffs_mount; static vfs_statfs_t ffs_statfs; static vfs_fhtovp_t ffs_fhtovp; static vfs_sync_t ffs_sync; static struct vfsops ufs_vfsops = { .vfs_extattrctl = ffs_extattrctl, .vfs_fhtovp = ffs_fhtovp, .vfs_init = ffs_init, .vfs_mount = ffs_mount, .vfs_cmount = ffs_cmount, .vfs_quotactl = ufs_quotactl, .vfs_root = ufs_root, .vfs_statfs = ffs_statfs, .vfs_sync = ffs_sync, .vfs_uninit = ffs_uninit, .vfs_unmount = ffs_unmount, .vfs_vget = ffs_vget, .vfs_susp_clean = process_deferred_inactive, }; VFS_SET(ufs_vfsops, ufs, 0); MODULE_VERSION(ufs, 1); static b_strategy_t ffs_geom_strategy; static b_write_t ffs_bufwrite; static struct buf_ops ffs_ops = { .bop_name = "FFS", .bop_write = ffs_bufwrite, .bop_strategy = ffs_geom_strategy, .bop_sync = bufsync, #ifdef NO_FFS_SNAPSHOT .bop_bdflush = bufbdflush, #else .bop_bdflush = ffs_bdflush, #endif }; /* * Note that userquota and groupquota options are not currently used * by UFS/FFS code and generally mount(8) does not pass those options * from userland, but they can be passed by loader(8) via * vfs.root.mountfrom.options. 
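 *
 * For example, loader.conf(5) settings along the lines of
 *
 *	vfs.root.mountfrom="ufs:/dev/ada0p2"
 *	vfs.root.mountfrom.options="rw,userquota"
 *
 * (device name illustrative) can hand "userquota" to this code even
 * though mount(8) would not pass it.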
*/ static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr", "noclusterw", "noexec", "export", "force", "from", "groupquota", "multilabel", "nfsv4acls", "fsckpid", "snapshot", "nosuid", "suiddir", "nosymfollow", "sync", "union", "userquota", NULL }; static int ffs_mount(struct mount *mp) { struct vnode *devvp; struct thread *td; struct ufsmount *ump = NULL; struct fs *fs; pid_t fsckpid = 0; int error, error1, flags; uint64_t mntorflags; accmode_t accmode; struct nameidata ndp; char *fspec; td = curthread; if (vfs_filteropt(mp->mnt_optnew, ffs_opts)) return (EINVAL); if (uma_inode == NULL) { uma_inode = uma_zcreate("FFS inode", sizeof(struct inode), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); uma_ufs1 = uma_zcreate("FFS1 dinode", sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); uma_ufs2 = uma_zcreate("FFS2 dinode", sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); } vfs_deleteopt(mp->mnt_optnew, "groupquota"); vfs_deleteopt(mp->mnt_optnew, "userquota"); fspec = vfs_getopts(mp->mnt_optnew, "from", &error); if (error) return (error); mntorflags = 0; if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0) mntorflags |= MNT_ACLS; if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) { mntorflags |= MNT_SNAPSHOT; /* * Once we have set the MNT_SNAPSHOT flag, do not * persist "snapshot" in the options list. */ vfs_deleteopt(mp->mnt_optnew, "snapshot"); vfs_deleteopt(mp->mnt_opt, "snapshot"); } if (vfs_getopt(mp->mnt_optnew, "fsckpid", NULL, NULL) == 0 && vfs_scanopt(mp->mnt_optnew, "fsckpid", "%d", &fsckpid) == 1) { /* * Once we have set the restricted PID, do not * persist "fsckpid" in the options list. */ vfs_deleteopt(mp->mnt_optnew, "fsckpid"); vfs_deleteopt(mp->mnt_opt, "fsckpid"); if (mp->mnt_flag & MNT_UPDATE) { if (VFSTOUFS(mp)->um_fs->fs_ronly == 0 && vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) { vfs_mount_error(mp, "Checker enable: Must be read-only"); return (EINVAL); } } else if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) { vfs_mount_error(mp, "Checker enable: Must be read-only"); return (EINVAL); } /* Set to -1 if we are done */ if (fsckpid == 0) fsckpid = -1; } if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) { if (mntorflags & MNT_ACLS) { vfs_mount_error(mp, "\"acls\" and \"nfsv4acls\" options " "are mutually exclusive"); return (EINVAL); } mntorflags |= MNT_NFS4ACLS; } MNT_ILOCK(mp); mp->mnt_flag |= mntorflags; MNT_IUNLOCK(mp); /* * If updating, check whether changing from read-only to * read/write; if there is no device name, that's all we do. */ if (mp->mnt_flag & MNT_UPDATE) { ump = VFSTOUFS(mp); fs = ump->um_fs; devvp = ump->um_devvp; if (fsckpid == -1 && ump->um_fsckpid > 0) { if ((error = ffs_flushfiles(mp, WRITECLOSE, td)) != 0 || (error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) return (error); g_topology_lock(); /* * Return to normal read-only mode. */ error = g_access(ump->um_cp, 0, -1, 0); g_topology_unlock(); ump->um_fsckpid = 0; } if (fs->fs_ronly == 0 && vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) { /* * Flush any dirty data and suspend filesystem. */ if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0) return (error); error = vfs_write_suspend_umnt(mp); if (error != 0) return (error); /* * Check for and optionally get rid of files open * for writing. 
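 *
 * Roughly: without MNT_FORCE the downgrade fails (vflush() returns
 * EBUSY) while any file is still open for writing; with MNT_FORCE,
 * FORCECLOSE revokes those opens so the downgrade can proceed.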
*/ flags = WRITECLOSE; if (mp->mnt_flag & MNT_FORCE) flags |= FORCECLOSE; if (MOUNTEDSOFTDEP(mp)) { error = softdep_flushfiles(mp, flags, td); } else { error = ffs_flushfiles(mp, flags, td); } if (error) { vfs_write_resume(mp, 0); return (error); } if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { printf("WARNING: %s Update error: blocks %jd " "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes); fs->fs_pendingblocks = 0; fs->fs_pendinginodes = 0; } if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0) fs->fs_clean = 1; if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) { fs->fs_ronly = 0; fs->fs_clean = 0; vfs_write_resume(mp, 0); return (error); } if (MOUNTEDSOFTDEP(mp)) softdep_unmount(mp); g_topology_lock(); /* * Drop our write and exclusive access. */ g_access(ump->um_cp, 0, -1, -1); g_topology_unlock(); fs->fs_ronly = 1; MNT_ILOCK(mp); mp->mnt_flag |= MNT_RDONLY; MNT_IUNLOCK(mp); /* * Allow the writers to note that filesystem * is ro now. */ vfs_write_resume(mp, 0); } if ((mp->mnt_flag & MNT_RELOAD) && (error = ffs_reload(mp, td, 0)) != 0) return (error); if (fs->fs_ronly && !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) { /* * If we are running a checker, do not allow upgrade. */ if (ump->um_fsckpid > 0) { vfs_mount_error(mp, "Active checker, cannot upgrade to write"); return (EINVAL); } /* * If upgrade to read-write by non-root, then verify * that user has necessary permissions on the device. */ vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); error = VOP_ACCESS(devvp, VREAD | VWRITE, td->td_ucred, td); if (error) error = priv_check(td, PRIV_VFS_MOUNT_PERM); if (error) { VOP_UNLOCK(devvp, 0); return (error); } VOP_UNLOCK(devvp, 0); fs->fs_flags &= ~FS_UNCLEAN; if (fs->fs_clean == 0) { fs->fs_flags |= FS_UNCLEAN; if ((mp->mnt_flag & MNT_FORCE) || ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 && (fs->fs_flags & FS_DOSOFTDEP))) { printf("WARNING: %s was not properly " "dismounted\n", fs->fs_fsmnt); } else { vfs_mount_error(mp, "R/W mount of %s denied. %s.%s", fs->fs_fsmnt, "Filesystem is not clean - run fsck", (fs->fs_flags & FS_SUJ) == 0 ? "" : " Forced mount will invalidate" " journal contents"); return (EPERM); } } g_topology_lock(); /* * Request exclusive write access. */ error = g_access(ump->um_cp, 0, 1, 1); g_topology_unlock(); if (error) return (error); if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0) return (error); fs->fs_ronly = 0; MNT_ILOCK(mp); mp->mnt_flag &= ~MNT_RDONLY; MNT_IUNLOCK(mp); fs->fs_mtime = time_second; /* check to see if we need to start softdep */ if ((fs->fs_flags & FS_DOSOFTDEP) && (error = softdep_mount(devvp, mp, fs, td->td_ucred))){ vn_finished_write(mp); return (error); } fs->fs_clean = 0; if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) { vn_finished_write(mp); return (error); } if (fs->fs_snapinum[0] != 0) ffs_snapshot_mount(mp); vn_finished_write(mp); } /* * Soft updates is incompatible with "async", * so if we are doing softupdates stop the user * from setting the async flag in an update. * Softdep_mount() clears it in an initial mount * or ro->rw remount. */ if (MOUNTEDSOFTDEP(mp)) { /* XXX: Reset too late ? */ MNT_ILOCK(mp); mp->mnt_flag &= ~MNT_ASYNC; MNT_IUNLOCK(mp); } /* * Keep MNT_ACLS flag if it is stored in superblock. */ if ((fs->fs_flags & FS_ACLS) != 0) { /* XXX: Set too late ? */ MNT_ILOCK(mp); mp->mnt_flag |= MNT_ACLS; MNT_IUNLOCK(mp); } if ((fs->fs_flags & FS_NFS4ACLS) != 0) { /* XXX: Set too late ? 
*/ MNT_ILOCK(mp); mp->mnt_flag |= MNT_NFS4ACLS; MNT_IUNLOCK(mp); } /* * If this is a request from fsck to clean up the filesystem, * then allow the specified pid to proceed. */ if (fsckpid > 0) { if (ump->um_fsckpid != 0) { vfs_mount_error(mp, "Active checker already running on %s", fs->fs_fsmnt); return (EINVAL); } KASSERT(MOUNTEDSOFTDEP(mp) == 0, ("soft updates enabled on read-only file system")); g_topology_lock(); /* * Request write access. */ error = g_access(ump->um_cp, 0, 1, 0); g_topology_unlock(); if (error) { vfs_mount_error(mp, "Checker activation failed on %s", fs->fs_fsmnt); return (error); } ump->um_fsckpid = fsckpid; if (fs->fs_snapinum[0] != 0) ffs_snapshot_mount(mp); fs->fs_mtime = time_second; fs->fs_fmod = 1; fs->fs_clean = 0; (void) ffs_sbupdate(ump, MNT_WAIT, 0); } /* * If this is a snapshot request, take the snapshot. */ if (mp->mnt_flag & MNT_SNAPSHOT) return (ffs_snapshot(mp, fspec)); /* * Must not call namei() while owning busy ref. */ vfs_unbusy(mp); } /* * Not an update, or updating the name: look up the name * and verify that it refers to a sensible disk device. */ NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td); error = namei(&ndp); if ((mp->mnt_flag & MNT_UPDATE) != 0) { /* * Unmount does not start if MNT_UPDATE is set. Mount * update busies mp before setting MNT_UPDATE. We * must be able to retain our busy ref succesfully, * without sleep. */ error1 = vfs_busy(mp, MBF_NOWAIT); MPASS(error1 == 0); } if (error != 0) return (error); NDFREE(&ndp, NDF_ONLY_PNBUF); devvp = ndp.ni_vp; if (!vn_isdisk(devvp, &error)) { vput(devvp); return (error); } /* * If mount by non-root, then verify that user has necessary * permissions on the device. */ accmode = VREAD; if ((mp->mnt_flag & MNT_RDONLY) == 0) accmode |= VWRITE; error = VOP_ACCESS(devvp, accmode, td->td_ucred, td); if (error) error = priv_check(td, PRIV_VFS_MOUNT_PERM); if (error) { vput(devvp); return (error); } if (mp->mnt_flag & MNT_UPDATE) { /* * Update only * * If it's not the same vnode, or at least the same device * then it's not correct. */ if (devvp->v_rdev != ump->um_devvp->v_rdev) error = EINVAL; /* needs translation */ vput(devvp); if (error) return (error); } else { /* * New mount * * We need the name for the mount point (also used for * "last mounted on") copied in. If an error occurs, * the mount point is discarded by the upper level code. * Note that vfs_mount_alloc() populates f_mntonname for us. */ if ((error = ffs_mountfs(devvp, mp, td)) != 0) { vrele(devvp); return (error); } if (fsckpid > 0) { KASSERT(MOUNTEDSOFTDEP(mp) == 0, ("soft updates enabled on read-only file system")); ump = VFSTOUFS(mp); fs = ump->um_fs; g_topology_lock(); /* * Request write access. */ error = g_access(ump->um_cp, 0, 1, 0); g_topology_unlock(); if (error) { printf("WARNING: %s: Checker activation " "failed\n", fs->fs_fsmnt); } else { ump->um_fsckpid = fsckpid; if (fs->fs_snapinum[0] != 0) ffs_snapshot_mount(mp); fs->fs_mtime = time_second; fs->fs_clean = 0; (void) ffs_sbupdate(ump, MNT_WAIT, 0); } } } vfs_mountedfrom(mp, fspec); return (0); } /* * Compatibility with old mount system call. 
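 *
 * In outline, ffs_cmount() below just copies in the old binary
 * struct ufs_args, re-encodes it as "from" and "export" name/value
 * options, and calls kernel_mount(), so the old and new mount(2)
 * interfaces share a single implementation.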
*/ static int ffs_cmount(struct mntarg *ma, void *data, uint64_t flags) { struct ufs_args args; struct export_args exp; int error; if (data == NULL) return (EINVAL); error = copyin(data, &args, sizeof args); if (error) return (error); vfs_oexport_conv(&args.export, &exp); ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN); ma = mount_arg(ma, "export", &exp, sizeof(exp)); error = kernel_mount(ma, flags); return (error); } /* * Reload all incore data for a filesystem (used after running fsck on * the root filesystem and finding things to fix). If the 'force' flag * is 0, the filesystem must be mounted read-only. * * Things to do to update the mount: * 1) invalidate all cached meta-data. * 2) re-read superblock from disk. * 3) re-read summary information from disk. * 4) invalidate all inactive vnodes. * 5) clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags, allowing secondary * writers, if requested. * 6) invalidate all cached file data. * 7) re-read inode data for all active vnodes. */ int ffs_reload(struct mount *mp, struct thread *td, int flags) { struct vnode *vp, *mvp, *devvp; struct inode *ip; void *space; struct buf *bp; struct fs *fs, *newfs; struct ufsmount *ump; ufs2_daddr_t sblockloc; int i, blks, error; u_long size; int32_t *lp; ump = VFSTOUFS(mp); MNT_ILOCK(mp); if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) { MNT_IUNLOCK(mp); return (EINVAL); } MNT_IUNLOCK(mp); /* * Step 1: invalidate all cached meta-data. */ devvp = VFSTOUFS(mp)->um_devvp; vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); if (vinvalbuf(devvp, 0, 0, 0) != 0) panic("ffs_reload: dirty1"); VOP_UNLOCK(devvp, 0); /* * Step 2: re-read superblock from disk. */ fs = VFSTOUFS(mp)->um_fs; if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize, NOCRED, &bp)) != 0) return (error); newfs = (struct fs *)bp->b_data; if ((newfs->fs_magic != FS_UFS1_MAGIC && newfs->fs_magic != FS_UFS2_MAGIC) || newfs->fs_bsize > MAXBSIZE || newfs->fs_bsize < sizeof(struct fs)) { brelse(bp); return (EIO); /* XXX needs translation */ } /* * Copy pointer fields back into superblock before copying in XXX * new superblock. These should really be in the ufsmount. XXX * Note that important parameters (eg fs_ncg) are unchanged. */ newfs->fs_csp = fs->fs_csp; newfs->fs_maxcluster = fs->fs_maxcluster; newfs->fs_contigdirs = fs->fs_contigdirs; newfs->fs_active = fs->fs_active; newfs->fs_ronly = fs->fs_ronly; sblockloc = fs->fs_sblockloc; bcopy(newfs, fs, (u_int)fs->fs_sbsize); brelse(bp); mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen; ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc); UFS_LOCK(ump); if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { printf("WARNING: %s: reload pending error: blocks %jd " "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes); fs->fs_pendingblocks = 0; fs->fs_pendinginodes = 0; } UFS_UNLOCK(ump); /* * Step 3: re-read summary information from disk. */ size = fs->fs_cssize; blks = howmany(size, fs->fs_fsize); if (fs->fs_contigsumsize > 0) size += fs->fs_ncg * sizeof(int32_t); size += fs->fs_ncg * sizeof(u_int8_t); free(fs->fs_csp, M_UFSMNT); space = malloc(size, M_UFSMNT, M_WAITOK); fs->fs_csp = space; for (i = 0; i < blks; i += fs->fs_frag) { size = fs->fs_bsize; if (i + fs->fs_frag > blks) size = (blks - i) * fs->fs_fsize; error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size, NOCRED, &bp); if (error) return (error); bcopy(bp->b_data, space, (u_int)size); space = (char *)space + size; brelse(bp); } /* * We no longer know anything about clusters per cylinder group. 
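 *
 * The loop below therefore seeds every cylinder group's fs_maxcluster
 * slot with fs_contigsumsize, an optimistic upper bound that the
 * block allocator can correct downward as each group is examined.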
*/ if (fs->fs_contigsumsize > 0) { fs->fs_maxcluster = lp = space; for (i = 0; i < fs->fs_ncg; i++) *lp++ = fs->fs_contigsumsize; space = lp; } size = fs->fs_ncg * sizeof(u_int8_t); fs->fs_contigdirs = (u_int8_t *)space; bzero(fs->fs_contigdirs, size); if ((flags & FFSR_UNSUSPEND) != 0) { MNT_ILOCK(mp); mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2); wakeup(&mp->mnt_flag); MNT_IUNLOCK(mp); } loop: MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { /* * Skip syncer vnode. */ if (vp->v_type == VNON) { VI_UNLOCK(vp); continue; } /* * Step 4: invalidate all cached file data. */ if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) { MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); goto loop; } if (vinvalbuf(vp, 0, 0, 0)) panic("ffs_reload: dirty2"); /* * Step 5: re-read inode data for all active vnodes. */ ip = VTOI(vp); error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int)fs->fs_bsize, NOCRED, &bp); if (error) { VOP_UNLOCK(vp, 0); vrele(vp); MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); return (error); } ffs_load_inode(bp, ip, fs, ip->i_number); ip->i_effnlink = ip->i_nlink; brelse(bp); VOP_UNLOCK(vp, 0); vrele(vp); } return (0); } /* * Possible superblock locations ordered from most to least likely. */ static int sblock_try[] = SBLOCKSEARCH; /* * Common code for mount and mountroot */ static int ffs_mountfs(devvp, mp, td) struct vnode *devvp; struct mount *mp; struct thread *td; { struct ufsmount *ump; struct buf *bp; struct fs *fs; struct cdev *dev; void *space; ufs2_daddr_t sblockloc; int error, i, blks, len, ronly; u_long size; int32_t *lp; struct ucred *cred; struct g_consumer *cp; struct mount *nmp; bp = NULL; ump = NULL; cred = td ? td->td_ucred : NOCRED; ronly = (mp->mnt_flag & MNT_RDONLY) != 0; KASSERT(devvp->v_type == VCHR, ("reclaimed devvp")); dev = devvp->v_rdev; if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0, (uintptr_t)mp) == 0) { VOP_UNLOCK(devvp, 0); return (EBUSY); } g_topology_lock(); error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1); g_topology_unlock(); if (error != 0) { atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0); VOP_UNLOCK(devvp, 0); return (error); } dev_ref(dev); devvp->v_bufobj.bo_ops = &ffs_ops; VOP_UNLOCK(devvp, 0); if (dev->si_iosize_max != 0) mp->mnt_iosize_max = dev->si_iosize_max; if (mp->mnt_iosize_max > MAXPHYS) mp->mnt_iosize_max = MAXPHYS; fs = NULL; sblockloc = 0; /* * Try reading the superblock in each of its possible locations. */ for (i = 0; sblock_try[i] != -1; i++) { if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) { error = EINVAL; vfs_mount_error(mp, "Invalid sectorsize %d for superblock size %d", cp->provider->sectorsize, SBLOCKSIZE); goto out; } if ((error = bread(devvp, btodb(sblock_try[i]), SBLOCKSIZE, cred, &bp)) != 0) goto out; fs = (struct fs *)bp->b_data; sblockloc = sblock_try[i]; if ((fs->fs_magic == FS_UFS1_MAGIC || (fs->fs_magic == FS_UFS2_MAGIC && (fs->fs_sblockloc == sblockloc || (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0))) && fs->fs_bsize <= MAXBSIZE && fs->fs_bsize >= sizeof(struct fs)) break; brelse(bp); bp = NULL; } if (sblock_try[i] == -1) { error = EINVAL; /* XXX needs translation */ goto out; } fs->fs_fmod = 0; fs->fs_flags &= ~FS_INDEXDIRS; /* no support for directory indices */ fs->fs_flags &= ~FS_UNCLEAN; if (fs->fs_clean == 0) { fs->fs_flags |= FS_UNCLEAN; if (ronly || (mp->mnt_flag & MNT_FORCE) || ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 && (fs->fs_flags & FS_DOSOFTDEP))) { printf("WARNING: %s was not properly dismounted\n", fs->fs_fsmnt); } else { vfs_mount_error(mp, "R/W mount of %s denied. 
%s%s", fs->fs_fsmnt, "Filesystem is not clean - run fsck.", (fs->fs_flags & FS_SUJ) == 0 ? "" : " Forced mount will invalidate journal contents"); error = EPERM; goto out; } if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) && (mp->mnt_flag & MNT_FORCE)) { printf("WARNING: %s: lost blocks %jd files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes); fs->fs_pendingblocks = 0; fs->fs_pendinginodes = 0; } } if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { printf("WARNING: %s: mount pending error: blocks %jd " "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes); fs->fs_pendingblocks = 0; fs->fs_pendinginodes = 0; } if ((fs->fs_flags & FS_GJOURNAL) != 0) { #ifdef UFS_GJOURNAL /* * Get journal provider name. */ len = 1024; mp->mnt_gjprovider = malloc((u_long)len, M_UFSMNT, M_WAITOK); if (g_io_getattr("GJOURNAL::provider", cp, &len, mp->mnt_gjprovider) == 0) { mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, len, M_UFSMNT, M_WAITOK); MNT_ILOCK(mp); mp->mnt_flag |= MNT_GJOURNAL; MNT_IUNLOCK(mp); } else { printf("WARNING: %s: GJOURNAL flag on fs " "but no gjournal provider below\n", mp->mnt_stat.f_mntonname); free(mp->mnt_gjprovider, M_UFSMNT); mp->mnt_gjprovider = NULL; } #else printf("WARNING: %s: GJOURNAL flag on fs but no " "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname); #endif } else { mp->mnt_gjprovider = NULL; } ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO); ump->um_cp = cp; ump->um_bo = &devvp->v_bufobj; ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT, M_WAITOK); if (fs->fs_magic == FS_UFS1_MAGIC) { ump->um_fstype = UFS1; ump->um_balloc = ffs_balloc_ufs1; } else { ump->um_fstype = UFS2; ump->um_balloc = ffs_balloc_ufs2; } ump->um_blkatoff = ffs_blkatoff; ump->um_truncate = ffs_truncate; ump->um_update = ffs_update; ump->um_valloc = ffs_valloc; ump->um_vfree = ffs_vfree; ump->um_ifree = ffs_ifree; ump->um_rdonly = ffs_rdonly; ump->um_snapgone = ffs_snapgone; mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF); bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize); if (fs->fs_sbsize < SBLOCKSIZE) bp->b_flags |= B_INVAL | B_NOCACHE; brelse(bp); bp = NULL; fs = ump->um_fs; ffs_oldfscompat_read(fs, ump, sblockloc); fs->fs_ronly = ronly; size = fs->fs_cssize; blks = howmany(size, fs->fs_fsize); if (fs->fs_contigsumsize > 0) size += fs->fs_ncg * sizeof(int32_t); size += fs->fs_ncg * sizeof(u_int8_t); space = malloc(size, M_UFSMNT, M_WAITOK); fs->fs_csp = space; for (i = 0; i < blks; i += fs->fs_frag) { size = fs->fs_bsize; if (i + fs->fs_frag > blks) size = (blks - i) * fs->fs_fsize; if ((error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size, cred, &bp)) != 0) { free(fs->fs_csp, M_UFSMNT); goto out; } bcopy(bp->b_data, space, (u_int)size); space = (char *)space + size; brelse(bp); bp = NULL; } if (fs->fs_contigsumsize > 0) { fs->fs_maxcluster = lp = space; for (i = 0; i < fs->fs_ncg; i++) *lp++ = fs->fs_contigsumsize; space = lp; } size = fs->fs_ncg * sizeof(u_int8_t); fs->fs_contigdirs = (u_int8_t *)space; bzero(fs->fs_contigdirs, size); fs->fs_active = NULL; mp->mnt_data = ump; mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0]; mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1]; nmp = NULL; if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 || (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) { if (nmp) vfs_rel(nmp); vfs_getnewfsid(mp); } mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen; MNT_ILOCK(mp); mp->mnt_flag |= MNT_LOCAL; MNT_IUNLOCK(mp); if ((fs->fs_flags & FS_MULTILABEL) != 0) { #ifdef MAC MNT_ILOCK(mp); mp->mnt_flag |= 
MNT_MULTILABEL; MNT_IUNLOCK(mp); #else printf("WARNING: %s: multilabel flag on fs but " "no MAC support\n", mp->mnt_stat.f_mntonname); #endif } if ((fs->fs_flags & FS_ACLS) != 0) { #ifdef UFS_ACL MNT_ILOCK(mp); if (mp->mnt_flag & MNT_NFS4ACLS) printf("WARNING: %s: ACLs flag on fs conflicts with " "\"nfsv4acls\" mount option; option ignored\n", mp->mnt_stat.f_mntonname); mp->mnt_flag &= ~MNT_NFS4ACLS; mp->mnt_flag |= MNT_ACLS; MNT_IUNLOCK(mp); #else printf("WARNING: %s: ACLs flag on fs but no ACLs support\n", mp->mnt_stat.f_mntonname); #endif } if ((fs->fs_flags & FS_NFS4ACLS) != 0) { #ifdef UFS_ACL MNT_ILOCK(mp); if (mp->mnt_flag & MNT_ACLS) printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts " "with \"acls\" mount option; option ignored\n", mp->mnt_stat.f_mntonname); mp->mnt_flag &= ~MNT_ACLS; mp->mnt_flag |= MNT_NFS4ACLS; MNT_IUNLOCK(mp); #else printf("WARNING: %s: NFSv4 ACLs flag on fs but no " "ACLs support\n", mp->mnt_stat.f_mntonname); #endif } if ((fs->fs_flags & FS_TRIM) != 0) { len = sizeof(int); if (g_io_getattr("GEOM::candelete", cp, &len, &ump->um_candelete) == 0) { if (!ump->um_candelete) printf("WARNING: %s: TRIM flag on fs but disk " "does not support TRIM\n", mp->mnt_stat.f_mntonname); } else { printf("WARNING: %s: TRIM flag on fs but disk does " "not confirm that it supports TRIM\n", mp->mnt_stat.f_mntonname); ump->um_candelete = 0; } if (ump->um_candelete) { ump->um_trim_tq = taskqueue_create("trim", M_WAITOK, taskqueue_thread_enqueue, &ump->um_trim_tq); taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS, "%s trim", mp->mnt_stat.f_mntonname); } } ump->um_mountp = mp; ump->um_dev = dev; ump->um_devvp = devvp; ump->um_nindir = fs->fs_nindir; ump->um_bptrtodb = fs->fs_fsbtodb; ump->um_seqinc = fs->fs_frag; for (i = 0; i < MAXQUOTAS; i++) ump->um_quotas[i] = NULLVP; #ifdef UFS_EXTATTR ufs_extattr_uepm_init(&ump->um_extattr); #endif /* * Set FS local "last mounted on" information (NULL pad) */ bzero(fs->fs_fsmnt, MAXMNTLEN); strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN); mp->mnt_stat.f_iosize = fs->fs_bsize; if (mp->mnt_flag & MNT_ROOTFS) { /* * Root mount; update timestamp in mount structure. * this will be used by the common root mount code * to update the system clock. */ mp->mnt_time = fs->fs_time; } if (ronly == 0) { fs->fs_mtime = time_second; if ((fs->fs_flags & FS_DOSOFTDEP) && (error = softdep_mount(devvp, mp, fs, cred)) != 0) { free(fs->fs_csp, M_UFSMNT); ffs_flushfiles(mp, FORCECLOSE, td); goto out; } if (fs->fs_snapinum[0] != 0) ffs_snapshot_mount(mp); fs->fs_fmod = 1; fs->fs_clean = 0; (void) ffs_sbupdate(ump, MNT_WAIT, 0); } /* * Initialize filesystem state information in mount struct. */ MNT_ILOCK(mp); mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED | MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE; MNT_IUNLOCK(mp); #ifdef UFS_EXTATTR #ifdef UFS_EXTATTR_AUTOSTART /* * * Auto-starting does the following: * - check for /.attribute in the fs, and extattr_start if so * - for each file in .attribute, enable that file with * an attribute of the same name. * Not clear how to report errors -- probably eat them. * This would all happen while the filesystem was busy/not * available, so would effectively be "atomic". 
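 *
 * Illustratively, a filesystem primed for auto-starting might carry
 * backing files laid out as
 *
 *	/.attribute/system/posix1e.acl_access
 *	/.attribute/user/comment
 *
 * with each file enabled as the attribute of the same name in the
 * matching namespace (the conventional layout; see extattrctl(8)).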
*/ (void) ufs_extattr_autostart(mp, td); #endif /* !UFS_EXTATTR_AUTOSTART */ #endif /* !UFS_EXTATTR */ return (0); out: if (bp) brelse(bp); if (cp != NULL) { g_topology_lock(); g_vfs_close(cp); g_topology_unlock(); } if (ump) { mtx_destroy(UFS_MTX(ump)); if (mp->mnt_gjprovider != NULL) { free(mp->mnt_gjprovider, M_UFSMNT); mp->mnt_gjprovider = NULL; } free(ump->um_fs, M_UFSMNT); free(ump, M_UFSMNT); mp->mnt_data = NULL; } atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0); dev_rel(dev); return (error); } #include static int bigcgs = 0; SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, ""); /* * Sanity checks for loading old filesystem superblocks. * See ffs_oldfscompat_write below for unwound actions. * * XXX - Parts get retired eventually. * Unfortunately new bits get added. */ static void ffs_oldfscompat_read(fs, ump, sblockloc) struct fs *fs; struct ufsmount *ump; ufs2_daddr_t sblockloc; { off_t maxfilesize; /* * If not yet done, update fs_flags location and value of fs_sblockloc. */ if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) { fs->fs_flags = fs->fs_old_flags; fs->fs_old_flags |= FS_FLAGS_UPDATED; fs->fs_sblockloc = sblockloc; } /* * If not yet done, update UFS1 superblock with new wider fields. */ if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) { fs->fs_maxbsize = fs->fs_bsize; fs->fs_time = fs->fs_old_time; fs->fs_size = fs->fs_old_size; fs->fs_dsize = fs->fs_old_dsize; fs->fs_csaddr = fs->fs_old_csaddr; fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir; fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree; fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree; fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree; } if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_old_inodefmt < FS_44INODEFMT) { fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1; fs->fs_qbmask = ~fs->fs_bmask; fs->fs_qfmask = ~fs->fs_fmask; } if (fs->fs_magic == FS_UFS1_MAGIC) { ump->um_savedmaxfilesize = fs->fs_maxfilesize; maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1; if (fs->fs_maxfilesize > maxfilesize) fs->fs_maxfilesize = maxfilesize; } /* Compatibility for old filesystems */ if (fs->fs_avgfilesize <= 0) fs->fs_avgfilesize = AVFILESIZ; if (fs->fs_avgfpdir <= 0) fs->fs_avgfpdir = AFPDIR; if (bigcgs) { fs->fs_save_cgsize = fs->fs_cgsize; fs->fs_cgsize = fs->fs_bsize; } } /* * Unwinding superblock updates for old filesystems. * See ffs_oldfscompat_read above for details. * * XXX - Parts get retired eventually. * Unfortunately new bits get added. */ void ffs_oldfscompat_write(fs, ump) struct fs *fs; struct ufsmount *ump; { /* * Copy back UFS2 updated fields that UFS1 inspects. 
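 *
 * This is the mirror of ffs_oldfscompat_read() above: while mounted,
 * the wide UFS2 fields are the authoritative in-core copies, and the
 * narrow pre-UFS2 fields are refreshed only here, at superblock
 * write time.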
*/ if (fs->fs_magic == FS_UFS1_MAGIC) { fs->fs_old_time = fs->fs_time; fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir; fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree; fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree; fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree; fs->fs_maxfilesize = ump->um_savedmaxfilesize; } if (bigcgs) { fs->fs_cgsize = fs->fs_save_cgsize; fs->fs_save_cgsize = 0; } } /* * unmount system call */ static int ffs_unmount(mp, mntflags) struct mount *mp; int mntflags; { struct thread *td; struct ufsmount *ump = VFSTOUFS(mp); struct fs *fs; int error, flags, susp; #ifdef UFS_EXTATTR int e_restart; #endif flags = 0; td = curthread; fs = ump->um_fs; susp = 0; if (mntflags & MNT_FORCE) { flags |= FORCECLOSE; susp = fs->fs_ronly == 0; } #ifdef UFS_EXTATTR if ((error = ufs_extattr_stop(mp, td))) { if (error != EOPNOTSUPP) printf("WARNING: unmount %s: ufs_extattr_stop " "returned errno %d\n", mp->mnt_stat.f_mntonname, error); e_restart = 0; } else { ufs_extattr_uepm_destroy(&ump->um_extattr); e_restart = 1; } #endif if (susp) { error = vfs_write_suspend_umnt(mp); if (error != 0) goto fail1; } if (MOUNTEDSOFTDEP(mp)) error = softdep_flushfiles(mp, flags, td); else error = ffs_flushfiles(mp, flags, td); if (error != 0 && error != ENXIO) goto fail; UFS_LOCK(ump); if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { printf("WARNING: unmount %s: pending error: blocks %jd " "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes); fs->fs_pendingblocks = 0; fs->fs_pendinginodes = 0; } UFS_UNLOCK(ump); if (MOUNTEDSOFTDEP(mp)) softdep_unmount(mp); if (fs->fs_ronly == 0 || ump->um_fsckpid > 0) { fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1; error = ffs_sbupdate(ump, MNT_WAIT, 0); if (error && error != ENXIO) { fs->fs_clean = 0; goto fail; } } if (susp) vfs_write_resume(mp, VR_START_WRITE); if (ump->um_trim_tq != NULL) { while (ump->um_trim_inflight != 0) pause("ufsutr", hz); taskqueue_drain_all(ump->um_trim_tq); taskqueue_free(ump->um_trim_tq); } g_topology_lock(); if (ump->um_fsckpid > 0) { /* * Return to normal read-only mode. */ error = g_access(ump->um_cp, 0, -1, 0); ump->um_fsckpid = 0; } g_vfs_close(ump->um_cp); g_topology_unlock(); atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0); vrele(ump->um_devvp); dev_rel(ump->um_dev); mtx_destroy(UFS_MTX(ump)); if (mp->mnt_gjprovider != NULL) { free(mp->mnt_gjprovider, M_UFSMNT); mp->mnt_gjprovider = NULL; } free(fs->fs_csp, M_UFSMNT); free(fs, M_UFSMNT); free(ump, M_UFSMNT); mp->mnt_data = NULL; MNT_ILOCK(mp); mp->mnt_flag &= ~MNT_LOCAL; MNT_IUNLOCK(mp); + if (td->td_su == mp) { + td->td_su = NULL; + vfs_rel(mp); + } return (error); fail: if (susp) vfs_write_resume(mp, VR_START_WRITE); fail1: #ifdef UFS_EXTATTR if (e_restart) { ufs_extattr_uepm_init(&ump->um_extattr); #ifdef UFS_EXTATTR_AUTOSTART (void) ufs_extattr_autostart(mp, td); #endif } #endif return (error); } /* * Flush out all the files in a filesystem. 
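 *
 * The order below matters: quota files are flushed and closed first,
 * then any snapshots are unmounted, then every remaining vnode is
 * flushed, and finally the device vnode's own buffers are written
 * out with VOP_FSYNC().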
*/ int ffs_flushfiles(mp, flags, td) struct mount *mp; int flags; struct thread *td; { struct ufsmount *ump; int qerror, error; ump = VFSTOUFS(mp); qerror = 0; #ifdef QUOTA if (mp->mnt_flag & MNT_QUOTA) { int i; error = vflush(mp, 0, SKIPSYSTEM|flags, td); if (error) return (error); for (i = 0; i < MAXQUOTAS; i++) { error = quotaoff(td, mp, i); if (error != 0) { if ((flags & EARLYFLUSH) == 0) return (error); else qerror = error; } } /* * Here we fall through to vflush again to ensure that * we have gotten rid of all the system vnodes, unless * quotas must not be closed. */ } #endif ASSERT_VOP_LOCKED(ump->um_devvp, "ffs_flushfiles"); if (ump->um_devvp->v_vflag & VV_COPYONWRITE) { if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0) return (error); ffs_snapshot_unmount(mp); flags |= FORCECLOSE; /* * Here we fall through to vflush again to ensure * that we have gotten rid of all the system vnodes. */ } /* * Do not close system files if quotas were not closed, to be * able to sync the remaining dquots. The freeblks softupdate * workitems might hold a reference on a dquot, preventing * quotaoff() from completing. Next round of * softdep_flushworklist() iteration should process the * blockers, allowing the next run of quotaoff() to finally * flush held dquots. * * Otherwise, flush all the files. */ if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0) return (error); /* * Flush filesystem metadata. */ vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY); error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td); VOP_UNLOCK(ump->um_devvp, 0); return (error); } /* * Get filesystem statistics. */ static int ffs_statfs(mp, sbp) struct mount *mp; struct statfs *sbp; { struct ufsmount *ump; struct fs *fs; ump = VFSTOUFS(mp); fs = ump->um_fs; if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC) panic("ffs_statfs"); sbp->f_version = STATFS_VERSION; sbp->f_bsize = fs->fs_fsize; sbp->f_iosize = fs->fs_bsize; sbp->f_blocks = fs->fs_dsize; UFS_LOCK(ump); sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag + fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks); sbp->f_bavail = freespace(fs, fs->fs_minfree) + dbtofsb(fs, fs->fs_pendingblocks); sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO; sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes; UFS_UNLOCK(ump); sbp->f_namemax = UFS_MAXNAMLEN; return (0); } static bool sync_doupdate(struct inode *ip) { return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) != 0); } /* * For a lazy sync, we only care about access times, quotas and the * superblock. Other filesystem changes are already converted to * cylinder group blocks or inode blocks updates and are written to * disk by syncer. */ static int ffs_sync_lazy(mp) struct mount *mp; { struct vnode *mvp, *vp; struct inode *ip; struct thread *td; int allerror, error; allerror = 0; td = curthread; if ((mp->mnt_flag & MNT_NOATIME) != 0) goto qupdate; MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) { if (vp->v_type == VNON) { VI_UNLOCK(vp); continue; } ip = VTOI(vp); /* * The IN_ACCESS flag is converted to IN_MODIFIED by * ufs_close() and ufs_getattr() by the calls to * ufs_itimes_locked(), without subsequent UFS_UPDATE(). * Test also all the other timestamp flags too, to pick up * any other cases that could be missed. 
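 *
 * VI_OWEINACT is checked as well so that a vnode whose inactivation
 * was deferred still gets the vget()/vput() cycle below, which lets
 * the pending inactivation run.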
*/ if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) { VI_UNLOCK(vp); continue; } if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, td)) != 0) continue; if (sync_doupdate(ip)) error = ffs_update(vp, 0); if (error != 0) allerror = error; vput(vp); } qupdate: #ifdef QUOTA qsync(mp); #endif if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 && (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0) allerror = error; return (allerror); } /* * Go through the disk queues to initiate sandbagged IO; * go through the inodes to write those that have been modified; * initiate the writing of the super block if it has been modified. * * Note: we are always called with the filesystem marked busy using * vfs_busy(). */ static int ffs_sync(mp, waitfor) struct mount *mp; int waitfor; { struct vnode *mvp, *vp, *devvp; struct thread *td; struct inode *ip; struct ufsmount *ump = VFSTOUFS(mp); struct fs *fs; int error, count, lockreq, allerror = 0; int suspend; int suspended; int secondary_writes; int secondary_accwrites; int softdep_deps; int softdep_accdeps; struct bufobj *bo; suspend = 0; suspended = 0; td = curthread; fs = ump->um_fs; if (fs->fs_fmod != 0 && fs->fs_ronly != 0 && ump->um_fsckpid == 0) panic("%s: ffs_sync: modification on read-only filesystem", fs->fs_fsmnt); if (waitfor == MNT_LAZY) { if (!rebooting) return (ffs_sync_lazy(mp)); waitfor = MNT_NOWAIT; } /* * Write back each (modified) inode. */ lockreq = LK_EXCLUSIVE | LK_NOWAIT; if (waitfor == MNT_SUSPEND) { suspend = 1; waitfor = MNT_WAIT; } if (waitfor == MNT_WAIT) lockreq = LK_EXCLUSIVE; lockreq |= LK_INTERLOCK | LK_SLEEPFAIL; loop: /* Grab snapshot of secondary write counts */ MNT_ILOCK(mp); secondary_writes = mp->mnt_secondary_writes; secondary_accwrites = mp->mnt_secondary_accwrites; MNT_IUNLOCK(mp); /* Grab snapshot of softdep dependency counts */ softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps); MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { /* * Depend on the vnode interlock to keep things stable enough * for a quick test. Since there might be hundreds of * thousands of vnodes, we cannot afford even a subroutine * call unless there's a good chance that we have work to do. */ if (vp->v_type == VNON) { VI_UNLOCK(vp); continue; } ip = VTOI(vp); if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 && vp->v_bufobj.bo_dirty.bv_cnt == 0) { VI_UNLOCK(vp); continue; } if ((error = vget(vp, lockreq, td)) != 0) { if (error == ENOENT || error == ENOLCK) { MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); goto loop; } continue; } if ((error = ffs_syncvnode(vp, waitfor, 0)) != 0) allerror = error; vput(vp); } /* * Force stale filesystem control information to be flushed. 
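 *
 * Note that draining the soft updates work list can itself create
 * more vnodes needing cleaning, hence the jump back to the top of
 * the vnode scan whenever any work items were processed.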
*/ if (waitfor == MNT_WAIT || rebooting) { if ((error = softdep_flushworklist(ump->um_mountp, &count, td))) allerror = error; /* Flushed work items may create new vnodes to clean */ if (allerror == 0 && count) goto loop; } #ifdef QUOTA qsync(mp); #endif devvp = ump->um_devvp; bo = &devvp->v_bufobj; BO_LOCK(bo); if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { BO_UNLOCK(bo); vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); error = VOP_FSYNC(devvp, waitfor, td); VOP_UNLOCK(devvp, 0); if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN)) error = ffs_sbupdate(ump, waitfor, 0); if (error != 0) allerror = error; if (allerror == 0 && waitfor == MNT_WAIT) goto loop; } else if (suspend != 0) { if (softdep_check_suspend(mp, devvp, softdep_deps, softdep_accdeps, secondary_writes, secondary_accwrites) != 0) { MNT_IUNLOCK(mp); goto loop; /* More work needed */ } mtx_assert(MNT_MTX(mp), MA_OWNED); mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED; MNT_IUNLOCK(mp); suspended = 1; } else BO_UNLOCK(bo); /* * Write back modified superblock. */ if (fs->fs_fmod != 0 && (error = ffs_sbupdate(ump, waitfor, suspended)) != 0) allerror = error; return (allerror); } int ffs_vget(mp, ino, flags, vpp) struct mount *mp; ino_t ino; int flags; struct vnode **vpp; { return (ffs_vgetf(mp, ino, flags, vpp, 0)); } int ffs_vgetf(mp, ino, flags, vpp, ffs_flags) struct mount *mp; ino_t ino; int flags; struct vnode **vpp; int ffs_flags; { struct fs *fs; struct inode *ip; struct ufsmount *ump; struct buf *bp; struct vnode *vp; int error; error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL); if (error || *vpp != NULL) return (error); /* * We must promote to an exclusive lock for vnode creation. This * can happen if lookup is passed LOCKSHARED. */ if ((flags & LK_TYPE_MASK) == LK_SHARED) { flags &= ~LK_TYPE_MASK; flags |= LK_EXCLUSIVE; } /* * We do not lock vnode creation as it is believed to be too * expensive for such rare case as simultaneous creation of vnode * for same ino by different processes. We just allow them to race * and check later to decide who wins. Let the race begin! */ ump = VFSTOUFS(mp); fs = ump->um_fs; ip = uma_zalloc(uma_inode, M_WAITOK | M_ZERO); /* Allocate a new vnode/inode. */ error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ? &ffs_vnodeops1 : &ffs_vnodeops2, &vp); if (error) { *vpp = NULL; uma_zfree(uma_inode, ip); return (error); } /* * FFS supports recursive locking. */ lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL); VN_LOCK_AREC(vp); vp->v_data = ip; vp->v_bufobj.bo_bsize = fs->fs_bsize; ip->i_vnode = vp; ip->i_ump = ump; ip->i_number = ino; ip->i_ea_refs = 0; ip->i_nextclustercg = -1; ip->i_flag = fs->fs_magic == FS_UFS1_MAGIC ? 0 : IN_UFS2; #ifdef QUOTA { int i; for (i = 0; i < MAXQUOTAS; i++) ip->i_dquot[i] = NODQUOT; } #endif if (ffs_flags & FFSV_FORCEINSMQ) vp->v_vflag |= VV_FORCEINSMQ; error = insmntque(vp, mp); if (error != 0) { uma_zfree(uma_inode, ip); *vpp = NULL; return (error); } vp->v_vflag &= ~VV_FORCEINSMQ; error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL); if (error || *vpp != NULL) return (error); /* Read in the disk contents for the inode, copy into the inode. */ error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)), (int)fs->fs_bsize, NOCRED, &bp); if (error) { /* * The inode does not contain anything useful, so it would * be misleading to leave it on its hash chain. With mode * still zero, it will be unlinked and returned to the free * list by vput(). 
*/ brelse(bp); vput(vp); *vpp = NULL; return (error); } if (I_IS_UFS1(ip)) ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK); else ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK); ffs_load_inode(bp, ip, fs, ino); if (DOINGSOFTDEP(vp)) softdep_load_inodeblock(ip); else ip->i_effnlink = ip->i_nlink; bqrelse(bp); /* * Initialize the vnode from the inode, check for aliases. * Note that the underlying vnode may have changed. */ error = ufs_vinit(mp, I_IS_UFS1(ip) ? &ffs_fifoops1 : &ffs_fifoops2, &vp); if (error) { vput(vp); *vpp = NULL; return (error); } /* * Finish inode initialization. */ if (vp->v_type != VFIFO) { /* FFS supports shared locking for all files except fifos. */ VN_LOCK_ASHARE(vp); } /* * Set up a generation number for this inode if it does not * already have one. This should only happen on old filesystems. */ if (ip->i_gen == 0) { while (ip->i_gen == 0) ip->i_gen = arc4random(); if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { ip->i_flag |= IN_MODIFIED; DIP_SET(ip, i_gen, ip->i_gen); } } #ifdef MAC if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) { /* * If this vnode is already allocated, and we're running * multi-label, attempt to perform a label association * from the extended attributes on the inode. */ error = mac_vnode_associate_extattr(mp, vp); if (error) { /* ufs_inactive will release ip->i_devvp ref. */ vput(vp); *vpp = NULL; return (error); } } #endif *vpp = vp; return (0); } /* * File handle to vnode * * Have to be really careful about stale file handles: * - check that the inode number is valid * - for UFS2 check that the inode number is initialized * - call ffs_vget() to get the locked inode * - check for an unallocated inode (i_mode == 0) * - check that the given client host has export rights and return * those rights via. exflagsp and credanonp */ static int ffs_fhtovp(mp, fhp, flags, vpp) struct mount *mp; struct fid *fhp; int flags; struct vnode **vpp; { struct ufid *ufhp; struct ufsmount *ump; struct fs *fs; struct cg *cgp; struct buf *bp; ino_t ino; u_int cg; int error; ufhp = (struct ufid *)fhp; ino = ufhp->ufid_ino; ump = VFSTOUFS(mp); fs = ump->um_fs; if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg) return (ESTALE); /* * Need to check if inode is initialized because UFS2 does lazy * initialization and nfs_fhtovp can offer arbitrary inode numbers. */ if (fs->fs_magic != FS_UFS2_MAGIC) return (ufs_fhtovp(mp, ufhp, flags, vpp)); cg = ino_to_cg(fs, ino); error = bread(ump->um_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize, NOCRED, &bp); if (error) return (error); cgp = (struct cg *)bp->b_data; if (!cg_chkmagic(cgp) || ino >= cg * fs->fs_ipg + cgp->cg_initediblk) { brelse(bp); return (ESTALE); } brelse(bp); return (ufs_fhtovp(mp, ufhp, flags, vpp)); } /* * Initialize the filesystem. */ static int ffs_init(vfsp) struct vfsconf *vfsp; { ffs_susp_initialize(); softdep_initialize(); return (ufs_init(vfsp)); } /* * Undo the work of ffs_init(). */ static int ffs_uninit(vfsp) struct vfsconf *vfsp; { int ret; ret = ufs_uninit(vfsp); softdep_uninitialize(); ffs_susp_uninitialize(); return (ret); } /* * Write a superblock and associated information back to disk. 
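 *
 * The cylinder group summary information is written first, and the
 * superblock itself is written only if all of those writes succeed;
 * a failure part way through thus leaves stale summaries behind but
 * never a superblock claiming a cleaner state than the disk holds.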
*/ int ffs_sbupdate(ump, waitfor, suspended) struct ufsmount *ump; int waitfor; int suspended; { struct fs *fs = ump->um_fs; struct buf *sbbp; struct buf *bp; int blks; void *space; int i, size, error, allerror = 0; if (fs->fs_ronly == 1 && (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) != (MNT_RDONLY | MNT_UPDATE) && ump->um_fsckpid == 0) panic("ffs_sbupdate: write read-only filesystem"); /* * We use the superblock's buf to serialize calls to ffs_sbupdate(). */ sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc), (int)fs->fs_sbsize, 0, 0, 0); /* * First write back the summary information. */ blks = howmany(fs->fs_cssize, fs->fs_fsize); space = fs->fs_csp; for (i = 0; i < blks; i += fs->fs_frag) { size = fs->fs_bsize; if (i + fs->fs_frag > blks) size = (blks - i) * fs->fs_fsize; bp = getblk(ump->um_devvp, fsbtodb(fs, fs->fs_csaddr + i), size, 0, 0, 0); bcopy(space, bp->b_data, (u_int)size); space = (char *)space + size; if (suspended) bp->b_flags |= B_VALIDSUSPWRT; if (waitfor != MNT_WAIT) bawrite(bp); else if ((error = bwrite(bp)) != 0) allerror = error; } /* * Now write back the superblock itself. If any errors occurred * up to this point, then fail so that the superblock avoids * being written out as clean. */ if (allerror) { brelse(sbbp); return (allerror); } bp = sbbp; if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 && (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) { printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n", fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1); fs->fs_sblockloc = SBLOCK_UFS1; } if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 && (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) { printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n", fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2); fs->fs_sblockloc = SBLOCK_UFS2; } fs->fs_fmod = 0; fs->fs_time = time_second; if (MOUNTEDSOFTDEP(ump->um_mountp)) softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp); bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize); ffs_oldfscompat_write((struct fs *)bp->b_data, ump); if (suspended) bp->b_flags |= B_VALIDSUSPWRT; if (waitfor != MNT_WAIT) bawrite(bp); else if ((error = bwrite(bp)) != 0) allerror = error; return (allerror); } static int ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp, int attrnamespace, const char *attrname) { #ifdef UFS_EXTATTR return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace, attrname)); #else return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname)); #endif } static void ffs_ifree(struct ufsmount *ump, struct inode *ip) { if (ump->um_fstype == UFS1 && ip->i_din1 != NULL) uma_zfree(uma_ufs1, ip->i_din1); else if (ip->i_din2 != NULL) uma_zfree(uma_ufs2, ip->i_din2); uma_zfree(uma_inode, ip); } static int dobkgrdwrite = 1; SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0, "Do background writes (honoring the BV_BKGRDWRITE flag)?"); /* * Complete a background write started from bwrite. */ static void ffs_backgroundwritedone(struct buf *bp) { struct bufobj *bufobj; struct buf *origbp; /* * Find the original buffer that we are writing. */ bufobj = bp->b_bufobj; BO_LOCK(bufobj); if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL) panic("backgroundwritedone: lost buffer"); /* * We should mark the cylinder group buffer origbp as * dirty, to not loose the failed write. */ if ((bp->b_ioflags & BIO_ERROR) != 0) origbp->b_vflags |= BV_BKGRDERR; BO_UNLOCK(bufobj); /* * Process dependencies then return any unfinished ones. 
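 *
 * "Unfinished" means any soft updates items still hanging off b_dep
 * after buf_complete(); they are moved back to the original buffer
 * so that the next write of origbp will retry them.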
*/ pbrelvp(bp); if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0) buf_complete(bp); #ifdef SOFTUPDATES if (!LIST_EMPTY(&bp->b_dep)) softdep_move_dependencies(bp, origbp); #endif /* * This buffer is marked B_NOCACHE so when it is released * by biodone it will be tossed. */ bp->b_flags |= B_NOCACHE; bp->b_flags &= ~B_CACHE; /* * Prevent brelse() from trying to keep and re-dirtying bp on * errors. It causes b_bufobj dereference in * bdirty()/reassignbuf(), and b_bufobj was cleared in * pbrelvp() above. */ if ((bp->b_ioflags & BIO_ERROR) != 0) bp->b_flags |= B_INVAL; bufdone(bp); BO_LOCK(bufobj); /* * Clear the BV_BKGRDINPROG flag in the original buffer * and awaken it if it is waiting for the write to complete. * If BV_BKGRDINPROG is not set in the original buffer it must * have been released and re-instantiated - which is not legal. */ KASSERT((origbp->b_vflags & BV_BKGRDINPROG), ("backgroundwritedone: lost buffer2")); origbp->b_vflags &= ~BV_BKGRDINPROG; if (origbp->b_vflags & BV_BKGRDWAIT) { origbp->b_vflags &= ~BV_BKGRDWAIT; wakeup(&origbp->b_xflags); } BO_UNLOCK(bufobj); } /* * Write, release buffer on completion. (Done by iodone * if async). Do not bother writing anything if the buffer * is invalid. * * Note that we set B_CACHE here, indicating that buffer is * fully valid and thus cacheable. This is true even of NFS * now so we set it generally. This could be set either here * or in biodone() since the I/O is synchronous. We put it * here. */ static int ffs_bufwrite(struct buf *bp) { struct buf *newbp; CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); if (bp->b_flags & B_INVAL) { brelse(bp); return (0); } if (!BUF_ISLOCKED(bp)) panic("bufwrite: buffer is not busy???"); /* * If a background write is already in progress, delay * writing this block if it is asynchronous. Otherwise * wait for the background write to complete. */ BO_LOCK(bp->b_bufobj); if (bp->b_vflags & BV_BKGRDINPROG) { if (bp->b_flags & B_ASYNC) { BO_UNLOCK(bp->b_bufobj); bdwrite(bp); return (0); } bp->b_vflags |= BV_BKGRDWAIT; msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO, "bwrbg", 0); if (bp->b_vflags & BV_BKGRDINPROG) panic("bufwrite: still writing"); } bp->b_vflags &= ~BV_BKGRDERR; BO_UNLOCK(bp->b_bufobj); /* * If this buffer is marked for background writing and we * do not have to wait for it, make a copy and write the * copy so as to leave this buffer ready for further use. * * This optimization eats a lot of memory. If we have a page * or buffer shortfall we can't do it. */ if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) && (bp->b_flags & B_ASYNC) && !vm_page_count_severe() && !buf_dirty_count_severe()) { KASSERT(bp->b_iodone == NULL, ("bufwrite: needs chained iodone (%p)", bp->b_iodone)); /* get a new block */ newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD); if (newbp == NULL) goto normal_write; KASSERT(buf_mapped(bp), ("Unmapped cg")); memcpy(newbp->b_data, bp->b_data, bp->b_bufsize); BO_LOCK(bp->b_bufobj); bp->b_vflags |= BV_BKGRDINPROG; BO_UNLOCK(bp->b_bufobj); newbp->b_xflags |= BX_BKGRDMARKER; newbp->b_lblkno = bp->b_lblkno; newbp->b_blkno = bp->b_blkno; newbp->b_offset = bp->b_offset; newbp->b_iodone = ffs_backgroundwritedone; newbp->b_flags |= B_ASYNC; newbp->b_flags &= ~B_INVAL; pbgetvp(bp->b_vp, newbp); #ifdef SOFTUPDATES /* * Move over the dependencies. If there are rollbacks, * leave the parent buffer dirtied as it will need to * be written again. 
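 *
 * That is, bundirty() is applied only when softdep_move_dependencies()
 * reports no rollbacks; otherwise the copy written to disk has some
 * entries rolled back, so the original buffer must stay dirty and be
 * written again later with the true contents.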
*/ if (LIST_EMPTY(&bp->b_dep) || softdep_move_dependencies(bp, newbp) == 0) bundirty(bp); #else bundirty(bp); #endif /* * Initiate write on the copy, release the original. The * BKGRDINPROG flag prevents it from going away until * the background write completes. */ bqrelse(bp); bp = newbp; } else /* Mark the buffer clean */ bundirty(bp); /* Let the normal bufwrite do the rest for us */ normal_write: return (bufwrite(bp)); } static void ffs_geom_strategy(struct bufobj *bo, struct buf *bp) { struct vnode *vp; int error; struct buf *tbp; int nocopy; vp = bo2vnode(bo); if (bp->b_iocmd == BIO_WRITE) { if ((bp->b_flags & B_VALIDSUSPWRT) == 0 && bp->b_vp != NULL && bp->b_vp->v_mount != NULL && (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0) panic("ffs_geom_strategy: bad I/O"); nocopy = bp->b_flags & B_NOCOPY; bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY); if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 && vp->v_rdev->si_snapdata != NULL) { if ((bp->b_flags & B_CLUSTER) != 0) { runningbufwakeup(bp); TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head, b_cluster.cluster_entry) { error = ffs_copyonwrite(vp, tbp); if (error != 0 && error != EOPNOTSUPP) { bp->b_error = error; bp->b_ioflags |= BIO_ERROR; bufdone(bp); return; } } bp->b_runningbufspace = bp->b_bufsize; atomic_add_long(&runningbufspace, bp->b_runningbufspace); } else { error = ffs_copyonwrite(vp, bp); if (error != 0 && error != EOPNOTSUPP) { bp->b_error = error; bp->b_ioflags |= BIO_ERROR; bufdone(bp); return; } } } #ifdef SOFTUPDATES if ((bp->b_flags & B_CLUSTER) != 0) { TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head, b_cluster.cluster_entry) { if (!LIST_EMPTY(&tbp->b_dep)) buf_start(tbp); } } else { if (!LIST_EMPTY(&bp->b_dep)) buf_start(bp); } #endif } g_vfs_strategy(bo, bp); } int ffs_own_mount(const struct mount *mp) { if (mp->mnt_op == &ufs_vfsops) return (1); return (0); } #ifdef DDB #ifdef SOFTUPDATES /* defined in ffs_softdep.c */ extern void db_print_ffs(struct ufsmount *ump); DB_SHOW_COMMAND(ffs, db_show_ffs) { struct mount *mp; struct ufsmount *ump; if (have_addr) { ump = VFSTOUFS((struct mount *)addr); db_print_ffs(ump); return; } TAILQ_FOREACH(mp, &mountlist, mnt_list) { if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name)) db_print_ffs(VFSTOUFS(mp)); } } #endif /* SOFTUPDATES */ #endif /* DDB */ Index: projects/clang500-import/sys/ufs/ffs/softdep.h =================================================================== --- projects/clang500-import/sys/ufs/ffs/softdep.h (revision 319548) +++ projects/clang500-import/sys/ufs/ffs/softdep.h (revision 319549) @@ -1,1099 +1,1100 @@ /*- * Copyright 1998, 2000 Marshall Kirk McKusick. All Rights Reserved. * * The soft updates code is derived from the appendix of a University * of Michigan technical report (Gregory R. Ganger and Yale N. Patt, * "Soft Updates: A Solution to the Metadata Update Problem in File * Systems", CSE-TR-254-95, August 1995). * * Further information about soft updates can be obtained from: * * Marshall Kirk McKusick http://www.mckusick.com/softdep/ * 1614 Oxford Street mckusick@mckusick.com * Berkeley, CA 94709-1608 +1-510-843-9542 * USA * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY MARSHALL KIRK MCKUSICK ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL MARSHALL KIRK MCKUSICK BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)softdep.h 9.7 (McKusick) 6/21/00 * $FreeBSD$ */ #include /* * Allocation dependencies are handled with undo/redo on the in-memory * copy of the data. A particular data dependency is eliminated when * it is ALLCOMPLETE: that is ATTACHED, DEPCOMPLETE, and COMPLETE. * * The ATTACHED flag means that the data is not currently being written * to disk. * * The UNDONE flag means that the data has been rolled back to a safe * state for writing to the disk. When the I/O completes, the data is * restored to its current form and the state reverts to ATTACHED. * The data must be locked throughout the rollback, I/O, and roll * forward so that the rolled back information is never visible to * user processes. * * The COMPLETE flag indicates that the item has been written. For example, * a dependency that requires that an inode be written will be marked * COMPLETE after the inode has been written to disk. * * The DEPCOMPLETE flag indicates the completion of any other * dependencies such as the writing of a cylinder group map has been * completed. A dependency structure may be freed only when both it * and its dependencies have completed and any rollbacks that are in * progress have finished as indicated by the set of ALLCOMPLETE flags * all being set. * * The two MKDIR flags indicate additional dependencies that must be done * when creating a new directory. MKDIR_BODY is cleared when the directory * data block containing the "." and ".." entries has been written. * MKDIR_PARENT is cleared when the parent inode with the increased link * count for ".." has been written. When both MKDIR flags have been * cleared, the DEPCOMPLETE flag is set to indicate that the directory * dependencies have been completed. The writing of the directory inode * itself sets the COMPLETE flag which then allows the directory entry for * the new directory to be written to disk. The RMDIR flag marks a dirrem * structure as representing the removal of a directory rather than a * file. When the removal dependencies are completed, additional work needs * to be done* (an additional decrement of the associated inode, and a * decrement of the parent inode). * * The DIRCHG flag marks a diradd structure as representing the changing * of an existing entry rather than the addition of a new one. When * the update is complete the dirrem associated with the inode for * the old name must be added to the worklist to do the necessary * reference count decrement. 
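 *
 * (A rename on top of an existing name is the canonical DIRCHG case:
 * a single diradd/dirrem pair then describes replacing the old inode
 * reference with the new one in place.)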
* * The GOINGAWAY flag indicates that the data structure is frozen from * further change until its dependencies have been completed and its * resources freed after which it will be discarded. * * The IOSTARTED flag prevents multiple calls to the I/O start routine from * doing multiple rollbacks. * * The NEWBLOCK flag marks pagedep structures that have just been allocated, * so must be claimed by the inode before all dependencies are complete. * * The INPROGRESS flag marks worklist structures that are still on the * worklist, but are being considered for action by some process. * * The UFS1FMT flag indicates that the inode being processed is a ufs1 format. * * The EXTDATA flag indicates that the allocdirect describes an * extended-attributes dependency. * * The ONWORKLIST flag shows whether the structure is currently linked * onto a worklist. * * The UNLINK* flags track the progress of updating the on-disk linked * list of active but unlinked inodes. When an inode is first unlinked * it is marked as UNLINKED. When its on-disk di_freelink has been * written its UNLINKNEXT flags is set. When its predecessor in the * list has its di_freelink pointing at us its UNLINKPREV is set. * When the on-disk list can reach it from the superblock, its * UNLINKONLIST flag is set. Once all of these flags are set, it * is safe to let its last name be removed. */ #define ATTACHED 0x000001 #define UNDONE 0x000002 #define COMPLETE 0x000004 #define DEPCOMPLETE 0x000008 #define MKDIR_PARENT 0x000010 /* diradd, mkdir, jaddref, jsegdep only */ #define MKDIR_BODY 0x000020 /* diradd, mkdir, jaddref only */ #define RMDIR 0x000040 /* dirrem only */ #define DIRCHG 0x000080 /* diradd, dirrem only */ #define GOINGAWAY 0x000100 /* indirdep, jremref only */ #define IOSTARTED 0x000200 /* inodedep, pagedep, bmsafemap only */ #define DELAYEDFREE 0x000400 /* allocindirect free delayed. */ #define NEWBLOCK 0x000800 /* pagedep, jaddref only */ #define INPROGRESS 0x001000 /* dirrem, freeblks, freefrag, freefile only */ #define UFS1FMT 0x002000 /* indirdep only */ #define EXTDATA 0x004000 /* allocdirect only */ #define ONWORKLIST 0x008000 #define IOWAITING 0x010000 /* Thread is waiting for IO to complete. */ #define ONDEPLIST 0x020000 /* Structure is on a dependency list. */ #define UNLINKED 0x040000 /* inodedep has been unlinked. */ #define UNLINKNEXT 0x080000 /* inodedep has valid di_freelink */ #define UNLINKPREV 0x100000 /* inodedep is pointed at in the unlink list */ #define UNLINKONLIST 0x200000 /* inodedep is in the unlinked list on disk */ #define UNLINKLINKS (UNLINKNEXT | UNLINKPREV) #define WRITESUCCEEDED 0x400000 /* the disk write completed successfully */ #define ALLCOMPLETE (ATTACHED | COMPLETE | DEPCOMPLETE) /* * Values for each of the soft dependency types. */ #define D_PAGEDEP 0 #define D_INODEDEP 1 #define D_BMSAFEMAP 2 #define D_NEWBLK 3 #define D_ALLOCDIRECT 4 #define D_INDIRDEP 5 #define D_ALLOCINDIR 6 #define D_FREEFRAG 7 #define D_FREEBLKS 8 #define D_FREEFILE 9 #define D_DIRADD 10 #define D_MKDIR 11 #define D_DIRREM 12 #define D_NEWDIRBLK 13 #define D_FREEWORK 14 #define D_FREEDEP 15 #define D_JADDREF 16 #define D_JREMREF 17 #define D_JMVREF 18 #define D_JNEWBLK 19 #define D_JFREEBLK 20 #define D_JFREEFRAG 21 #define D_JSEG 22 #define D_JSEGDEP 23 #define D_SBDEP 24 #define D_JTRUNC 25 #define D_JFSYNC 26 #define D_SENTINEL 27 #define D_LAST D_SENTINEL /* * The workitem queue. 
* * It is sometimes useful and/or necessary to clean up certain dependencies * in the background rather than during execution of an application process * or interrupt service routine. To realize this, we append dependency * structures corresponding to such tasks to a "workitem" queue. In a soft * updates implementation, most pending workitems should not wait for more * than a couple of seconds, so the filesystem syncer process awakens once * per second to process the items on the queue. */ /* LIST_HEAD(workhead, worklist); -- declared in buf.h */ /* * Each request can be linked onto a work queue through its worklist structure. * To avoid the need for a pointer to the structure itself, this structure * MUST be declared FIRST in each type in which it appears! If more than one * worklist is needed in the structure, then a wk_data field must be added * and the macros below changed to use it. */ struct worklist { LIST_ENTRY(worklist) wk_list; /* list of work requests */ struct mount *wk_mp; /* Mount we live in */ unsigned int wk_type:8, /* type of request */ wk_state:24; /* state flags */ }; #define WK_DATA(wk) ((void *)(wk)) #define WK_PAGEDEP(wk) ((struct pagedep *)(wk)) #define WK_INODEDEP(wk) ((struct inodedep *)(wk)) #define WK_BMSAFEMAP(wk) ((struct bmsafemap *)(wk)) #define WK_NEWBLK(wk) ((struct newblk *)(wk)) #define WK_ALLOCDIRECT(wk) ((struct allocdirect *)(wk)) #define WK_INDIRDEP(wk) ((struct indirdep *)(wk)) #define WK_ALLOCINDIR(wk) ((struct allocindir *)(wk)) #define WK_FREEFRAG(wk) ((struct freefrag *)(wk)) #define WK_FREEBLKS(wk) ((struct freeblks *)(wk)) #define WK_FREEWORK(wk) ((struct freework *)(wk)) #define WK_FREEFILE(wk) ((struct freefile *)(wk)) #define WK_DIRADD(wk) ((struct diradd *)(wk)) #define WK_MKDIR(wk) ((struct mkdir *)(wk)) #define WK_DIRREM(wk) ((struct dirrem *)(wk)) #define WK_NEWDIRBLK(wk) ((struct newdirblk *)(wk)) #define WK_JADDREF(wk) ((struct jaddref *)(wk)) #define WK_JREMREF(wk) ((struct jremref *)(wk)) #define WK_JMVREF(wk) ((struct jmvref *)(wk)) #define WK_JSEGDEP(wk) ((struct jsegdep *)(wk)) #define WK_JSEG(wk) ((struct jseg *)(wk)) #define WK_JNEWBLK(wk) ((struct jnewblk *)(wk)) #define WK_JFREEBLK(wk) ((struct jfreeblk *)(wk)) #define WK_FREEDEP(wk) ((struct freedep *)(wk)) #define WK_JFREEFRAG(wk) ((struct jfreefrag *)(wk)) #define WK_SBDEP(wk) ((struct sbdep *)(wk)) #define WK_JTRUNC(wk) ((struct jtrunc *)(wk)) #define WK_JFSYNC(wk) ((struct jfsync *)(wk)) /* * Various types of lists */ LIST_HEAD(dirremhd, dirrem); LIST_HEAD(diraddhd, diradd); LIST_HEAD(newblkhd, newblk); LIST_HEAD(inodedephd, inodedep); LIST_HEAD(allocindirhd, allocindir); LIST_HEAD(allocdirecthd, allocdirect); TAILQ_HEAD(allocdirectlst, allocdirect); LIST_HEAD(indirdephd, indirdep); LIST_HEAD(jaddrefhd, jaddref); LIST_HEAD(jremrefhd, jremref); LIST_HEAD(jmvrefhd, jmvref); LIST_HEAD(jnewblkhd, jnewblk); LIST_HEAD(jblkdephd, jblkdep); LIST_HEAD(freeworkhd, freework); TAILQ_HEAD(freeworklst, freework); TAILQ_HEAD(jseglst, jseg); TAILQ_HEAD(inoreflst, inoref); TAILQ_HEAD(freeblklst, freeblks); /* * The "pagedep" structure tracks the various dependencies related to * a particular directory page. If a directory page has any dependencies, * it will have a pagedep linked to its associated buffer. The * pd_dirremhd list holds the list of dirrem requests which decrement * inode reference counts. These requests are processed after the * directory page with the corresponding zero'ed entries has been * written. 
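 */

/*
 * Editor's sketch (not from the original header): why the worklist must
 * be the FIRST member of every dependency type.  Because it sits at
 * offset zero, a generic "struct worklist *" taken off a queue can be
 * cast directly back to the containing type, exactly as the WK_*()
 * macros above do.  The ex_* names and the type tag value are made up.
 */
#include <stdio.h>

struct ex_worklist {
	int wk_type;			/* type of request */
};

struct ex_pagedep {
	struct ex_worklist pd_list;	/* MUST be first */
	int pd_payload;
};

#define EX_WK_PAGEDEP(wk) ((struct ex_pagedep *)(wk))

int
main(void)
{
	struct ex_pagedep pd = { { 0 }, 42 };
	struct ex_worklist *wk = &pd.pd_list;

	/* Dispatch on the tag, then recover the full structure. */
	if (wk->wk_type == 0)
		printf("payload %d\n", EX_WK_PAGEDEP(wk)->pd_payload);
	return (0);
}

/*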
The pd_diraddhd list maintains the list of diradd requests * which cannot be committed until their corresponding inode has been * written to disk. Because a directory may have many new entries * being created, several lists are maintained hashed on bits of the * offset of the entry into the directory page to keep the lists from * getting too long. Once a new directory entry has been cleared to * be written, it is moved to the pd_pendinghd list. After the new * entry has been written to disk it is removed from the pd_pendinghd * list, any removed operations are done, and the dependency structure * is freed. */ #define DAHASHSZ 5 #define DIRADDHASH(offset) (((offset) >> 2) % DAHASHSZ) struct pagedep { struct worklist pd_list; /* page buffer */ # define pd_state pd_list.wk_state /* check for multiple I/O starts */ LIST_ENTRY(pagedep) pd_hash; /* hashed lookup */ ino_t pd_ino; /* associated file */ ufs_lbn_t pd_lbn; /* block within file */ struct newdirblk *pd_newdirblk; /* associated newdirblk if NEWBLOCK */ struct dirremhd pd_dirremhd; /* dirrem's waiting for page */ struct diraddhd pd_diraddhd[DAHASHSZ]; /* diradd dir entry updates */ struct diraddhd pd_pendinghd; /* directory entries awaiting write */ struct jmvrefhd pd_jmvrefhd; /* Dependent journal writes. */ }; /* * The "inodedep" structure tracks the set of dependencies associated * with an inode. One task that it must manage is delayed operations * (i.e., work requests that must be held until the inodedep's associated * inode has been written to disk). Getting an inode from its incore * state to the disk requires two steps to be taken by the filesystem * in this order: first the inode must be copied to its disk buffer by * the VOP_UPDATE operation; second the inode's buffer must be written * to disk. To ensure that both operations have happened in the required * order, the inodedep maintains two lists. Delayed operations are * placed on the id_inowait list. When the VOP_UPDATE is done, all * operations on the id_inowait list are moved to the id_bufwait list. * When the buffer is written, the items on the id_bufwait list can be * safely moved to the work queue to be processed. A second task of the * inodedep structure is to track the status of block allocation within * the inode. Each block that is allocated is represented by an * "allocdirect" structure (see below). It is linked onto the id_newinoupdt * list until both its contents and its allocation in the cylinder * group map have been written to disk. Once these dependencies have been * satisfied, it is removed from the id_newinoupdt list and any followup * actions such as releasing the previous block or fragment are placed * on the id_inowait list. When an inode is updated (a VOP_UPDATE is * done), the "inodedep" structure is linked onto the buffer through * its worklist. Thus, it will be notified when the buffer is about * to be written and when it is done. At the update time, all the * elements on the id_newinoupdt list are moved to the id_inoupdt list * since those changes are now relevant to the copy of the inode in the * buffer. Also at update time, the tasks on the id_inowait list are * moved to the id_bufwait list so that they will be executed when * the updated inode has been written to disk. When the buffer containing * the inode is written to disk, any updates listed on the id_inoupdt * list are rolled back as they are not yet safe. 
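 */

/*
 * Editor's sketch (not from the original header): the id_inowait ->
 * id_bufwait handoff described above, reduced to moving every entry
 * from one sys/queue.h list to another at "VOP_UPDATE time".  The item
 * type and promote_all() are hypothetical.
 */
#include <sys/queue.h>
#include <stdio.h>

struct ex_item {
	LIST_ENTRY(ex_item) link;
	const char *name;
};
LIST_HEAD(ex_head, ex_item);

static void
promote_all(struct ex_head *inowait, struct ex_head *bufwait)
{
	struct ex_item *it;

	while ((it = LIST_FIRST(inowait)) != NULL) {
		LIST_REMOVE(it, link);
		LIST_INSERT_HEAD(bufwait, it, link);
	}
}

int
main(void)
{
	struct ex_head inowait = LIST_HEAD_INITIALIZER(inowait);
	struct ex_head bufwait = LIST_HEAD_INITIALIZER(bufwait);
	struct ex_item a = { .name = "diradd" };

	LIST_INSERT_HEAD(&inowait, &a, link);
	promote_all(&inowait, &bufwait);	/* inode copied to buffer */
	printf("%s\n", LIST_FIRST(&bufwait)->name);
	return (0);
}

/*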
Following the write, * the changes are once again rolled forward and any actions on the * id_bufwait list are processed (since those actions are now safe). * The entries on the id_inoupdt and id_newinoupdt lists must be kept * sorted by logical block number to speed the calculation of the size * of the rolled back inode (see explanation in initiate_write_inodeblock). * When a directory entry is created, it is represented by a diradd. * The diradd is added to the id_inowait list as it cannot be safely * written to disk until the inode that it represents is on disk. After * the inode is written, the id_bufwait list is processed and the diradd * entries are moved to the id_pendinghd list where they remain until * the directory block containing the name has been written to disk. * The purpose of keeping the entries on the id_pendinghd list is so that * the softdep_fsync function can find and push the inode's directory * name(s) as part of the fsync operation for that file. */ struct inodedep { struct worklist id_list; /* buffer holding inode block */ # define id_state id_list.wk_state /* inode dependency state */ LIST_ENTRY(inodedep) id_hash; /* hashed lookup */ TAILQ_ENTRY(inodedep) id_unlinked; /* Unlinked but ref'd inodes */ struct fs *id_fs; /* associated filesystem */ ino_t id_ino; /* dependent inode */ nlink_t id_nlinkdelta; /* saved effective link count */ nlink_t id_savednlink; /* Link saved during rollback */ LIST_ENTRY(inodedep) id_deps; /* bmsafemap's list of inodedep's */ struct bmsafemap *id_bmsafemap; /* related bmsafemap (if pending) */ struct diradd *id_mkdiradd; /* diradd for a mkdir. */ struct inoreflst id_inoreflst; /* Inode reference adjustments. */ long id_savedextsize; /* ext size saved during rollback */ off_t id_savedsize; /* file size saved during rollback */ struct dirremhd id_dirremhd; /* Removals pending. */ struct workhead id_pendinghd; /* entries awaiting directory write */ struct workhead id_bufwait; /* operations after inode written */ struct workhead id_inowait; /* operations waiting inode update */ struct allocdirectlst id_inoupdt; /* updates before inode written */ struct allocdirectlst id_newinoupdt; /* updates when inode written */ struct allocdirectlst id_extupdt; /* extdata updates pre-inode write */ struct allocdirectlst id_newextupdt; /* extdata updates at ino write */ struct freeblklst id_freeblklst; /* List of partial truncates. */ union { struct ufs1_dinode *idu_savedino1; /* saved ufs1_dinode contents */ struct ufs2_dinode *idu_savedino2; /* saved ufs2_dinode contents */ } id_un; }; #define id_savedino1 id_un.idu_savedino1 #define id_savedino2 id_un.idu_savedino2 /* * A "bmsafemap" structure maintains a list of dependency structures * that depend on the update of a particular cylinder group map. * It has lists for newblks, allocdirects, allocindirs, and inodedeps. * It is attached to the buffer of a cylinder group block when any of * these things are allocated from the cylinder group. It is freed * after the cylinder group map is written and the state of its * dependencies is updated with DEPCOMPLETE to indicate that it has * been processed. */ struct bmsafemap { struct worklist sm_list; /* cylgrp buffer */ # define sm_state sm_list.wk_state LIST_ENTRY(bmsafemap) sm_hash; /* Hash links. */ LIST_ENTRY(bmsafemap) sm_next; /* Mount list.
*/ int sm_cg; struct buf *sm_buf; /* associated buffer */ struct allocdirecthd sm_allocdirecthd; /* allocdirect deps */ struct allocdirecthd sm_allocdirectwr; /* writing allocdirect deps */ struct allocindirhd sm_allocindirhd; /* allocindir deps */ struct allocindirhd sm_allocindirwr; /* writing allocindir deps */ struct inodedephd sm_inodedephd; /* inodedep deps */ struct inodedephd sm_inodedepwr; /* writing inodedep deps */ struct newblkhd sm_newblkhd; /* newblk deps */ struct newblkhd sm_newblkwr; /* writing newblk deps */ struct jaddrefhd sm_jaddrefhd; /* Pending inode allocations. */ struct jnewblkhd sm_jnewblkhd; /* Pending block allocations. */ struct workhead sm_freehd; /* Freedep deps. */ struct workhead sm_freewr; /* Written freedeps. */ }; /* * A "newblk" structure is attached to a bmsafemap structure when a block * or fragment is allocated from a cylinder group. Its state is set to * DEPCOMPLETE when its cylinder group map is written. It is converted to * an allocdirect or allocindir allocation once the allocator calls the * appropriate setup function. It will initially be linked onto a bmsafemap * list. Once converted it can be linked onto the lists described for * allocdirect or allocindir as described below. */ struct newblk { struct worklist nb_list; /* See comment above. */ # define nb_state nb_list.wk_state LIST_ENTRY(newblk) nb_hash; /* Hashed lookup. */ LIST_ENTRY(newblk) nb_deps; /* Bmsafemap's list of newblks. */ struct jnewblk *nb_jnewblk; /* New block journal entry. */ struct bmsafemap *nb_bmsafemap;/* Cylgrp dep (if pending). */ struct freefrag *nb_freefrag; /* Fragment to be freed (if any). */ struct indirdephd nb_indirdeps; /* Children indirect blocks. */ struct workhead nb_newdirblk; /* Dir block to notify when written. */ struct workhead nb_jwork; /* Journal work pending. */ ufs2_daddr_t nb_newblkno; /* New value of block pointer. */ }; /* * An "allocdirect" structure is attached to an "inodedep" when a new block * or fragment is allocated and pointed to by the inode described by * "inodedep". The worklist is linked to the buffer that holds the block. * When the block is first allocated, it is linked to the bmsafemap * structure associated with the buffer holding the cylinder group map * from which it was allocated. When the cylinder group map is written * to disk, ad_state has the DEPCOMPLETE flag set. When the block itself * is written, the COMPLETE flag is set. Once both the cylinder group map * and the data itself have been written, it is safe to write the inode * that claims the block. If there was a previous fragment that had been * allocated before the file was increased in size, the old fragment may * be freed once the inode claiming the new block is written to disk. * This ad_freefrag request is attached to the id_inowait list of the * associated inodedep (pointed to by ad_inodedep) for processing after * the inode is written. When a block is allocated to a directory, an * fsync of a file whose name is within that block must ensure not only * that the block containing the file name has been written, but also * that the on-disk inode references that block. When a new directory * block is created, we allocate a newdirblk structure which is linked * to the associated allocdirect (on its ad_newdirblk list). When the * allocdirect has been satisfied, the newdirblk structure is moved to * the inodedep id_bufwait list of its directory to await the inode * being written.
When the inode is written, the directory entries are * fully committed and can be deleted from their pagedep->pd_pendinghd * and inodedep->id_pendinghd lists. */ struct allocdirect { struct newblk ad_block; /* Common block logic */ # define ad_list ad_block.nb_list /* block pointer worklist */ # define ad_state ad_list.wk_state /* block pointer state */ TAILQ_ENTRY(allocdirect) ad_next; /* inodedep's list of allocdirect's */ struct inodedep *ad_inodedep; /* associated inodedep */ ufs2_daddr_t ad_oldblkno; /* old value of block pointer */ int ad_offset; /* Pointer offset in parent. */ long ad_newsize; /* size of new block */ long ad_oldsize; /* size of old block */ }; #define ad_newblkno ad_block.nb_newblkno #define ad_freefrag ad_block.nb_freefrag #define ad_newdirblk ad_block.nb_newdirblk /* * A single "indirdep" structure manages all allocation dependencies for * pointers in an indirect block. The up-to-date state of the indirect * block is stored in ir_saveddata. The set of pointers that may be safely * written to the disk is stored in ir_safecopy. The state field is used * only to track whether the buffer is currently being written (in which * case it is not safe to update ir_safecopy). Ir_deplisthd contains the * list of allocindir structures, one for each block that needs to be * written to disk. Once the block and its bitmap allocation have been * written the safecopy can be updated to reflect the allocation and the * allocindir structure freed. If ir_state indicates that an I/O on the * indirect block is in progress when ir_safecopy is to be updated, the * update is deferred by placing the allocindir on the ir_donehd list. * When the I/O on the indirect block completes, the entries on the * ir_donehd list are processed by updating their corresponding ir_safecopy * pointers and then freeing the allocindir structure. */ struct indirdep { struct worklist ir_list; /* buffer holding indirect block */ # define ir_state ir_list.wk_state /* indirect block pointer state */ LIST_ENTRY(indirdep) ir_next; /* alloc{direct,indir} list */ TAILQ_HEAD(, freework) ir_trunc; /* List of truncations. */ caddr_t ir_saveddata; /* buffer cache contents */ struct buf *ir_savebp; /* buffer holding safe copy */ struct buf *ir_bp; /* buffer holding live copy */ struct allocindirhd ir_completehd; /* waiting for indirdep complete */ struct allocindirhd ir_writehd; /* Waiting for the pointer write. */ struct allocindirhd ir_donehd; /* done waiting to update safecopy */ struct allocindirhd ir_deplisthd; /* allocindir deps for this block */ struct freeblks *ir_freeblks; /* Freeblks that frees this indir. */ }; /* * An "allocindir" structure is attached to an "indirdep" when a new block * is allocated and pointed to by the indirect block described by the * "indirdep". The worklist is linked to the buffer that holds the new block. * When the block is first allocated, it is linked to the bmsafemap * structure associated with the buffer holding the cylinder group map * from which it was allocated. When the cylinder group map is written * to disk, ai_state has the DEPCOMPLETE flag set. When the block itself * is written, the COMPLETE flag is set. Once both the cylinder group map * and the data itself have been written, it is safe to write the entry in * the indirect block that claims the block; the "allocindir" dependency * can then be freed as it is no longer applicable.
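 */

/*
 * Editor's sketch (not from the original header): the point of the
 * "union allblk" defined just below.  Allocating sizeof(union allblk)
 * for every newblk lets the allocator later convert the record in
 * place to an allocdirect or an allocindir, whichever turns out to be
 * needed.  The *_s types here are simplified stand-ins.
 */
#include <stdio.h>

struct newblk_s { int nb_common; };
struct allocdirect_s { struct newblk_s ad_block; int ad_extra[2]; };
struct allocindir_s { struct newblk_s ai_block; int ai_extra[4]; };

union allblk_s {
	struct allocindir_s ab_allocindir;
	struct allocdirect_s ab_allocdirect;
	struct newblk_s ab_newblk;
};

int
main(void)
{
	/* The union is as large as the largest variant. */
	printf("union %zu, direct %zu, indir %zu\n",
	    sizeof(union allblk_s), sizeof(struct allocdirect_s),
	    sizeof(struct allocindir_s));
	return (0);
}

/*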
*/ struct allocindir { struct newblk ai_block; /* Common block area */ # define ai_state ai_block.nb_list.wk_state /* indirect pointer state */ LIST_ENTRY(allocindir) ai_next; /* indirdep's list of allocindir's */ struct indirdep *ai_indirdep; /* address of associated indirdep */ ufs2_daddr_t ai_oldblkno; /* old value of block pointer */ ufs_lbn_t ai_lbn; /* Logical block number. */ int ai_offset; /* Pointer offset in parent. */ }; #define ai_newblkno ai_block.nb_newblkno #define ai_freefrag ai_block.nb_freefrag #define ai_newdirblk ai_block.nb_newdirblk /* * The allblk union is used to size the newblk structure on allocation so * that it may be any one of three types. */ union allblk { struct allocindir ab_allocindir; struct allocdirect ab_allocdirect; struct newblk ab_newblk; }; /* * A "freefrag" structure is attached to an "inodedep" when a previously * allocated fragment is replaced with a larger fragment, rather than extended. * The "freefrag" structure is constructed and attached when the replacement * block is first allocated. It is processed after the inode claiming the * bigger block that replaces it has been written to disk. */ struct freefrag { struct worklist ff_list; /* id_inowait or delayed worklist */ # define ff_state ff_list.wk_state struct worklist *ff_jdep; /* Associated journal entry. */ struct workhead ff_jwork; /* Journal work pending. */ ufs2_daddr_t ff_blkno; /* fragment physical block number */ long ff_fragsize; /* size of fragment being deleted */ ino_t ff_inum; /* owning inode number */ enum vtype ff_vtype; /* owning inode's file type */ }; /* * A "freeblks" structure is attached to an "inodedep" when the * corresponding file's length is reduced to zero. It records all * the information needed to free the blocks of a file after its * zero'ed inode has been written to disk. The actual work is done * by child freework structures which are responsible for individual * inode pointers while freeblks is responsible for retiring the * entire operation when it is complete and holding common members. */ struct freeblks { struct worklist fb_list; /* id_inowait or delayed worklist */ # define fb_state fb_list.wk_state /* inode and dirty block state */ TAILQ_ENTRY(freeblks) fb_next; /* List of inode truncates. */ struct jblkdephd fb_jblkdephd; /* Journal entries pending */ struct workhead fb_freeworkhd; /* Work items pending */ struct workhead fb_jwork; /* Journal work pending */ struct vnode *fb_devvp; /* filesystem device vnode */ #ifdef QUOTA struct dquot *fb_quota[MAXQUOTAS]; /* quotas to be adjusted */ #endif uint64_t fb_modrev; /* Inode revision at start of trunc. */ off_t fb_len; /* Length we're truncating to. */ ufs2_daddr_t fb_chkcnt; /* Blocks released. */ ino_t fb_inum; /* inode owner of blocks */ enum vtype fb_vtype; /* inode owner's file type */ uid_t fb_uid; /* uid of previous owner of blocks */ int fb_ref; /* Children outstanding. */ int fb_cgwait; /* cg writes outstanding. */ }; /* * A "freework" structure handles the release of a tree of blocks or a single * block. Each indirect block in a tree is allocated its own freework * structure so that the indirect block may be freed only when all of its * children are freed. In this way we enforce the rule that an allocated * block must have a valid path to a root that is journaled. Each child * block acquires a reference and when the ref hits zero the parent ref * is decremented. If there is no parent the freeblks ref is decremented. */ struct freework { struct worklist fw_list; /* Delayed worklist. 
*/ # define fw_state fw_list.wk_state LIST_ENTRY(freework) fw_segs; /* Seg list. */ TAILQ_ENTRY(freework) fw_next; /* Hash/Trunc list. */ struct jnewblk *fw_jnewblk; /* Journal entry to cancel. */ struct freeblks *fw_freeblks; /* Root of operation. */ struct freework *fw_parent; /* Parent indirect. */ struct indirdep *fw_indir; /* indirect block. */ ufs2_daddr_t fw_blkno; /* Our block #. */ ufs_lbn_t fw_lbn; /* Original lbn before free. */ uint16_t fw_frags; /* Number of frags. */ uint16_t fw_ref; /* Number of children out. */ uint16_t fw_off; /* Current working position. */ uint16_t fw_start; /* Start of partial truncate. */ }; /* * A "freedep" structure is allocated to track the completion of a bitmap * write for a freework. One freedep may cover many freed blocks so long * as they reside in the same cylinder group. When the cg is written * the freedep decrements the ref on the freework which may permit it * to be freed as well. */ struct freedep { struct worklist fd_list; /* Delayed worklist. */ struct freework *fd_freework; /* Parent freework. */ }; /* * A "freefile" structure is attached to an inode when its * link count is reduced to zero. It marks the inode as free in * the cylinder group map after the zero'ed inode has been written * to disk and any associated blocks and fragments have been freed. */ struct freefile { struct worklist fx_list; /* id_inowait or delayed worklist */ mode_t fx_mode; /* mode of inode */ ino_t fx_oldinum; /* inum of the unlinked file */ struct vnode *fx_devvp; /* filesystem device vnode */ struct workhead fx_jwork; /* journal work pending. */ }; /* * A "diradd" structure is linked to an "inodedep" id_inowait list when a * new directory entry is allocated that references the inode described * by "inodedep". When the inode itself is written (either the initial * allocation for new inodes or with the increased link count for * existing inodes), the COMPLETE flag is set in da_state. If the entry * is for a newly allocated inode, the "inodedep" structure is associated * with a bmsafemap which prevents the inode from being written to disk * until the cylinder group has been updated. Thus the da_state COMPLETE * flag cannot be set until the inode bitmap dependency has been removed. * When creating a new file, it is safe to write the directory entry that * claims the inode once the referenced inode has been written. Since * writing the inode clears the bitmap dependencies, the DEPCOMPLETE flag * in the diradd can be set unconditionally when creating a file. When * creating a directory, there are two additional dependencies described by * mkdir structures (see their description below). When these dependencies * are resolved the DEPCOMPLETE flag is set in the diradd structure. * If there are multiple links created to the same inode, there will be * a separate diradd structure created for each link. The diradd is * linked onto the pd_diraddhd list of the pagedep for the directory * page that contains the entry. When a directory page is written, * the pd_diraddhd list is traversed to roll back any entries that are * not yet ready to be written to disk. If a directory entry is being * changed (by rename) rather than added, the DIRCHG flag is set and * the da_previous entry points to the entry that will be "removed" * once the new entry has been committed. During rollback, entries * with da_previous are replaced with the previous inode number rather * than zero. * * The overlaying of da_pagedep and da_previous is done to keep the * size of the structure down.
If a da_previous entry is present, the pointer to its * pagedep is available in the associated dirrem entry. If the DIRCHG flag * is set, the da_previous entry is valid; if not set, the da_pagedep entry * is valid. The DIRCHG flag never changes; it is set when the structure * is created if appropriate and is never cleared. */ struct diradd { struct worklist da_list; /* id_inowait or id_pendinghd list */ # define da_state da_list.wk_state /* state of the new directory entry */ LIST_ENTRY(diradd) da_pdlist; /* pagedep holding directory block */ doff_t da_offset; /* offset of new dir entry in dir blk */ ino_t da_newinum; /* inode number for the new dir entry */ union { struct dirrem *dau_previous; /* entry being replaced in dir change */ struct pagedep *dau_pagedep; /* pagedep dependency for addition */ } da_un; struct workhead da_jwork; /* Journal work awaiting completion. */ }; #define da_previous da_un.dau_previous #define da_pagedep da_un.dau_pagedep /* * Two "mkdir" structures are needed to track the additional dependencies * associated with creating a new directory entry. Normally a directory * addition can be committed as soon as the newly referenced inode has been * written to disk with its increased link count. When a directory is * created there are two additional dependencies: writing the directory * data block containing the "." and ".." entries (MKDIR_BODY) and writing * the parent inode with the increased link count for ".." (MKDIR_PARENT). * These additional dependencies are tracked by two mkdir structures that * reference the associated "diradd" structure. When they have completed, * they set the DEPCOMPLETE flag on the diradd so that it knows that its * extra dependencies have been completed. The md_state field is used only * to identify which type of dependency the mkdir structure is tracking. * It is not used in the mainline code for any purpose other than consistency * checking. All the mkdir structures in the system are linked together on * a list. This list is needed so that a diradd can find its associated * mkdir structures and deallocate them if it is prematurely freed (as for * example if a mkdir is immediately followed by a rmdir of the same directory). * Here, the free of the diradd must traverse the list to find the associated * mkdir structures that reference it. The deletion would be faster if the * diradd structure were simply augmented to have two pointers that referenced * the associated mkdir's. However, this would increase the size of the diradd * structure to speed a very infrequent operation. */ struct mkdir { struct worklist md_list; /* id_inowait or buffer holding dir */ # define md_state md_list.wk_state /* type: MKDIR_PARENT or MKDIR_BODY */ struct diradd *md_diradd; /* associated diradd */ struct jaddref *md_jaddref; /* dependent jaddref. */ struct buf *md_buf; /* MKDIR_BODY: buffer holding dir */ LIST_ENTRY(mkdir) md_mkdirs; /* list of all mkdirs */ }; /* * A "dirrem" structure describes an operation to decrement the link * count on an inode. The dirrem structure is attached to the pd_dirremhd * list of the pagedep for the directory page that contains the entry. * It is processed after the directory page with the deleted entry has * been written to disk.
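 */

/*
 * Editor's sketch (not from the original header): the DIRCHG-keyed
 * union overlay used by diradd (da_previous vs. da_pagedep) and, in a
 * similar way, by dirrem below.  The accessors and the *_like type are
 * hypothetical; the flag value matches the DIRCHG definition earlier
 * in this header, prefixed EX_ to avoid a clash.
 */
#include <assert.h>

#define EX_DIRCHG	0x000080

struct diradd_like {
	unsigned int da_state;
	union {
		void *dau_previous;	/* valid only when DIRCHG set */
		void *dau_pagedep;	/* valid only when DIRCHG clear */
	} da_un;
};

static void *
ex_da_previous(const struct diradd_like *da)
{
	assert(da->da_state & EX_DIRCHG);
	return (da->da_un.dau_previous);
}

static void *
ex_da_pagedep(const struct diradd_like *da)
{
	assert((da->da_state & EX_DIRCHG) == 0);
	return (da->da_un.dau_pagedep);
}

/*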
*/ struct dirrem { struct worklist dm_list; /* delayed worklist */ # define dm_state dm_list.wk_state /* state of the old directory entry */ LIST_ENTRY(dirrem) dm_next; /* pagedep's list of dirrem's */ LIST_ENTRY(dirrem) dm_inonext; /* inodedep's list of dirrem's */ struct jremrefhd dm_jremrefhd; /* Pending remove reference deps. */ ino_t dm_oldinum; /* inum of the removed dir entry */ doff_t dm_offset; /* offset of removed dir entry in blk */ union { struct pagedep *dmu_pagedep; /* pagedep dependency for remove */ ino_t dmu_dirinum; /* parent inode number (for rmdir) */ } dm_un; struct workhead dm_jwork; /* Journal work awaiting completion. */ }; #define dm_pagedep dm_un.dmu_pagedep #define dm_dirinum dm_un.dmu_dirinum /* * A "newdirblk" structure tracks the progress of a newly allocated * directory block from its creation until it is claimed by its on-disk * inode. When a block is allocated to a directory, an fsync of a file * whose name is within that block must ensure not only that the block * containing the file name has been written, but also that the on-disk * inode references that block. When a new directory block is created, * we allocate a newdirblk structure which is linked to the associated * allocdirect (on its ad_newdirblk list). When the allocdirect has been * satisfied, the newdirblk structure is moved to the inodedep id_bufwait * list of its directory to await the inode being written. When the inode * is written, the directory entries are fully committed and can be * deleted from their pagedep->pd_pendinghd and inodedep->id_pendinghd * lists. Note that we could track directory blocks allocated to indirect * blocks using a similar scheme with the allocindir structures. Rather * than adding this level of complexity, we simply write those newly * allocated indirect blocks synchronously as such allocations are rare. * In the case of a new directory, the . and .. links are tracked with * a mkdir rather than a pagedep. In this case we track the mkdir * so it can be released when it is written. A workhead is used * to simplify canceling a mkdir that is removed by a subsequent dirrem. */ struct newdirblk { struct worklist db_list; /* id_inowait or pd_newdirblk */ # define db_state db_list.wk_state struct pagedep *db_pagedep; /* associated pagedep */ struct workhead db_mkdir; }; /* * The inoref structure holds the elements common to jaddref and jremref * so they may easily be queued in-order on the inodedep. */ struct inoref { struct worklist if_list; /* Journal pending or jseg entries. */ # define if_state if_list.wk_state TAILQ_ENTRY(inoref) if_deps; /* Links for inodedep. */ struct jsegdep *if_jsegdep; /* Will track our journal record. */ off_t if_diroff; /* Directory offset. */ ino_t if_ino; /* Inode number. */ ino_t if_parent; /* Parent inode number. */ nlink_t if_nlink; /* nlink before addition. */ uint16_t if_mode; /* File mode, needed for IFMT. */ }; /* * A "jaddref" structure tracks a new reference (link count) on an inode * and prevents the link count increase and bitmap allocation until a * journal entry can be written. Once the journal entry is written, * the inode is put on the pendinghd of the bmsafemap and a diradd or * mkdir entry is placed on the bufwait list of the inode. The DEPCOMPLETE * flag is used to indicate that all of the required information for writing * the journal entry is present. MKDIR_BODY and MKDIR_PARENT are used to * differentiate . and .. links from regular file names. NEWBLOCK indicates * a bitmap is still pending.
If a new reference is canceled by a delete * prior to writing the journal, the jaddref write is canceled and the * structure persists to prevent any disk-visible changes until it is * ultimately released when the file is freed or the link is dropped again. */ struct jaddref { struct inoref ja_ref; /* see inoref above. */ # define ja_list ja_ref.if_list /* Jrnl pending, id_inowait, dm_jwork.*/ # define ja_state ja_ref.if_list.wk_state LIST_ENTRY(jaddref) ja_bmdeps; /* Links for bmsafemap. */ union { struct diradd *jau_diradd; /* Pending diradd. */ struct mkdir *jau_mkdir; /* MKDIR_{PARENT,BODY} */ } ja_un; }; #define ja_diradd ja_un.jau_diradd #define ja_mkdir ja_un.jau_mkdir #define ja_diroff ja_ref.if_diroff #define ja_ino ja_ref.if_ino #define ja_parent ja_ref.if_parent #define ja_mode ja_ref.if_mode /* * A "jremref" structure tracks a removed reference (unlink) on an * inode and prevents the directory remove from proceeding until the * journal entry is written. Once the journal has been written the remove * may proceed as normal. */ struct jremref { struct inoref jr_ref; /* see inoref above. */ # define jr_list jr_ref.if_list /* Linked to softdep_journal_pending. */ # define jr_state jr_ref.if_list.wk_state LIST_ENTRY(jremref) jr_deps; /* Links for dirrem. */ struct dirrem *jr_dirrem; /* Back pointer to dirrem. */ }; /* * A "jmvref" structure tracks name relocations within the same * directory block that occur as a result of directory compaction. * It prevents the updated directory entry from being written to disk * until the journal entry is written. Once the journal has been * written the compacted directory may be written to disk. */ struct jmvref { struct worklist jm_list; /* Linked to softdep_journal_pending. */ LIST_ENTRY(jmvref) jm_deps; /* Jmvref on pagedep. */ struct pagedep *jm_pagedep; /* Back pointer to pagedep. */ ino_t jm_parent; /* Containing directory inode number. */ ino_t jm_ino; /* Inode number of our entry. */ off_t jm_oldoff; /* Our old offset in directory. */ off_t jm_newoff; /* Our new offset in directory. */ }; /* * A "jnewblk" structure tracks a newly allocated block or fragment and * prevents the direct or indirect block pointer as well as the cg bitmap * from being written until it is logged. After it is logged the jsegdep * is attached to the allocdirect or allocindir until the operation is * completed or reverted. If the operation is reverted prior to the journal * write the jnewblk structure is maintained to prevent the bitmaps from * reaching the disk. Ultimately the jnewblk structure will be passed * to the free routine as the in-memory cg is modified back to the free * state, at which time it can be released. It may be held on any of the * fx_jwork, fw_jwork, fb_jwork, ff_jwork, nb_jwork, or ir_jwork lists. */ struct jnewblk { struct worklist jn_list; /* See lists above. */ # define jn_state jn_list.wk_state struct jsegdep *jn_jsegdep; /* Will track our journal record. */ LIST_ENTRY(jnewblk) jn_deps; /* Jnewblks on sm_jnewblkhd. */ struct worklist *jn_dep; /* Dependency to ref completed seg. */ ufs_lbn_t jn_lbn; /* Lbn to which allocated. */ ufs2_daddr_t jn_blkno; /* Blkno allocated */ ino_t jn_ino; /* Ino to which allocated. */ int jn_oldfrags; /* Previous fragments when extended. */ int jn_frags; /* Number of fragments. */ }; /* * A "jblkdep" structure tracks jfreeblk and jtrunc records attached to a * freeblks structure. */ struct jblkdep { struct worklist jb_list; /* For softdep journal pending.
*/ struct jsegdep *jb_jsegdep; /* Reference to the jseg. */ struct freeblks *jb_freeblks; /* Back pointer to freeblks. */ LIST_ENTRY(jblkdep) jb_deps; /* Dep list on freeblks. */ }; /* * A "jfreeblk" structure tracks the journal write for freeing a block * or tree of blocks. The block pointer must not be cleared in the inode * or indirect prior to the jfreeblk being written to the journal. */ struct jfreeblk { struct jblkdep jf_dep; /* freeblks linkage. */ ufs_lbn_t jf_lbn; /* Lbn from which blocks freed. */ ufs2_daddr_t jf_blkno; /* Blkno being freed. */ ino_t jf_ino; /* Ino from which blocks freed. */ int jf_frags; /* Number of frags being freed. */ }; /* * A "jfreefrag" tracks the freeing of a single block when a fragment is * extended or an indirect page is replaced. It is not part of a larger * freeblks operation. */ struct jfreefrag { struct worklist fr_list; /* Linked to softdep_journal_pending. */ # define fr_state fr_list.wk_state struct jsegdep *fr_jsegdep; /* Will track our journal record. */ struct freefrag *fr_freefrag; /* Back pointer to freefrag. */ ufs_lbn_t fr_lbn; /* Lbn from which frag freed. */ ufs2_daddr_t fr_blkno; /* Blkno being freed. */ ino_t fr_ino; /* Ino from which frag freed. */ int fr_frags; /* Size of frag being freed. */ }; /* * A "jtrunc" journals the intent to truncate an inode's data or extent area. */ struct jtrunc { struct jblkdep jt_dep; /* freeblks linkage. */ off_t jt_size; /* Final file size. */ int jt_extsize; /* Final extent size. */ ino_t jt_ino; /* Ino being truncated. */ }; /* * A "jfsync" journals the completion of an fsync which invalidates earlier * jtrunc records in the journal. */ struct jfsync { struct worklist jfs_list; /* For softdep journal pending. */ off_t jfs_size; /* Sync file size. */ int jfs_extsize; /* Sync extent size. */ ino_t jfs_ino; /* ino being synced. */ }; /* * A "jsegdep" structure tracks a single reference to a written journal * segment so the journal space can be reclaimed when all dependencies * have been written. It can hang off of id_inowait, dm_jwork, da_jwork, * nb_jwork, ff_jwork, or fb_jwork lists. */ struct jsegdep { struct worklist jd_list; /* See above for lists. */ # define jd_state jd_list.wk_state struct jseg *jd_seg; /* Our journal record. */ }; /* * A "jseg" structure contains all of the journal records written in a * single disk write. The jaddref and jremref structures are linked into * js_entries so they may be completed when the write completes. The * js_entries also include the write dependency structures: jmvref, * jnewblk, jfreeblk, jfreefrag, and jtrunc. The js_refs field counts * the number of entries on the js_entries list. Thus there is a single * jseg entry to describe each journal write. */ struct jseg { struct worklist js_list; /* b_deps link for journal */ # define js_state js_list.wk_state struct workhead js_entries; /* Entries awaiting write */ LIST_HEAD(, freework) js_indirs;/* List of indirects in this seg. */ TAILQ_ENTRY(jseg) js_next; /* List of all unfinished segments. */ struct jblocks *js_jblocks; /* Back pointer to block/seg list */ struct buf *js_buf; /* Buffer while unwritten */ uint64_t js_seq; /* Journal record sequence number. */ uint64_t js_oldseq; /* Oldest valid sequence number. */ int js_size; /* Size of journal record in bytes. */ int js_cnt; /* Total items allocated. */ int js_refs; /* Count of js_entries items. */ }; /* * A 'sbdep' structure tracks the head of the free inode list and * superblock writes.
This makes sure the superblock is always pointing at * the first possible unlinked inode for the suj recovery process. If a * block write completes and we discover a new head is available the buf * is dirtied and the dep is kept. See the description of the UNLINK* * flags above for more details. */ struct sbdep { struct worklist sb_list; /* b_dep linkage */ struct fs *sb_fs; /* Filesystem pointer within buf. */ struct ufsmount *sb_ump; /* Our mount structure */ }; /* * Private journaling structures. */ struct jblocks { struct jseglst jb_segs; /* TAILQ of current segments. */ struct jseg *jb_writeseg; /* Next write to complete. */ struct jseg *jb_oldestseg; /* Oldest segment with valid entries. */ struct jextent *jb_extent; /* Extent array. */ uint64_t jb_nextseq; /* Next sequence number. */ uint64_t jb_oldestwrseq; /* Oldest written sequence number. */ uint8_t jb_needseg; /* Need a forced segment. */ uint8_t jb_suspended; /* Did journal suspend writes? */ int jb_avail; /* Available extents. */ int jb_used; /* Last used extent. */ int jb_head; /* Allocator head. */ int jb_off; /* Allocator extent offset. */ int jb_blocks; /* Total disk blocks covered. */ int jb_free; /* Total disk blocks free. */ int jb_min; /* Minimum free space. */ int jb_low; /* Low on space. */ int jb_age; /* Insertion time of oldest rec. */ }; struct jextent { ufs2_daddr_t je_daddr; /* Disk block address. */ int je_blocks; /* Disk block count. */ }; /* * Hash table declarations. */ LIST_HEAD(mkdirlist, mkdir); LIST_HEAD(pagedep_hashhead, pagedep); LIST_HEAD(inodedep_hashhead, inodedep); LIST_HEAD(newblk_hashhead, newblk); LIST_HEAD(bmsafemap_hashhead, bmsafemap); TAILQ_HEAD(indir_hashhead, freework); /* * Per-filesystem soft dependency data. * Allocated at mount and freed at unmount. */ struct mount_softdeps { struct rwlock sd_fslock; /* softdep lock */ struct workhead sd_workitem_pending; /* softdep work queue */ struct worklist *sd_worklist_tail; /* Tail pointer for above */ struct workhead sd_journal_pending; /* journal work queue */ struct worklist *sd_journal_tail; /* Tail pointer for above */ struct jblocks *sd_jblocks; /* Journal block information */ struct inodedeplst sd_unlinked; /* Unlinked inodes */ struct bmsafemaphd sd_dirtycg; /* Dirty CGs */ struct mkdirlist sd_mkdirlisthd; /* Track mkdirs */ struct pagedep_hashhead *sd_pdhash; /* pagedep hash table */ u_long sd_pdhashsize; /* pagedep hash table size-1 */ long sd_pdnextclean; /* next hash bucket to clean */ struct inodedep_hashhead *sd_idhash; /* inodedep hash table */ u_long sd_idhashsize; /* inodedep hash table size-1 */ long sd_idnextclean; /* next hash bucket to clean */ struct newblk_hashhead *sd_newblkhash; /* newblk hash table */ u_long sd_newblkhashsize; /* newblk hash table size-1 */ struct bmsafemap_hashhead *sd_bmhash; /* bmsafemap hash table */ u_long sd_bmhashsize; /* bmsafemap hash table size-1*/ struct indir_hashhead *sd_indirhash; /* indir hash table */ u_long sd_indirhashsize; /* indir hash table size-1 */ int sd_on_journal; /* Items on the journal list */ int sd_on_worklist; /* Items on the worklist */ int sd_deps; /* Total dependency count */ int sd_accdeps; /* accumulated dep count */ int sd_req; /* Wakeup when deps hits 0. 
*/ int sd_flags; /* comm with flushing thread */ int sd_cleanups; /* Calls to cleanup */ struct thread *sd_flushtd; /* thread handling flushing */ TAILQ_ENTRY(mount_softdeps) sd_next; /* List of softdep filesystem */ struct ufsmount *sd_ump; /* our ufsmount structure */ u_long sd_curdeps[D_LAST + 1]; /* count of current deps */ }; /* * Flags for communicating with the syncer thread. */ #define FLUSH_EXIT 0x0001 /* time to exit */ #define FLUSH_CLEANUP 0x0002 /* need to clear out softdep structures */ #define FLUSH_STARTING 0x0004 /* flush thread not yet started */ +#define FLUSH_RC_ACTIVE 0x0008 /* a thread is flushing the mount point */ /* * Keep the old names from when these were in the ufsmount structure. */ #define softdep_workitem_pending um_softdep->sd_workitem_pending #define softdep_worklist_tail um_softdep->sd_worklist_tail #define softdep_journal_pending um_softdep->sd_journal_pending #define softdep_journal_tail um_softdep->sd_journal_tail #define softdep_jblocks um_softdep->sd_jblocks #define softdep_unlinked um_softdep->sd_unlinked #define softdep_dirtycg um_softdep->sd_dirtycg #define softdep_mkdirlisthd um_softdep->sd_mkdirlisthd #define pagedep_hashtbl um_softdep->sd_pdhash #define pagedep_hash_size um_softdep->sd_pdhashsize #define pagedep_nextclean um_softdep->sd_pdnextclean #define inodedep_hashtbl um_softdep->sd_idhash #define inodedep_hash_size um_softdep->sd_idhashsize #define inodedep_nextclean um_softdep->sd_idnextclean #define newblk_hashtbl um_softdep->sd_newblkhash #define newblk_hash_size um_softdep->sd_newblkhashsize #define bmsafemap_hashtbl um_softdep->sd_bmhash #define bmsafemap_hash_size um_softdep->sd_bmhashsize #define indir_hashtbl um_softdep->sd_indirhash #define indir_hash_size um_softdep->sd_indirhashsize #define softdep_on_journal um_softdep->sd_on_journal #define softdep_on_worklist um_softdep->sd_on_worklist #define softdep_deps um_softdep->sd_deps #define softdep_accdeps um_softdep->sd_accdeps #define softdep_req um_softdep->sd_req #define softdep_flags um_softdep->sd_flags #define softdep_flushtd um_softdep->sd_flushtd #define softdep_curdeps um_softdep->sd_curdeps Index: projects/clang500-import/usr.bin/diff/diff.c =================================================================== --- projects/clang500-import/usr.bin/diff/diff.c (revision 319548) +++ projects/clang500-import/usr.bin/diff/diff.c (revision 319549) @@ -1,466 +1,466 @@ /* $OpenBSD: diff.c,v 1.65 2015/12/29 19:04:46 gsoares Exp $ */ /* * Copyright (c) 2003 Todd C. Miller * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * Sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F39502-99-1-0512. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include "diff.h" #include "xmalloc.h" int lflag, Nflag, Pflag, rflag, sflag, Tflag, cflag; int diff_format, diff_context, status, ignore_file_case; int tabsize = 8; char *start, *ifdefname, *diffargs, *label[2], *ignore_pats; char *group_format = NULL; struct stat stb1, stb2; struct excludes *excludes_list; regex_t ignore_re; -#define OPTIONS "0123456789aBbC:cdD:efhI:iL:lnNPpqrS:sTtU:uwX:x:" +#define OPTIONS "0123456789aBbC:cdD:efHhI:iL:lnNPpqrS:sTtU:uwX:x:" enum { OPT_TSIZE = CHAR_MAX + 1, OPT_STRIPCR, OPT_IGN_FN_CASE, OPT_NO_IGN_FN_CASE, OPT_NORMAL, OPT_HORIZON_LINES, - OPT_SPEED_LARGE_FILES, OPT_CHANGED_GROUP_FORMAT, }; static struct option longopts[] = { { "text", no_argument, 0, 'a' }, { "ignore-space-change", no_argument, 0, 'b' }, { "context", optional_argument, 0, 'C' }, { "ifdef", required_argument, 0, 'D' }, { "minimal", no_argument, 0, 'd' }, { "ed", no_argument, 0, 'e' }, { "forward-ed", no_argument, 0, 'f' }, + { "speed-large-files", no_argument, NULL, 'H' }, { "ignore-matching-lines", required_argument, 0, 'I' }, { "ignore-case", no_argument, 0, 'i' }, { "paginate", no_argument, NULL, 'l' }, { "label", required_argument, 0, 'L' }, { "new-file", no_argument, 0, 'N' }, { "rcs", no_argument, 0, 'n' }, { "unidirectional-new-file", no_argument, 0, 'P' }, { "show-c-function", no_argument, 0, 'p' }, { "brief", no_argument, 0, 'q' }, { "recursive", no_argument, 0, 'r' }, { "report-identical-files", no_argument, 0, 's' }, { "starting-file", required_argument, 0, 'S' }, { "expand-tabs", no_argument, 0, 't' }, { "initial-tab", no_argument, 0, 'T' }, { "unified", optional_argument, 0, 'U' }, { "ignore-all-space", no_argument, 0, 'w' }, { "exclude", required_argument, 0, 'x' }, { "exclude-from", required_argument, 0, 'X' }, { "ignore-file-name-case", no_argument, NULL, OPT_IGN_FN_CASE }, { "horizon-lines", required_argument, NULL, OPT_HORIZON_LINES }, { "no-ignore-file-name-case", no_argument, NULL, OPT_NO_IGN_FN_CASE }, { "normal", no_argument, NULL, OPT_NORMAL }, - { "speed-large-files", no_argument, NULL, OPT_SPEED_LARGE_FILES}, { "strip-trailing-cr", no_argument, NULL, OPT_STRIPCR }, { "tabsize", optional_argument, NULL, OPT_TSIZE }, { "changed-group-format", required_argument, NULL, OPT_CHANGED_GROUP_FORMAT}, { NULL, 0, 0, '\0'} }; void usage(void) __dead2; void push_excludes(char *); void push_ignore_pats(char *); void read_excludes_file(char *file); void set_argstr(char **, char **); int main(int argc, char **argv) { const char *errstr = NULL; char *ep, **oargv; long l; int ch, dflags, lastch, gotstdin, prevoptind, newarg; oargv = argv; gotstdin = 0; dflags = 0; lastch = '\0'; prevoptind = 1; newarg = 1; diff_context = 3; diff_format = 0; while ((ch = getopt_long(argc, argv, OPTIONS, longopts, NULL)) != -1) { switch (ch) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': if (newarg) usage(); /* disallow -[0-9]+ */ else if (lastch == 'c' || lastch == 'u') diff_context = 0; else if (!isdigit(lastch) || diff_context > INT_MAX / 10) usage(); diff_context = (diff_context * 10) + (ch - '0'); break; case 'a': dflags |= D_FORCEASCII; break; case 'b': dflags |= D_FOLDBLANKS; break; case 'C': case 'c': cflag = 1; diff_format = D_CONTEXT; if (optarg != NULL) { l = strtol(optarg, &ep, 10); if (*ep != '\0' || l < 0 || l >= INT_MAX) usage(); diff_context = (int)l; } break; case 'd': dflags |= D_MINIMAL; break; case 'D': 
diff_format = D_IFDEF; ifdefname = optarg; break; case 'e': diff_format = D_EDIT; break; case 'f': diff_format = D_REVERSE; break; + case 'H': + /* ignore but needed for compatibility with GNU diff */ + break; case 'h': /* silently ignore for backwards compatibility */ break; case 'I': push_ignore_pats(optarg); break; case 'i': dflags |= D_IGNORECASE; break; case 'L': if (label[0] == NULL) label[0] = optarg; else if (label[1] == NULL) label[1] = optarg; else usage(); break; case 'l': lflag = 1; break; case 'N': Nflag = 1; break; case 'n': diff_format = D_NREVERSE; break; case 'p': if (diff_format == 0) diff_format = D_CONTEXT; dflags |= D_PROTOTYPE; break; case 'P': Pflag = 1; break; case 'r': rflag = 1; break; case 'q': diff_format = D_BRIEF; break; case 'S': start = optarg; break; case 's': sflag = 1; break; case 'T': Tflag = 1; break; case 't': dflags |= D_EXPANDTABS; break; case 'U': case 'u': diff_format = D_UNIFIED; if (optarg != NULL) { l = strtol(optarg, &ep, 10); if (*ep != '\0' || l < 0 || l >= INT_MAX) usage(); diff_context = (int)l; } break; case 'w': dflags |= D_IGNOREBLANKS; break; case 'X': read_excludes_file(optarg); break; case 'x': push_excludes(optarg); break; case OPT_CHANGED_GROUP_FORMAT: diff_format = D_GFORMAT; group_format = optarg; break; case OPT_HORIZON_LINES: break; /* XXX TODO for compatibility with GNU diff3 */ case OPT_IGN_FN_CASE: ignore_file_case = 1; break; case OPT_NO_IGN_FN_CASE: ignore_file_case = 0; break; case OPT_NORMAL: diff_format = D_NORMAL; break; case OPT_TSIZE: tabsize = (int) strtonum(optarg, 1, INT_MAX, &errstr); if (errstr) { warnx("Invalid argument for tabsize"); usage(); } break; - case OPT_SPEED_LARGE_FILES: - break; /* ignore but needed for compatibility with GNU diff */ case OPT_STRIPCR: dflags |= D_STRIPCR; break; default: usage(); break; } lastch = ch; newarg = optind != prevoptind; prevoptind = optind; } argc -= optind; argv += optind; #ifdef __OpenBSD__ if (pledge("stdio rpath tmppath", NULL) == -1) err(2, "pledge"); #endif /* * Do sanity checks, fill in stb1 and stb2 and call the appropriate * driver routine. Both drivers use the contents of stb1 and stb2. 
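 */

/*
 * Editor's aside (standalone sketch, not part of diff.c; compile it
 * separately): the pattern this revision uses to accept GNU diff's
 * --speed-large-files.  Giving the long option the same 'val' as a
 * short flag ('H' here) funnels both spellings into one switch case.
 */
#include <getopt.h>
#include <stdio.h>

static const struct option ex_opts[] = {
	{ "speed-large-files", no_argument, NULL, 'H' },
	{ NULL, 0, NULL, 0 }
};

int
main(int argc, char **argv)
{
	int ch;

	while ((ch = getopt_long(argc, argv, "H", ex_opts, NULL)) != -1) {
		switch (ch) {
		case 'H':	/* reached by -H and --speed-large-files */
			puts("accepted (ignored, GNU compatibility)");
			break;
		default:
			return (2);
		}
	}
	return (0);
}

/*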
*/ if (argc != 2) usage(); if (ignore_pats != NULL) { char buf[BUFSIZ]; int error; if ((error = regcomp(&ignore_re, ignore_pats, REG_NEWLINE | REG_EXTENDED)) != 0) { regerror(error, &ignore_re, buf, sizeof(buf)); if (*ignore_pats != '\0') errx(2, "%s: %s", ignore_pats, buf); else errx(2, "%s", buf); } } if (strcmp(argv[0], "-") == 0) { fstat(STDIN_FILENO, &stb1); gotstdin = 1; } else if (stat(argv[0], &stb1) != 0) err(2, "%s", argv[0]); if (strcmp(argv[1], "-") == 0) { fstat(STDIN_FILENO, &stb2); gotstdin = 1; } else if (stat(argv[1], &stb2) != 0) err(2, "%s", argv[1]); if (gotstdin && (S_ISDIR(stb1.st_mode) || S_ISDIR(stb2.st_mode))) errx(2, "can't compare - to a directory"); set_argstr(oargv, argv); if (S_ISDIR(stb1.st_mode) && S_ISDIR(stb2.st_mode)) { if (diff_format == D_IFDEF) errx(2, "-D option not supported with directories"); diffdir(argv[0], argv[1], dflags); } else { if (S_ISDIR(stb1.st_mode)) { argv[0] = splice(argv[0], argv[1]); if (stat(argv[0], &stb1) < 0) err(2, "%s", argv[0]); } if (S_ISDIR(stb2.st_mode)) { argv[1] = splice(argv[1], argv[0]); if (stat(argv[1], &stb2) < 0) err(2, "%s", argv[1]); } print_status(diffreg(argv[0], argv[1], dflags, 1), argv[0], argv[1], ""); } exit(status); } void set_argstr(char **av, char **ave) { size_t argsize; char **ap; argsize = 4 + *ave - *av + 1; diffargs = xmalloc(argsize); strlcpy(diffargs, "diff", argsize); for (ap = av + 1; ap < ave; ap++) { if (strcmp(*ap, "--") != 0) { strlcat(diffargs, " ", argsize); strlcat(diffargs, *ap, argsize); } } } /* * Read in an excludes file and push each line. */ void read_excludes_file(char *file) { FILE *fp; char *buf, *pattern; size_t len; if (strcmp(file, "-") == 0) fp = stdin; else if ((fp = fopen(file, "r")) == NULL) err(2, "%s", file); while ((buf = fgetln(fp, &len)) != NULL) { if (buf[len - 1] == '\n') len--; if ((pattern = strndup(buf, len)) == NULL) err(2, "xstrndup"); push_excludes(pattern); } if (strcmp(file, "-") != 0) fclose(fp); } /* * Push a pattern onto the excludes list. 
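 */

/*
 * Editor's aside (standalone sketch, not part of diff.c): how
 * push_ignore_pats() below folds several -I patterns into one extended
 * regular expression by joining them with "|", then compiling with the
 * same REG_NEWLINE|REG_EXTENDED flags main() uses above.
 * append_pattern() is a hypothetical name; strlcat() is assumed
 * available (FreeBSD libc).
 */
#include <err.h>
#include <regex.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
append_pattern(char *pats, const char *pat)
{
	size_t len;

	if (pats == NULL) {
		if ((pats = strdup(pat)) == NULL)
			err(2, "strdup");
		return (pats);
	}
	len = strlen(pats) + strlen(pat) + 2;	/* old + "|" + new + NUL */
	if ((pats = realloc(pats, len)) == NULL)
		err(2, "realloc");
	strlcat(pats, "|", len);
	strlcat(pats, pat, len);
	return (pats);
}

int
main(void)
{
	regex_t re;
	char *pats = NULL;

	pats = append_pattern(pats, "^#");
	pats = append_pattern(pats, "TODO");
	if (regcomp(&re, pats, REG_NEWLINE | REG_EXTENDED) != 0)
		errx(2, "regcomp");
	printf("match: %d\n", regexec(&re, "TODO: fix", 0, NULL, 0) == 0);
	regfree(&re);
	free(pats);
	return (0);
}

/*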
*/ void push_excludes(char *pattern) { struct excludes *entry; entry = xmalloc(sizeof(*entry)); entry->pattern = pattern; entry->next = excludes_list; excludes_list = entry; } void push_ignore_pats(char *pattern) { size_t len; if (ignore_pats == NULL) ignore_pats = xstrdup(pattern); else { /* old + "|" + new + NUL */ len = strlen(ignore_pats) + strlen(pattern) + 2; ignore_pats = xreallocarray(ignore_pats, 1, len); strlcat(ignore_pats, "|", len); strlcat(ignore_pats, pattern, len); } } void print_only(const char *path, size_t dirlen, const char *entry) { if (dirlen > 1) dirlen--; printf("Only in %.*s: %s\n", (int)dirlen, path, entry); } void print_status(int val, char *path1, char *path2, const char *entry) { switch (val) { case D_BINARY: printf("Binary files %s%s and %s%s differ\n", path1, entry, path2, entry); break; case D_DIFFER: if (diff_format == D_BRIEF) printf("Files %s%s and %s%s differ\n", path1, entry, path2, entry); break; case D_SAME: if (sflag) printf("Files %s%s and %s%s are identical\n", path1, entry, path2, entry); break; case D_MISMATCH1: printf("File %s%s is a directory while file %s%s is a regular file\n", path1, entry, path2, entry); break; case D_MISMATCH2: printf("File %s%s is a regular file while file %s%s is a directory\n", path1, entry, path2, entry); break; case D_SKIPPED1: printf("File %s%s is not a regular file or directory and was skipped\n", path1, entry); break; case D_SKIPPED2: printf("File %s%s is not a regular file or directory and was skipped\n", path2, entry); break; } } void usage(void) { (void)fprintf(stderr, "usage: diff [-abdilpTtw] [-c | -e | -f | -n | -q | -u] [--ignore-case]\n" " [--no-ignore-case] [--normal] [--strip-trailing-cr] [--tabsize]\n" " [-I pattern] [-L label] file1 file2\n" " diff [-abdilpTtw] [-I pattern] [-L label] [--ignore-case]\n" " [--no-ignore-case] [--normal] [--strip-trailing-cr] [--tabsize]\n" " -C number file1 file2\n" " diff [-abdiltw] [-I pattern] [--ignore-case] [--no-ignore-case]\n" " [--normal] [--strip-trailing-cr] [--tabsize] -D string file1 file2\n" " diff [-abdilpTtw] [-I pattern] [-L label] [--ignore-case]\n" " [--no-ignore-case] [--normal] [--tabsize] [--strip-trailing-cr]\n" " -U number file1 file2\n" " diff [-abdilNPprsTtw] [-c | -e | -f | -n | -q | -u] [--ignore-case]\n" " [--no-ignore-case] [--normal] [--tabsize] [-I pattern] [-L label]\n" " [-S name] [-X file] [-x pattern] dir1 dir2\n"); exit(2); } Index: projects/clang500-import/usr.bin/kdump/kdump.c =================================================================== --- projects/clang500-import/usr.bin/kdump/kdump.c (revision 319548) +++ projects/clang500-import/usr.bin/kdump/kdump.c (revision 319549) @@ -1,2025 +1,2032 @@ /*- * Copyright (c) 1988, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef lint static const char copyright[] = "@(#) Copyright (c) 1988, 1993\n\ The Regents of the University of California. All rights reserved.\n"; #endif /* not lint */ #ifndef lint #if 0 static char sccsid[] = "@(#)kdump.c 8.1 (Berkeley) 6/6/93"; #endif #endif /* not lint */ #include __FBSDID("$FreeBSD$"); #define _WANT_KERNEL_ERRNO #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_LIBCASPER #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ktrace.h" #ifdef HAVE_LIBCASPER #include #include #include #endif u_int abidump(struct ktr_header *); int fetchprocinfo(struct ktr_header *, u_int *); int fread_tail(void *, int, int); void dumpheader(struct ktr_header *); void ktrsyscall(struct ktr_syscall *, u_int); void ktrsysret(struct ktr_sysret *, u_int); void ktrnamei(char *, int); void hexdump(char *, int, int); void visdump(char *, int, int); void ktrgenio(struct ktr_genio *, int); void ktrpsig(struct ktr_psig *); void ktrcsw(struct ktr_csw *); void ktrcsw_old(struct ktr_csw_old *); void ktruser(int, void *); void ktrcaprights(cap_rights_t *); void ktritimerval(struct itimerval *it); void ktrsockaddr(struct sockaddr *); void ktrstat(struct stat *); void ktrstruct(char *, size_t); void ktrcapfail(struct ktr_cap_fail *); void ktrfault(struct ktr_fault *); void ktrfaultend(struct ktr_faultend *); void usage(void); #define TIMESTAMP_NONE 0x0 #define TIMESTAMP_ABSOLUTE 0x1 #define TIMESTAMP_ELAPSED 0x2 #define TIMESTAMP_RELATIVE 0x4 static int timestamp, decimal, fancy = 1, suppressdata, tail, threads, maxdata, resolv = 0, abiflag = 0, syscallno = 0; static const char *tracefile = DEF_TRACEFILE; static struct ktr_header ktr_header; #define TIME_FORMAT "%b %e %T %Y" #define eqs(s1, s2) (strcmp((s1), (s2)) == 0) #define print_number64(first,i,n,c) do { \ uint64_t __v; \ \ if (quad_align && (((ptrdiff_t)((i) - (first))) & 1) == 1) { \ (i)++; \ (n)--; \ } \ if (quad_slots == 2) \ __v = (uint64_t)(uint32_t)(i)[0] | \ ((uint64_t)(uint32_t)(i)[1]) << 32; \ else \ __v = (uint64_t)*(i); \ if (decimal) \ printf("%c%jd", (c), (intmax_t)__v); \ else \ printf("%c%#jx", (c), (uintmax_t)__v); \ (i) += quad_slots; \ (n) -= quad_slots; \ (c) = ','; \ } while (0) #define print_number(i,n,c) do { \ if (decimal) \ printf("%c%jd", c, (intmax_t)*i); \ else \ printf("%c%#jx", c, (uintmax_t)(u_register_t)*i); \ i++; \ n--; \ c = ','; \ } while (0) struct proc_info { TAILQ_ENTRY(proc_info) info; u_int sv_flags; pid_t pid; }; static TAILQ_HEAD(trace_procs, proc_info) trace_procs; #ifdef HAVE_LIBCASPER static cap_channel_t *cappwd, *capgrp; 
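/*
 * Editor's aside (standalone sketch, not part of kdump.c; it sits
 * inside the HAVE_LIBCASPER block only because of where this note was
 * placed): the 64-bit reassembly that print_number64 above performs
 * when a 32-bit ABI passes a quad argument in two register slots, low
 * word first.  join_quad() is a hypothetical name.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t
join_quad(const uint32_t *slots)
{
	/* slots[0] is the low 32 bits, slots[1] the high 32 bits. */
	return ((uint64_t)slots[0] | ((uint64_t)slots[1] << 32));
}

int
main(void)
{
	uint32_t regs[2] = { 0xdeadbeefU, 0x1U };

	printf("%#jx\n", (uintmax_t)join_quad(regs));	/* 0x1deadbeef */
	return (0);
}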
#endif static void strerror_init(void) { /* * Cache NLS data before entering capability mode. * XXXPJD: There should be strerror_init() and strsignal_init() in libc. */ (void)catopen("libc", NL_CAT_LOCALE); } static void localtime_init(void) { time_t ltime; /* * Allow localtime(3) to cache /etc/localtime content before entering * capability mode. * XXXPJD: There should be localtime_init() in libc. */ (void)time(&ltime); (void)localtime(&ltime); } #ifdef HAVE_LIBCASPER static int cappwdgrp_setup(cap_channel_t **cappwdp, cap_channel_t **capgrpp) { cap_channel_t *capcas, *cappwdloc, *capgrploc; const char *cmds[1], *fields[1]; capcas = cap_init(); if (capcas == NULL) { err(1, "unable to create casper process"); exit(1); } cappwdloc = cap_service_open(capcas, "system.pwd"); capgrploc = cap_service_open(capcas, "system.grp"); /* Casper capability no longer needed. */ cap_close(capcas); if (cappwdloc == NULL || capgrploc == NULL) { if (cappwdloc == NULL) warn("unable to open system.pwd service"); if (capgrploc == NULL) warn("unable to open system.grp service"); exit(1); } /* Limit system.pwd to only getpwuid() function and pw_name field. */ cmds[0] = "getpwuid"; if (cap_pwd_limit_cmds(cappwdloc, cmds, 1) < 0) err(1, "unable to limit system.pwd service"); fields[0] = "pw_name"; if (cap_pwd_limit_fields(cappwdloc, fields, 1) < 0) err(1, "unable to limit system.pwd service"); /* Limit system.grp to only getgrgid() function and gr_name field. */ cmds[0] = "getgrgid"; if (cap_grp_limit_cmds(capgrploc, cmds, 1) < 0) err(1, "unable to limit system.grp service"); fields[0] = "gr_name"; if (cap_grp_limit_fields(capgrploc, fields, 1) < 0) err(1, "unable to limit system.grp service"); *cappwdp = cappwdloc; *capgrpp = capgrploc; return (0); } #endif /* HAVE_LIBCASPER */ static void print_integer_arg(const char *(*decoder)(int), int value) { const char *str; str = decoder(value); if (str != NULL) printf("%s", str); else { if (decimal) printf("<invalid=%d>", value); else printf("<invalid=%#x>", value); } } /* Like print_integer_arg but unknown values are treated as valid.
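 *
 * (Editorial note, not in the original source.)  The print_*_arg helpers
 * share one convention: print the symbolic name when the libsysdecode
 * decoder knows the value, and fall back to a raw "<invalid=...>" rendering
 * otherwise.  The mask variants print the named bits between angle brackets
 * and then append, after the closing '>', any residual bits the decoder
 * could not name, using the remainder returned through their last argument.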
*/ static void print_integer_arg_valid(const char *(*decoder)(int), int value) { const char *str; str = decoder(value); if (str != NULL) printf("%s", str); else { if (decimal) printf("%d", value); else printf("%#x", value); } } static void print_mask_arg(bool (*decoder)(FILE *, int, int *), int value) { bool invalid; int rem; printf("%#x<", value); invalid = !decoder(stdout, value, &rem); printf(">"); if (invalid) printf("%u", rem); } static void print_mask_arg0(bool (*decoder)(FILE *, int, int *), int value) { bool invalid; int rem; if (value == 0) { printf("0"); return; } printf("%#x<", value); invalid = !decoder(stdout, value, &rem); printf(">"); if (invalid) printf("%u", rem); } static void decode_fileflags(fflags_t value) { bool invalid; fflags_t rem; if (value == 0) { printf("0"); return; } printf("%#x<", value); invalid = !sysdecode_fileflags(stdout, value, &rem); printf(">"); if (invalid) printf("%u", rem); } static void decode_filemode(int value) { bool invalid; int rem; if (value == 0) { printf("0"); return; } printf("%#o<", value); invalid = !sysdecode_filemode(stdout, value, &rem); printf(">"); if (invalid) printf("%u", rem); } static void print_mask_arg32(bool (*decoder)(FILE *, uint32_t, uint32_t *), uint32_t value) { bool invalid; uint32_t rem; printf("%#x<", value); invalid = !decoder(stdout, value, &rem); printf(">"); if (invalid) printf("%u", rem); } static void print_mask_argul(bool (*decoder)(FILE *, u_long, u_long *), u_long value) { bool invalid; u_long rem; if (value == 0) { printf("0"); return; } printf("%#lx<", value); invalid = !decoder(stdout, value, &rem); printf(">"); if (invalid) printf("%lu", rem); } int main(int argc, char *argv[]) { int ch, ktrlen, size; void *m; int trpoints = ALL_POINTS; int drop_logged; pid_t pid = 0; u_int sv_flags; setlocale(LC_CTYPE, ""); timestamp = TIMESTAMP_NONE; while ((ch = getopt(argc,argv,"f:dElm:np:AHRrSsTt:")) != -1) switch (ch) { case 'A': abiflag = 1; break; case 'f': tracefile = optarg; break; case 'd': decimal = 1; break; case 'l': tail = 1; break; case 'm': maxdata = atoi(optarg); break; case 'n': fancy = 0; break; case 'p': pid = atoi(optarg); break; case 'r': resolv = 1; break; case 'S': syscallno = 1; break; case 's': suppressdata = 1; break; case 'E': timestamp |= TIMESTAMP_ELAPSED; break; case 'H': threads = 1; break; case 'R': timestamp |= TIMESTAMP_RELATIVE; break; case 'T': timestamp |= TIMESTAMP_ABSOLUTE; break; case 't': trpoints = getpoints(optarg); if (trpoints < 0) errx(1, "unknown trace point in %s", optarg); break; default: usage(); } if (argc > optind) usage(); m = malloc(size = 1025); if (m == NULL) errx(1, "%s", strerror(ENOMEM)); if (strcmp(tracefile, "-") != 0) if (!freopen(tracefile, "r", stdin)) err(1, "%s", tracefile); strerror_init(); localtime_init(); #ifdef HAVE_LIBCASPER if (resolv != 0) { if (cappwdgrp_setup(&cappwd, &capgrp) < 0) { cappwd = NULL; capgrp = NULL; } } if (resolv == 0 || (cappwd != NULL && capgrp != NULL)) { if (cap_enter() < 0 && errno != ENOSYS) err(1, "unable to enter capability mode"); } #else if (resolv == 0) { if (cap_enter() < 0 && errno != ENOSYS) err(1, "unable to enter capability mode"); } #endif if (caph_limit_stdio() == -1) err(1, "unable to limit stdio"); TAILQ_INIT(&trace_procs); drop_logged = 0; while (fread_tail(&ktr_header, sizeof(struct ktr_header), 1)) { if (ktr_header.ktr_type & KTR_DROP) { ktr_header.ktr_type &= ~KTR_DROP; if (!drop_logged && threads) { printf( "%6jd %6jd %-8.*s Events dropped.\n", (intmax_t)ktr_header.ktr_pid, ktr_header.ktr_tid > 0 ? 
(intmax_t)ktr_header.ktr_tid : 0, MAXCOMLEN, ktr_header.ktr_comm); drop_logged = 1; } else if (!drop_logged) { printf("%6jd %-8.*s Events dropped.\n", (intmax_t)ktr_header.ktr_pid, MAXCOMLEN, ktr_header.ktr_comm); drop_logged = 1; } } if (trpoints & (1< size) { m = realloc(m, ktrlen+1); if (m == NULL) errx(1, "%s", strerror(ENOMEM)); size = ktrlen; } if (ktrlen && fread_tail(m, ktrlen, 1) == 0) errx(1, "data too short"); if (fetchprocinfo(&ktr_header, (u_int *)m) != 0) continue; sv_flags = abidump(&ktr_header); if (pid && ktr_header.ktr_pid != pid && ktr_header.ktr_tid != pid) continue; if ((trpoints & (1<ktr_type) { case KTR_PROCCTOR: TAILQ_FOREACH(pi, &trace_procs, info) { if (pi->pid == kth->ktr_pid) { TAILQ_REMOVE(&trace_procs, pi, info); break; } } pi = malloc(sizeof(struct proc_info)); if (pi == NULL) errx(1, "%s", strerror(ENOMEM)); pi->sv_flags = *flags; pi->pid = kth->ktr_pid; TAILQ_INSERT_TAIL(&trace_procs, pi, info); return (1); case KTR_PROCDTOR: TAILQ_FOREACH(pi, &trace_procs, info) { if (pi->pid == kth->ktr_pid) { TAILQ_REMOVE(&trace_procs, pi, info); free(pi); break; } } return (1); } return (0); } u_int abidump(struct ktr_header *kth) { struct proc_info *pi; const char *abi; const char *arch; u_int flags = 0; TAILQ_FOREACH(pi, &trace_procs, info) { if (pi->pid == kth->ktr_pid) { flags = pi->sv_flags; break; } } if (abiflag == 0) return (flags); switch (flags & SV_ABI_MASK) { case SV_ABI_LINUX: abi = "L"; break; case SV_ABI_FREEBSD: abi = "F"; break; case SV_ABI_CLOUDABI: abi = "C"; break; default: abi = "U"; break; } if (flags & SV_LP64) arch = "64"; else if (flags & SV_ILP32) arch = "32"; else arch = "00"; printf("%s%s ", abi, arch); return (flags); } void dumpheader(struct ktr_header *kth) { static char unknown[64]; static struct timeval prevtime, prevtime_e; struct timeval temp; const char *type; const char *sign; switch (kth->ktr_type) { case KTR_SYSCALL: type = "CALL"; break; case KTR_SYSRET: type = "RET "; break; case KTR_NAMEI: type = "NAMI"; break; case KTR_GENIO: type = "GIO "; break; case KTR_PSIG: type = "PSIG"; break; case KTR_CSW: type = "CSW "; break; case KTR_USER: type = "USER"; break; case KTR_STRUCT: type = "STRU"; break; case KTR_SYSCTL: type = "SCTL"; break; case KTR_PROCCTOR: /* FALLTHROUGH */ case KTR_PROCDTOR: return; case KTR_CAPFAIL: type = "CAP "; break; case KTR_FAULT: type = "PFLT"; break; case KTR_FAULTEND: type = "PRET"; break; default: sprintf(unknown, "UNKNOWN(%d)", kth->ktr_type); type = unknown; } /* * The ktr_tid field was previously the ktr_buffer field, which held * the kernel pointer value for the buffer associated with data * following the record header. It now holds a threadid, but only * for trace files after the change. Older trace files still contain * kernel pointers. Detect this and suppress the results by printing * negative tid's as 0. */ if (threads) printf("%6jd %6jd %-8.*s ", (intmax_t)kth->ktr_pid, kth->ktr_tid > 0 ? 
(intmax_t)kth->ktr_tid : 0, MAXCOMLEN, kth->ktr_comm); else printf("%6jd %-8.*s ", (intmax_t)kth->ktr_pid, MAXCOMLEN, kth->ktr_comm); if (timestamp) { if (timestamp & TIMESTAMP_ABSOLUTE) { printf("%jd.%06ld ", (intmax_t)kth->ktr_time.tv_sec, kth->ktr_time.tv_usec); } if (timestamp & TIMESTAMP_ELAPSED) { if (prevtime_e.tv_sec == 0) prevtime_e = kth->ktr_time; timersub(&kth->ktr_time, &prevtime_e, &temp); printf("%jd.%06ld ", (intmax_t)temp.tv_sec, temp.tv_usec); } if (timestamp & TIMESTAMP_RELATIVE) { if (prevtime.tv_sec == 0) prevtime = kth->ktr_time; if (timercmp(&kth->ktr_time, &prevtime, <)) { timersub(&prevtime, &kth->ktr_time, &temp); sign = "-"; } else { timersub(&kth->ktr_time, &prevtime, &temp); sign = ""; } prevtime = kth->ktr_time; printf("%s%jd.%06ld ", sign, (intmax_t)temp.tv_sec, temp.tv_usec); } } printf("%s ", type); } #include static void ioctlname(unsigned long val) { const char *str; str = sysdecode_ioctlname(val); if (str != NULL) printf("%s", str); else if (decimal) printf("%lu", val); else printf("%#lx", val); } static enum sysdecode_abi syscallabi(u_int sv_flags) { if (sv_flags == 0) return (SYSDECODE_ABI_FREEBSD); switch (sv_flags & SV_ABI_MASK) { case SV_ABI_FREEBSD: return (SYSDECODE_ABI_FREEBSD); #if defined(__amd64__) || defined(__i386__) case SV_ABI_LINUX: #ifdef __amd64__ if (sv_flags & SV_ILP32) return (SYSDECODE_ABI_LINUX32); #endif return (SYSDECODE_ABI_LINUX); #endif #if defined(__aarch64__) || defined(__amd64__) case SV_ABI_CLOUDABI: return (SYSDECODE_ABI_CLOUDABI64); #endif default: return (SYSDECODE_ABI_UNKNOWN); } } static void syscallname(u_int code, u_int sv_flags) { const char *name; name = sysdecode_syscallname(syscallabi(sv_flags), code); if (name == NULL) printf("[%d]", code); else { printf("%s", name); if (syscallno) printf("[%d]", code); } } static void print_signal(int signo) { const char *signame; signame = sysdecode_signal(signo); if (signame != NULL) printf("%s", signame); else printf("SIG %d", signo); } void ktrsyscall(struct ktr_syscall *ktr, u_int sv_flags) { int narg = ktr->ktr_narg; register_t *ip, *first; intmax_t arg; int quad_align, quad_slots; syscallname(ktr->ktr_code, sv_flags); ip = first = &ktr->ktr_args[0]; if (narg) { char c = '('; if (fancy && (sv_flags == 0 || (sv_flags & SV_ABI_MASK) == SV_ABI_FREEBSD)) { quad_align = 0; if (sv_flags & SV_ILP32) { #ifdef __powerpc__ quad_align = 1; #endif quad_slots = 2; } else quad_slots = 1; switch (ktr->ktr_code) { case SYS_bindat: case SYS_chflagsat: case SYS_connectat: case SYS_faccessat: case SYS_fchmodat: case SYS_fchownat: case SYS_fstatat: case SYS_futimesat: case SYS_linkat: case SYS_mkdirat: case SYS_mkfifoat: case SYS_mknodat: case SYS_openat: case SYS_readlinkat: case SYS_renameat: case SYS_unlinkat: case SYS_utimensat: putchar('('); print_integer_arg_valid(sysdecode_atfd, *ip); c = ','; ip++; narg--; break; } switch (ktr->ktr_code) { case SYS_ioctl: { print_number(ip, narg, c); putchar(c); ioctlname(*ip); c = ','; ip++; narg--; break; } case SYS_ptrace: putchar('('); print_integer_arg(sysdecode_ptrace_request, *ip); c = ','; ip++; narg--; break; case SYS_access: case SYS_eaccess: case SYS_faccessat: print_number(ip, narg, c); putchar(','); print_mask_arg(sysdecode_access_mode, *ip); ip++; narg--; break; case SYS_open: case SYS_openat: print_number(ip, narg, c); putchar(','); print_mask_arg(sysdecode_open_flags, ip[0]); if ((ip[0] & O_CREAT) == O_CREAT) { putchar(','); decode_filemode(ip[1]); } ip += 2; narg -= 2; break; case SYS_wait4: print_number(ip, narg, c); 
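/*
 * (Editorial sketch, not in the original source.)  Every arm of this switch,
 * including the surrounding SYS_wait4 arm, follows the same walk: 'ip'
 * points at the next raw argument slot, 'narg' counts what remains, and 'c'
 * carries the separator printed before the next argument ('(' first, ','
 * afterwards).  The standalone model below mimics the wait4 decode;
 * toy_wait4_options() is a hypothetical stand-in for the real
 * sysdecode_wait4_options() decoder.
 */
#include <stdbool.h>
#include <stdio.h>

static bool
toy_wait4_options(FILE *fp, int value, int *rem)
{
	/* Name the one flag this toy knows; report the rest as residue. */
	if (value & 0x1)
		fprintf(fp, "WNOHANG");
	*rem = value & ~0x1;
	return (*rem == 0);
}

int
main(void)
{
	long args[] = { 7, 0x7fff1234, 0x1, 0 };	/* pid, status, options, rusage */
	long *ip = args;
	int narg = 4, rem;
	bool invalid;
	char c = '(';

	printf("wait4");
	printf("%c%ld", c, *ip); ip++; narg--; c = ',';	/* pid, plain number */
	printf("%c%#lx", c, *ip); ip++; narg--;		/* status pointer */
	putchar(',');					/* options, decoded as a mask */
	printf("%#lx<", *ip);
	invalid = !toy_wait4_options(stdout, (int)*ip, &rem);
	printf(">");
	if (invalid)
		printf("%u", rem);
	ip++; narg--;
	while (narg > 0) {				/* whatever is left, raw */
		printf("%c%#lx", c, *ip);
		ip++; narg--;
	}
	printf(")\n");	/* prints: wait4(7,0x7fff1234,0x1<WNOHANG>,0) */
	return (0);
}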
print_number(ip, narg, c); putchar(','); print_mask_arg0(sysdecode_wait4_options, *ip); ip++; narg--; break; case SYS_wait6: putchar('('); print_integer_arg(sysdecode_idtype, *ip); c = ','; ip++; narg--; print_number64(first, ip, narg, c); print_number(ip, narg, c); putchar(','); print_mask_arg(sysdecode_wait6_options, *ip); ip++; narg--; break; case SYS_chmod: case SYS_fchmod: case SYS_lchmod: case SYS_fchmodat: print_number(ip, narg, c); putchar(','); decode_filemode(*ip); ip++; narg--; break; case SYS_mknodat: print_number(ip, narg, c); putchar(','); decode_filemode(*ip); ip++; narg--; break; case SYS_getfsstat: print_number(ip, narg, c); print_number(ip, narg, c); putchar(','); print_integer_arg(sysdecode_getfsstat_mode, *ip); ip++; narg--; break; case SYS_mount: print_number(ip, narg, c); print_number(ip, narg, c); putchar(','); print_mask_arg(sysdecode_mount_flags, *ip); ip++; narg--; break; case SYS_unmount: print_number(ip, narg, c); putchar(','); print_mask_arg(sysdecode_mount_flags, *ip); ip++; narg--; break; case SYS_recvmsg: case SYS_sendmsg: print_number(ip, narg, c); print_number(ip, narg, c); putchar(','); print_mask_arg0(sysdecode_msg_flags, *ip); ip++; narg--; break; case SYS_recvfrom: case SYS_sendto: print_number(ip, narg, c); print_number(ip, narg, c); print_number(ip, narg, c); putchar(','); print_mask_arg0(sysdecode_msg_flags, *ip); ip++; narg--; break; case SYS_chflags: case SYS_chflagsat: case SYS_fchflags: case SYS_lchflags: print_number(ip, narg, c); putchar(','); decode_fileflags(*ip); ip++; narg--; break; case SYS_kill: print_number(ip, narg, c); putchar(','); print_signal(*ip); ip++; narg--; break; case SYS_reboot: putchar('('); print_mask_arg(sysdecode_reboot_howto, *ip); ip++; narg--; break; case SYS_umask: putchar('('); decode_filemode(*ip); ip++; narg--; break; case SYS_msync: print_number(ip, narg, c); print_number(ip, narg, c); putchar(','); print_mask_arg(sysdecode_msync_flags, *ip); ip++; narg--; break; #ifdef SYS_freebsd6_mmap case SYS_freebsd6_mmap: print_number(ip, narg, c); print_number(ip, narg, c); putchar(','); print_mask_arg(sysdecode_mmap_prot, *ip); putchar(','); ip++; narg--; print_mask_arg(sysdecode_mmap_flags, *ip); ip++; narg--; break; #endif case SYS_mmap: print_number(ip, narg, c); print_number(ip, narg, c); putchar(','); print_mask_arg(sysdecode_mmap_prot, *ip); putchar(','); ip++; narg--; print_mask_arg(sysdecode_mmap_flags, *ip); ip++; narg--; break; case SYS_mprotect: print_number(ip, narg, c); print_number(ip, narg, c); putchar(','); print_mask_arg(sysdecode_mmap_prot, *ip); ip++; narg--; break; case SYS_madvise: print_number(ip, narg, c); print_number(ip, narg, c); putchar(','); print_integer_arg(sysdecode_madvice, *ip); ip++; narg--; break; case SYS_setpriority: print_number(ip, narg, c); print_number(ip, narg, c); putchar(','); print_integer_arg(sysdecode_prio_which, *ip); ip++; narg--; break; case SYS_fcntl: print_number(ip, narg, c); putchar(','); print_integer_arg(sysdecode_fcntl_cmd, ip[0]); if (sysdecode_fcntl_arg_p(ip[0])) { putchar(','); if (ip[0] == F_SETFL) print_mask_arg( sysdecode_fcntl_fileflags, ip[1]); else sysdecode_fcntl_arg(stdout, ip[0], ip[1], decimal ? 
10 : 16); } ip += 2; narg -= 2; break; case SYS_socket: { int sockdomain; putchar('('); sockdomain = *ip; print_integer_arg(sysdecode_socketdomain, sockdomain); ip++; narg--; putchar(','); print_mask_arg(sysdecode_socket_type, *ip); ip++; narg--; if (sockdomain == PF_INET || sockdomain == PF_INET6) { putchar(','); print_integer_arg(sysdecode_ipproto, *ip); ip++; narg--; } c = ','; break; } case SYS_setsockopt: case SYS_getsockopt: { const char *str; print_number(ip, narg, c); putchar(','); print_integer_arg_valid(sysdecode_sockopt_level, *ip); str = sysdecode_sockopt_name(ip[0], ip[1]); if (str != NULL) { printf(",%s", str); ip++; narg--; } ip++; narg--; break; } #ifdef SYS_freebsd6_lseek case SYS_freebsd6_lseek: print_number(ip, narg, c); /* Hidden 'pad' argument, not in lseek(2) */ print_number(ip, narg, c); print_number64(first, ip, narg, c); putchar(','); print_integer_arg(sysdecode_whence, *ip); ip++; narg--; break; #endif case SYS_lseek: print_number(ip, narg, c); print_number64(first, ip, narg, c); putchar(','); print_integer_arg(sysdecode_whence, *ip); ip++; narg--; break; case SYS_flock: print_number(ip, narg, c); putchar(','); print_mask_arg(sysdecode_flock_operation, *ip); ip++; narg--; break; case SYS_mkfifo: case SYS_mkfifoat: case SYS_mkdir: case SYS_mkdirat: print_number(ip, narg, c); putchar(','); decode_filemode(*ip); ip++; narg--; break; case SYS_shutdown: print_number(ip, narg, c); putchar(','); print_integer_arg(sysdecode_shutdown_how, *ip); ip++; narg--; break; case SYS_socketpair: putchar('('); print_integer_arg(sysdecode_socketdomain, *ip); ip++; narg--; putchar(','); print_mask_arg(sysdecode_socket_type, *ip); ip++; narg--; c = ','; break; case SYS_getrlimit: case SYS_setrlimit: putchar('('); print_integer_arg(sysdecode_rlimit, *ip); ip++; narg--; c = ','; break; + case SYS_getrusage: + putchar('('); + print_integer_arg(sysdecode_getrusage_who, *ip); + ip++; + narg--; + c = ','; + break; case SYS_quotactl: print_number(ip, narg, c); putchar(','); if (!sysdecode_quotactl_cmd(stdout, *ip)) { if (decimal) printf("<invalid=%d>", (int)*ip); else printf("<invalid=%#x>", (int)*ip); } ip++; narg--; c = ','; break; case SYS_nfssvc: putchar('('); print_integer_arg(sysdecode_nfssvc_flags, *ip); ip++; narg--; c = ','; break; case SYS_rtprio: putchar('('); print_integer_arg(sysdecode_rtprio_function, *ip); ip++; narg--; c = ','; break; case SYS___semctl: print_number(ip, narg, c); print_number(ip, narg, c); putchar(','); print_integer_arg(sysdecode_semctl_cmd, *ip); ip++; narg--; break; case SYS_semget: print_number(ip, narg, c); print_number(ip, narg, c); putchar(','); print_mask_arg(sysdecode_semget_flags, *ip); ip++; narg--; break; case SYS_msgctl: print_number(ip, narg, c); putchar(','); print_integer_arg(sysdecode_msgctl_cmd, *ip); ip++; narg--; break; case SYS_shmat: print_number(ip, narg, c); print_number(ip, narg, c); putchar(','); print_mask_arg(sysdecode_shmat_flags, *ip); ip++; narg--; break; case SYS_shmctl: print_number(ip, narg, c); putchar(','); print_integer_arg(sysdecode_shmctl_cmd, *ip); ip++; narg--; break; case SYS_shm_open: print_number(ip, narg, c); putchar(','); print_mask_arg(sysdecode_open_flags, ip[0]); putchar(','); decode_filemode(ip[1]); ip += 2; narg -= 2; break; case SYS_minherit: print_number(ip, narg, c); print_number(ip, narg, c); putchar(','); print_integer_arg(sysdecode_minherit_inherit, *ip); ip++; narg--; break; case SYS_rfork: putchar('('); print_mask_arg(sysdecode_rfork_flags, *ip); ip++; narg--; c = ','; break; case SYS_lio_listio: putchar('(');
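/*
 * (Editorial sketch, not in the original source.)  The lseek arms above use
 * print_number64(), which reassembles a 64-bit argument from 32-bit register
 * slots when quad_slots == 2, first skipping a pad slot on quad-aligned
 * ILP32 ABIs (quad_align, e.g. 32-bit powerpc).  A standalone model of that
 * reassembly, with made-up argument values:
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t
join_quad_arg(const uint32_t *first, const uint32_t **ipp, int quad_align)
{
	const uint32_t *ip = *ipp;

	/* Skip a pad slot so the 64-bit value starts at an even index. */
	if (quad_align && ((ip - first) & 1) == 1)
		ip++;
	/* Low word in the first slot, high word in the second. */
	uint64_t v = (uint64_t)ip[0] | ((uint64_t)ip[1] << 32);
	*ipp = ip + 2;
	return (v);
}

int
main(void)
{
	/* lseek(fd, 0x200001000, SEEK_SET) as 32-bit slots on such an ABI. */
	const uint32_t args[] = { 3, 0 /* pad */, 0x1000, 0x2, 0 /* SEEK_SET */ };
	const uint32_t *ip = &args[1];	/* fd already consumed */

	printf("offset=%#jx\n", (uintmax_t)join_quad_arg(args, &ip, 1));
	return (0);	/* prints: offset=0x200001000 */
}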
print_integer_arg(sysdecode_lio_listio_mode, *ip); ip++; narg--; c = ','; break; case SYS_mlockall: putchar('('); print_mask_arg(sysdecode_mlockall_flags, *ip); ip++; narg--; break; case SYS_sched_setscheduler: print_number(ip, narg, c); putchar(','); print_integer_arg(sysdecode_scheduler_policy, *ip); ip++; narg--; break; case SYS_sched_get_priority_max: case SYS_sched_get_priority_min: putchar('('); print_integer_arg(sysdecode_scheduler_policy, *ip); ip++; narg--; break; case SYS_sendfile: print_number(ip, narg, c); print_number(ip, narg, c); print_number(ip, narg, c); print_number(ip, narg, c); print_number(ip, narg, c); print_number(ip, narg, c); putchar(','); print_mask_arg(sysdecode_sendfile_flags, *ip); ip++; narg--; break; case SYS_kldsym: print_number(ip, narg, c); putchar(','); print_integer_arg(sysdecode_kldsym_cmd, *ip); ip++; narg--; break; case SYS_sigprocmask: putchar('('); print_integer_arg(sysdecode_sigprocmask_how, *ip); ip++; narg--; c = ','; break; case SYS___acl_get_file: case SYS___acl_set_file: case SYS___acl_get_fd: case SYS___acl_set_fd: case SYS___acl_delete_file: case SYS___acl_delete_fd: case SYS___acl_aclcheck_file: case SYS___acl_aclcheck_fd: case SYS___acl_get_link: case SYS___acl_set_link: case SYS___acl_delete_link: case SYS___acl_aclcheck_link: print_number(ip, narg, c); putchar(','); print_integer_arg(sysdecode_acltype, *ip); ip++; narg--; break; case SYS_sigaction: putchar('('); print_signal(*ip); ip++; narg--; c = ','; break; case SYS_extattrctl: print_number(ip, narg, c); putchar(','); print_integer_arg(sysdecode_extattrnamespace, *ip); ip++; narg--; break; case SYS_nmount: print_number(ip, narg, c); print_number(ip, narg, c); putchar(','); print_mask_arg(sysdecode_mount_flags, *ip); ip++; narg--; break; case SYS_thr_create: print_number(ip, narg, c); print_number(ip, narg, c); putchar(','); print_mask_arg(sysdecode_thr_create_flags, *ip); ip++; narg--; break; case SYS_thr_kill: print_number(ip, narg, c); putchar(','); print_signal(*ip); ip++; narg--; break; case SYS_kldunloadf: print_number(ip, narg, c); putchar(','); print_integer_arg(sysdecode_kldunload_flags, *ip); ip++; narg--; break; case SYS_linkat: case SYS_renameat: case SYS_symlinkat: print_number(ip, narg, c); putchar(','); print_integer_arg_valid(sysdecode_atfd, *ip); ip++; narg--; break; case SYS_cap_fcntls_limit: print_number(ip, narg, c); putchar(','); arg = *ip; ip++; narg--; print_mask_arg32(sysdecode_cap_fcntlrights, arg); break; case SYS_posix_fadvise: print_number(ip, narg, c); print_number(ip, narg, c); print_number(ip, narg, c); (void)putchar(','); print_integer_arg(sysdecode_fadvice, *ip); ip++; narg--; break; case SYS_procctl: putchar('('); print_integer_arg(sysdecode_idtype, *ip); c = ','; ip++; narg--; print_number64(first, ip, narg, c); putchar(','); print_integer_arg(sysdecode_procctl_cmd, *ip); ip++; narg--; break; case SYS__umtx_op: print_number(ip, narg, c); putchar(','); print_integer_arg(sysdecode_umtx_op, *ip); switch (*ip) { case UMTX_OP_CV_WAIT: ip++; narg--; putchar(','); print_mask_argul( sysdecode_umtx_cvwait_flags, *ip); break; case UMTX_OP_RW_RDLOCK: ip++; narg--; putchar(','); print_mask_argul( sysdecode_umtx_rwlock_flags, *ip); break; } ip++; narg--; break; case SYS_ftruncate: case SYS_truncate: print_number(ip, narg, c); print_number64(first, ip, narg, c); break; } } while (narg > 0) { print_number(ip, narg, c); } putchar(')'); } putchar('\n'); } void ktrsysret(struct ktr_sysret *ktr, u_int sv_flags) { register_t ret = ktr->ktr_retval; int error = 
ktr->ktr_error; syscallname(ktr->ktr_code, sv_flags); printf(" "); if (error == 0) { if (fancy) { printf("%ld", (long)ret); if (ret < 0 || ret > 9) printf("/%#lx", (unsigned long)ret); } else { if (decimal) printf("%ld", (long)ret); else printf("%#lx", (unsigned long)ret); } } else if (error == ERESTART) printf("RESTART"); else if (error == EJUSTRETURN) printf("JUSTRETURN"); else { printf("-1 errno %d", sysdecode_freebsd_to_abi_errno( syscallabi(sv_flags), error)); if (fancy) printf(" %s", strerror(ktr->ktr_error)); } putchar('\n'); } void ktrnamei(char *cp, int len) { printf("\"%.*s\"\n", len, cp); } void hexdump(char *p, int len, int screenwidth) { int n, i; int width; width = 0; do { width += 2; i = 13; /* base offset */ i += (width / 2) + 1; /* spaces every second byte */ i += (width * 2); /* width of bytes */ i += 3; /* " |" */ i += width; /* each byte */ i += 1; /* "|" */ } while (i < screenwidth); width -= 2; for (n = 0; n < len; n += width) { for (i = n; i < n + width; i++) { if ((i % width) == 0) { /* beginning of line */ printf(" 0x%04x", i); } if ((i % 2) == 0) { printf(" "); } if (i < len) printf("%02x", p[i] & 0xff); else printf(" "); } printf(" |"); for (i = n; i < n + width; i++) { if (i >= len) break; if (p[i] >= ' ' && p[i] <= '~') printf("%c", p[i]); else printf("."); } printf("|\n"); } if ((i % width) != 0) printf("\n"); } void visdump(char *dp, int datalen, int screenwidth) { int col = 0; char *cp; int width; char visbuf[5]; printf(" \""); col = 8; for (;datalen > 0; datalen--, dp++) { vis(visbuf, *dp, VIS_CSTYLE, *(dp+1)); cp = visbuf; /* * Keep track of printables and * space chars (like fold(1)). */ if (col == 0) { putchar('\t'); col = 8; } switch(*cp) { case '\n': col = 0; putchar('\n'); continue; case '\t': width = 8 - (col&07); break; default: width = strlen(cp); } if (col + width > (screenwidth-2)) { printf("\\\n\t"); col = 8; } col += width; do { putchar(*cp++); } while (*cp); } if (col == 0) printf(" "); printf("\"\n"); } void ktrgenio(struct ktr_genio *ktr, int len) { int datalen = len - sizeof (struct ktr_genio); char *dp = (char *)ktr + sizeof (struct ktr_genio); static int screenwidth = 0; int i, binary; printf("fd %d %s %d byte%s\n", ktr->ktr_fd, ktr->ktr_rw == UIO_READ ? "read" : "wrote", datalen, datalen == 1 ? "" : "s"); if (suppressdata) return; if (screenwidth == 0) { struct winsize ws; if (fancy && ioctl(fileno(stderr), TIOCGWINSZ, &ws) != -1 && ws.ws_col > 8) screenwidth = ws.ws_col; else screenwidth = 80; } if (maxdata && datalen > maxdata) datalen = maxdata; for (i = 0, binary = 0; i < datalen && binary == 0; i++) { if (dp[i] >= 32 && dp[i] < 127) continue; if (dp[i] == 10 || dp[i] == 13 || dp[i] == 0 || dp[i] == 9) continue; binary = 1; } if (binary) hexdump(dp, datalen, screenwidth); else visdump(dp, datalen, screenwidth); } void ktrpsig(struct ktr_psig *psig) { const char *str; print_signal(psig->signo); if (psig->action == SIG_DFL) { printf(" SIG_DFL"); } else { printf(" caught handler=0x%lx mask=0x%x", (u_long)psig->action, psig->mask.__bits[0]); } printf(" code="); str = sysdecode_sigcode(psig->signo, psig->code); if (str != NULL) printf("%s", str); else printf("<invalid=%d>", psig->code); putchar('\n'); } void ktrcsw_old(struct ktr_csw_old *cs) { printf("%s %s\n", cs->out ? "stop" : "resume", cs->user ? "user" : "kernel"); } void ktrcsw(struct ktr_csw *cs) { printf("%s %s \"%s\"\n", cs->out ? "stop" : "resume", cs->user ?
"user" : "kernel", cs->wmesg); } void ktruser(int len, void *p) { unsigned char *cp; if (sysdecode_utrace(stdout, p, len)) { printf("\n"); return; } printf("%d ", len); cp = p; while (len--) if (decimal) printf(" %d", *cp++); else printf(" %02x", *cp++); printf("\n"); } void ktrcaprights(cap_rights_t *rightsp) { printf("cap_rights_t "); sysdecode_cap_rights(stdout, rightsp); printf("\n"); } static void ktrtimeval(struct timeval *tv) { printf("{%ld, %ld}", (long)tv->tv_sec, tv->tv_usec); } void ktritimerval(struct itimerval *it) { printf("itimerval { .interval = "); ktrtimeval(&it->it_interval); printf(", .value = "); ktrtimeval(&it->it_value); printf(" }\n"); } void ktrsockaddr(struct sockaddr *sa) { /* TODO: Support additional address families #include struct sockaddr_nb *nb; */ const char *str; char addr[64]; /* * note: ktrstruct() has already verified that sa points to a * buffer at least sizeof(struct sockaddr) bytes long and exactly * sa->sa_len bytes long. */ printf("struct sockaddr { "); str = sysdecode_sockaddr_family(sa->sa_family); if (str != NULL) printf("%s", str); else printf("", sa->sa_family); printf(", "); #define check_sockaddr_len(n) \ if (sa_##n.s##n##_len < sizeof(struct sockaddr_##n)) { \ printf("invalid"); \ break; \ } switch(sa->sa_family) { case AF_INET: { struct sockaddr_in sa_in; memset(&sa_in, 0, sizeof(sa_in)); memcpy(&sa_in, sa, sa->sa_len); check_sockaddr_len(in); inet_ntop(AF_INET, &sa_in.sin_addr, addr, sizeof addr); printf("%s:%u", addr, ntohs(sa_in.sin_port)); break; } case AF_INET6: { struct sockaddr_in6 sa_in6; memset(&sa_in6, 0, sizeof(sa_in6)); memcpy(&sa_in6, sa, sa->sa_len); check_sockaddr_len(in6); getnameinfo((struct sockaddr *)&sa_in6, sizeof(sa_in6), addr, sizeof(addr), NULL, 0, NI_NUMERICHOST); printf("[%s]:%u", addr, htons(sa_in6.sin6_port)); break; } case AF_UNIX: { struct sockaddr_un sa_un; memset(&sa_un, 0, sizeof(sa_un)); memcpy(&sa_un, sa, sa->sa_len); printf("%.*s", (int)sizeof(sa_un.sun_path), sa_un.sun_path); break; } default: printf("unknown address family"); } printf(" }\n"); } void ktrstat(struct stat *statp) { char mode[12], timestr[PATH_MAX + 4]; struct passwd *pwd; struct group *grp; struct tm *tm; /* * note: ktrstruct() has already verified that statp points to a * buffer exactly sizeof(struct stat) bytes long. 
*/ printf("struct stat {"); printf("dev=%ju, ino=%ju, ", (uintmax_t)statp->st_dev, (uintmax_t)statp->st_ino); if (resolv == 0) printf("mode=0%jo, ", (uintmax_t)statp->st_mode); else { strmode(statp->st_mode, mode); printf("mode=%s, ", mode); } printf("nlink=%ju, ", (uintmax_t)statp->st_nlink); if (resolv == 0) { pwd = NULL; } else { #ifdef HAVE_LIBCASPER if (cappwd != NULL) pwd = cap_getpwuid(cappwd, statp->st_uid); else #endif pwd = getpwuid(statp->st_uid); } if (pwd == NULL) printf("uid=%ju, ", (uintmax_t)statp->st_uid); else printf("uid=\"%s\", ", pwd->pw_name); if (resolv == 0) { grp = NULL; } else { #ifdef HAVE_LIBCASPER if (capgrp != NULL) grp = cap_getgrgid(capgrp, statp->st_gid); else #endif grp = getgrgid(statp->st_gid); } if (grp == NULL) printf("gid=%ju, ", (uintmax_t)statp->st_gid); else printf("gid=\"%s\", ", grp->gr_name); printf("rdev=%ju, ", (uintmax_t)statp->st_rdev); printf("atime="); if (resolv == 0) printf("%jd", (intmax_t)statp->st_atim.tv_sec); else { tm = localtime(&statp->st_atim.tv_sec); strftime(timestr, sizeof(timestr), TIME_FORMAT, tm); printf("\"%s\"", timestr); } if (statp->st_atim.tv_nsec != 0) printf(".%09ld, ", statp->st_atim.tv_nsec); else printf(", "); printf("mtime="); if (resolv == 0) printf("%jd", (intmax_t)statp->st_mtim.tv_sec); else { tm = localtime(&statp->st_mtim.tv_sec); strftime(timestr, sizeof(timestr), TIME_FORMAT, tm); printf("\"%s\"", timestr); } if (statp->st_mtim.tv_nsec != 0) printf(".%09ld, ", statp->st_mtim.tv_nsec); else printf(", "); printf("ctime="); if (resolv == 0) printf("%jd", (intmax_t)statp->st_ctim.tv_sec); else { tm = localtime(&statp->st_ctim.tv_sec); strftime(timestr, sizeof(timestr), TIME_FORMAT, tm); printf("\"%s\"", timestr); } if (statp->st_ctim.tv_nsec != 0) printf(".%09ld, ", statp->st_ctim.tv_nsec); else printf(", "); printf("birthtime="); if (resolv == 0) printf("%jd", (intmax_t)statp->st_birthtim.tv_sec); else { tm = localtime(&statp->st_birthtim.tv_sec); strftime(timestr, sizeof(timestr), TIME_FORMAT, tm); printf("\"%s\"", timestr); } if (statp->st_birthtim.tv_nsec != 0) printf(".%09ld, ", statp->st_birthtim.tv_nsec); else printf(", "); printf("size=%jd, blksize=%ju, blocks=%jd, flags=0x%x", (uintmax_t)statp->st_size, (uintmax_t)statp->st_blksize, (intmax_t)statp->st_blocks, statp->st_flags); printf(" }\n"); } void ktrstruct(char *buf, size_t buflen) { char *name, *data; size_t namelen, datalen; int i; cap_rights_t rights; struct itimerval it; struct stat sb; struct sockaddr_storage ss; for (name = buf, namelen = 0; namelen < buflen && name[namelen] != '\0'; ++namelen) /* nothing */; if (namelen == buflen) goto invalid; if (name[namelen] != '\0') goto invalid; data = buf + namelen + 1; datalen = buflen - namelen - 1; if (datalen == 0) goto invalid; /* sanity check */ for (i = 0; i < (int)namelen; ++i) if (!isalpha(name[i])) goto invalid; if (strcmp(name, "caprights") == 0) { if (datalen != sizeof(cap_rights_t)) goto invalid; memcpy(&rights, data, datalen); ktrcaprights(&rights); } else if (strcmp(name, "itimerval") == 0) { if (datalen != sizeof(struct itimerval)) goto invalid; memcpy(&it, data, datalen); ktritimerval(&it); } else if (strcmp(name, "stat") == 0) { if (datalen != sizeof(struct stat)) goto invalid; memcpy(&sb, data, datalen); ktrstat(&sb); } else if (strcmp(name, "sockaddr") == 0) { if (datalen > sizeof(ss)) goto invalid; memcpy(&ss, data, datalen); if (datalen != ss.ss_len) goto invalid; ktrsockaddr((struct sockaddr *)&ss); } else { printf("unknown structure\n"); } return; invalid: printf("invalid 
record\n"); } void ktrcapfail(struct ktr_cap_fail *ktr) { switch (ktr->cap_type) { case CAPFAIL_NOTCAPABLE: /* operation on fd with insufficient capabilities */ printf("operation requires "); sysdecode_cap_rights(stdout, &ktr->cap_needed); printf(", descriptor holds "); sysdecode_cap_rights(stdout, &ktr->cap_held); break; case CAPFAIL_INCREASE: /* requested more capabilities than fd already has */ printf("attempt to increase capabilities from "); sysdecode_cap_rights(stdout, &ktr->cap_held); printf(" to "); sysdecode_cap_rights(stdout, &ktr->cap_needed); break; case CAPFAIL_SYSCALL: /* called restricted syscall */ printf("disallowed system call"); break; case CAPFAIL_LOOKUP: /* used ".." in strict-relative mode */ printf("restricted VFS lookup"); break; default: printf("unknown capability failure: "); sysdecode_cap_rights(stdout, &ktr->cap_needed); printf(" "); sysdecode_cap_rights(stdout, &ktr->cap_held); break; } printf("\n"); } void ktrfault(struct ktr_fault *ktr) { printf("0x%jx ", (uintmax_t)ktr->vaddr); print_mask_arg(sysdecode_vmprot, ktr->type); printf("\n"); } void ktrfaultend(struct ktr_faultend *ktr) { const char *str; str = sysdecode_vmresult(ktr->result); if (str != NULL) printf("%s", str); else printf("", ktr->result); printf("\n"); } void usage(void) { fprintf(stderr, "usage: kdump [-dEnlHRrSsTA] [-f trfile] " "[-m maxdata] [-p pid] [-t trstr]\n"); exit(1); } Index: projects/clang500-import/usr.bin/last/last.c =================================================================== --- projects/clang500-import/usr.bin/last/last.c (revision 319548) +++ projects/clang500-import/usr.bin/last/last.c (revision 319549) @@ -1,571 +1,571 @@ /* * Copyright (c) 1987, 1993, 1994 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef lint static const char copyright[] = "@(#) Copyright (c) 1987, 1993, 1994\n\ The Regents of the University of California. 
All rights reserved.\n"; #endif /* not lint */ #ifndef lint static const char sccsid[] = "@(#)last.c 8.2 (Berkeley) 4/2/94"; #endif /* not lint */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define NO 0 /* false/no */ #define YES 1 /* true/yes */ #define ATOI2(ar) ((ar)[0] - '0') * 10 + ((ar)[1] - '0'); (ar) += 2; typedef struct arg { char *name; /* argument */ #define REBOOT_TYPE -1 #define HOST_TYPE -2 #define TTY_TYPE -3 #define USER_TYPE -4 int type; /* type of arg */ struct arg *next; /* linked list pointer */ } ARG; static ARG *arglist; /* head of linked list */ static SLIST_HEAD(, idtab) idlist; struct idtab { time_t logout; /* log out time */ char id[sizeof ((struct utmpx *)0)->ut_id]; /* identifier */ SLIST_ENTRY(idtab) list; }; static const char *crmsg; /* cause of last reboot */ static time_t currentout; /* current logout value */ static long maxrec; /* records to display */ static const char *file = NULL; /* utx.log file */ static int sflag = 0; /* show delta in seconds */ static int width = 5; /* show seconds in delta */ static int yflag; /* show year */ static int d_first; static int snapfound = 0; /* found snapshot entry? */ static time_t snaptime; /* if != 0, we will only * report users logged in * at this snapshot time */ static void addarg(int, char *); static time_t dateconv(char *); static void doentry(struct utmpx *); static void hostconv(char *); static void printentry(struct utmpx *, struct idtab *); static char *ttyconv(char *); static int want(struct utmpx *); static void usage(void); static void wtmp(void); static void usage(void) { (void)fprintf(stderr, "usage: last [-swy] [-d [[CC]YY][MMDD]hhmm[.SS]] [-f file] [-h host]\n" " [-n maxrec] [-t tty] [user ...]\n"); exit(1); } int main(int argc, char *argv[]) { int ch; char *p; (void) setlocale(LC_TIME, ""); d_first = (*nl_langinfo(D_MD_ORDER) == 'd'); maxrec = -1; snaptime = 0; while ((ch = getopt(argc, argv, "0123456789d:f:h:n:st:wy")) != -1) switch (ch) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': /* * kludge: last was originally designed to take * a number after a dash. */ if (maxrec == -1) { p = strchr(argv[optind - 1], ch); if (p == NULL) p = strchr(argv[optind], ch); maxrec = atol(p); if (!maxrec) exit(0); } break; case 'd': snaptime = dateconv(optarg); break; case 'f': file = optarg; break; case 'h': hostconv(optarg); addarg(HOST_TYPE, optarg); break; case 'n': errno = 0; maxrec = strtol(optarg, &p, 10); if (p == optarg || *p != '\0' || errno != 0 || maxrec <= 0) errx(1, "%s: bad line count", optarg); break; case 's': sflag++; /* Show delta as seconds */ break; case 't': addarg(TTY_TYPE, ttyconv(optarg)); break; case 'w': width = 8; break; case 'y': yflag++; break; case '?': default: usage(); } if (caph_limit_stdio() < 0) err(1, "can't limit stdio rights"); caph_cache_catpages(); caph_cache_tzdata(); /* Cache UTX database. */ if (setutxdb(UTXDB_LOG, file) != 0) err(1, "%s", file != NULL ? 
file : "(default utx db)"); if (cap_enter() < 0 && errno != ENOSYS) err(1, "cap_enter"); if (sflag && width == 8) usage(); if (argc) { setlinebuf(stdout); for (argv += optind; *argv; ++argv) { if (strcmp(*argv, "reboot") == 0) addarg(REBOOT_TYPE, *argv); #define COMPATIBILITY #ifdef COMPATIBILITY /* code to allow "last p5" to work */ addarg(TTY_TYPE, ttyconv(*argv)); #endif addarg(USER_TYPE, *argv); } } wtmp(); exit(0); } /* * wtmp -- * read through the utx.log file */ static void wtmp(void) { struct utmpx *buf = NULL; struct utmpx *ut; static unsigned int amount = 0; time_t t; char ct[80]; struct tm *tm; SLIST_INIT(&idlist); (void)time(&t); /* Load the last entries from the file. */ while ((ut = getutxent()) != NULL) { if (amount % 128 == 0) { buf = realloc(buf, (amount + 128) * sizeof *ut); if (buf == NULL) err(1, "realloc"); } memcpy(&buf[amount++], ut, sizeof *ut); if (t > ut->ut_tv.tv_sec) t = ut->ut_tv.tv_sec; } endutxent(); /* Display them in reverse order. */ while (amount > 0) doentry(&buf[--amount]); - + free(buf); tm = localtime(&t); (void) strftime(ct, sizeof(ct), "%+", tm); printf("\n%s begins %s\n", ((file == NULL) ? "utx.log" : file), ct); } /* * doentry -- * process a single utx.log entry */ static void doentry(struct utmpx *bp) { struct idtab *tt; /* the machine stopped */ if (bp->ut_type == BOOT_TIME || bp->ut_type == SHUTDOWN_TIME) { /* everybody just logged out */ while ((tt = SLIST_FIRST(&idlist)) != NULL) { SLIST_REMOVE_HEAD(&idlist, list); free(tt); } currentout = -bp->ut_tv.tv_sec; crmsg = bp->ut_type != SHUTDOWN_TIME ? "crash" : "shutdown"; /* * if we're in snapshot mode, we want to exit if this * shutdown/reboot appears while we we are tracking the * active range */ if (snaptime && snapfound) exit(0); /* * don't print shutdown/reboot entries unless flagged for */ if (!snaptime && want(bp)) printentry(bp, NULL); return; } /* date got set */ if (bp->ut_type == OLD_TIME || bp->ut_type == NEW_TIME) { if (want(bp) && !snaptime) printentry(bp, NULL); return; } if (bp->ut_type != USER_PROCESS && bp->ut_type != DEAD_PROCESS) return; /* find associated identifier */ SLIST_FOREACH(tt, &idlist, list) if (!memcmp(tt->id, bp->ut_id, sizeof bp->ut_id)) break; if (tt == NULL) { /* add new one */ tt = malloc(sizeof(struct idtab)); if (tt == NULL) errx(1, "malloc failure"); tt->logout = currentout; memcpy(tt->id, bp->ut_id, sizeof bp->ut_id); SLIST_INSERT_HEAD(&idlist, tt, list); } /* * print record if not in snapshot mode and wanted * or in snapshot mode and in snapshot range */ if (bp->ut_type == USER_PROCESS && (want(bp) || (bp->ut_tv.tv_sec < snaptime && (tt->logout > snaptime || tt->logout < 1)))) { snapfound = 1; printentry(bp, tt); } tt->logout = bp->ut_tv.tv_sec; } /* * printentry -- * output an entry * * If `tt' is non-NULL, use it and `crmsg' to print the logout time or * logout type (crash/shutdown) as appropriate. */ static void printentry(struct utmpx *bp, struct idtab *tt) { char ct[80]; struct tm *tm; time_t delta; /* time difference */ time_t t; if (maxrec != -1 && !maxrec--) exit(0); t = bp->ut_tv.tv_sec; tm = localtime(&t); (void) strftime(ct, sizeof(ct), d_first ? (yflag ? "%a %e %b %Y %R" : "%a %e %b %R") : (yflag ? 
"%a %b %e %Y %R" : "%a %b %e %R"), tm); switch (bp->ut_type) { case BOOT_TIME: printf("%-42s", "boot time"); break; case SHUTDOWN_TIME: printf("%-42s", "shutdown time"); break; case OLD_TIME: printf("%-42s", "old time"); break; case NEW_TIME: printf("%-42s", "new time"); break; case USER_PROCESS: printf("%-10s %-8s %-22.22s", bp->ut_user, bp->ut_line, bp->ut_host); break; } printf(" %s%c", ct, tt == NULL ? '\n' : ' '); if (tt == NULL) return; if (!tt->logout) { puts(" still logged in"); return; } if (tt->logout < 0) { tt->logout = -tt->logout; printf("- %s", crmsg); } else { tm = localtime(&tt->logout); (void) strftime(ct, sizeof(ct), "%R", tm); printf("- %s", ct); } delta = tt->logout - bp->ut_tv.tv_sec; if (sflag) { printf(" (%8ld)\n", (long)delta); } else { tm = gmtime(&delta); (void) strftime(ct, sizeof(ct), width >= 8 ? "%T" : "%R", tm); if (delta < 86400) printf(" (%s)\n", ct); else printf(" (%ld+%s)\n", (long)delta / 86400, ct); } } /* * want -- * see if want this entry */ static int want(struct utmpx *bp) { ARG *step; if (snaptime) return (NO); if (!arglist) return (YES); for (step = arglist; step; step = step->next) switch(step->type) { case REBOOT_TYPE: if (bp->ut_type == BOOT_TIME || bp->ut_type == SHUTDOWN_TIME) return (YES); break; case HOST_TYPE: if (!strcasecmp(step->name, bp->ut_host)) return (YES); break; case TTY_TYPE: if (!strcmp(step->name, bp->ut_line)) return (YES); break; case USER_TYPE: if (!strcmp(step->name, bp->ut_user)) return (YES); break; } return (NO); } /* * addarg -- * add an entry to a linked list of arguments */ static void addarg(int type, char *arg) { ARG *cur; if ((cur = malloc(sizeof(ARG))) == NULL) errx(1, "malloc failure"); cur->next = arglist; cur->type = type; cur->name = arg; arglist = cur; } /* * hostconv -- * convert the hostname to search pattern; if the supplied host name * has a domain attached that is the same as the current domain, rip * off the domain suffix since that's what login(1) does. */ static void hostconv(char *arg) { static int first = 1; static char *hostdot, name[MAXHOSTNAMELEN]; char *argdot; if (!(argdot = strchr(arg, '.'))) return; if (first) { first = 0; if (gethostname(name, sizeof(name))) err(1, "gethostname"); hostdot = strchr(name, '.'); } if (hostdot && !strcasecmp(hostdot, argdot)) *argdot = '\0'; } /* * ttyconv -- * convert tty to correct name. */ static char * ttyconv(char *arg) { char *mval; /* * kludge -- we assume that all tty's end with * a two character suffix. */ if (strlen(arg) == 2) { /* either 6 for "ttyxx" or 8 for "console" */ if ((mval = malloc(8)) == NULL) errx(1, "malloc failure"); if (!strcmp(arg, "co")) (void)strcpy(mval, "console"); else { (void)strcpy(mval, "tty"); (void)strcpy(mval + 3, arg); } return (mval); } if (!strncmp(arg, _PATH_DEV, sizeof(_PATH_DEV) - 1)) return (arg + 5); return (arg); } /* * dateconv -- * Convert the snapshot time in command line given in the format * [[CC]YY]MMDDhhmm[.SS]] to a time_t. * Derived from atime_arg1() in usr.bin/touch/touch.c */ static time_t dateconv(char *arg) { time_t timet; struct tm *t; int yearset; char *p; /* Start with the current time. */ if (time(&timet) < 0) err(1, "time"); if ((t = localtime(&timet)) == NULL) err(1, "localtime"); /* [[CC]YY]MMDDhhmm[.SS] */ if ((p = strchr(arg, '.')) == NULL) t->tm_sec = 0; /* Seconds defaults to 0. 
*/ else { if (strlen(p + 1) != 2) goto terr; *p++ = '\0'; t->tm_sec = ATOI2(p); } yearset = 0; switch (strlen(arg)) { case 12: /* CCYYMMDDhhmm */ t->tm_year = ATOI2(arg); t->tm_year *= 100; yearset = 1; /* FALLTHROUGH */ case 10: /* YYMMDDhhmm */ if (yearset) { yearset = ATOI2(arg); t->tm_year += yearset; } else { yearset = ATOI2(arg); if (yearset < 69) t->tm_year = yearset + 2000; else t->tm_year = yearset + 1900; } t->tm_year -= 1900; /* Convert to UNIX time. */ /* FALLTHROUGH */ case 8: /* MMDDhhmm */ t->tm_mon = ATOI2(arg); --t->tm_mon; /* Convert from 01-12 to 00-11 */ t->tm_mday = ATOI2(arg); t->tm_hour = ATOI2(arg); t->tm_min = ATOI2(arg); break; case 4: /* hhmm */ t->tm_hour = ATOI2(arg); t->tm_min = ATOI2(arg); break; default: goto terr; } t->tm_isdst = -1; /* Figure out DST. */ timet = mktime(t); if (timet == -1) terr: errx(1, "out of range or illegal time specification: [[CC]YY]MMDDhhmm[.SS]"); return timet; } Index: projects/clang500-import/usr.bin/truss/syscall.h =================================================================== --- projects/clang500-import/usr.bin/truss/syscall.h (revision 319548) +++ projects/clang500-import/usr.bin/truss/syscall.h (revision 319549) @@ -1,127 +1,128 @@ /* * See i386-fbsd.c for copyright and license terms. * * System call arguments come in several flavours: * Hex -- values that should be printed in hex (addresses) * Octal -- Same as above, but octal * Int -- normal integer values (file descriptors, for example) * LongHex -- long value that should be printed in hex * Name -- pointer to a NULL-terminated string. * BinString -- pointer to an array of chars, printed via strvisx(). * Ptr -- pointer to some unspecified structure. Just print as hex for now. * Stat -- a pointer to a stat buffer. Prints a couple fields. * StatFs -- a pointer to a statfs buffer. Prints a few fields. * Ioctl -- an ioctl command. Woefully limited. * Quad -- a double-word value. e.g., lseek(int, offset_t, int) * Signal -- a signal number. Prints the signal name (SIGxxx) * Sockaddr -- a pointer to a struct sockaddr. Prints symbolic AF, and IP:Port * StringArray -- a pointer to an array of string pointers. * Timespec -- a pointer to a struct timespec. Prints both elements. * Timeval -- a pointer to a struct timeval. Prints both elements. * Timeval2 -- a pointer to two struct timevals. Prints both elements of both. * Itimerval -- a pointer to a struct itimerval. Prints all elements. * Pollfd -- a pointer to an array of struct pollfd. Prints .fd and .events. * Fd_set -- a pointer to an array of fd_set. Prints the fds that are set. * Sigaction -- a pointer to a struct sigaction. Prints all elements. * Sigset -- a pointer to a sigset_t. Prints the signals that are set. * Sigprocmask -- the first argument to sigprocmask(). Prints the name. * Kevent -- a pointer to an array of struct kevents. Prints all elements. * Pathconf -- the 2nd argument of pathconf(). * Utrace -- utrace(2) buffer. + * CapRights -- a pointer to a cap_rights_t. Prints all set capabilities. * * In addition, the pointer types (String, Ptr) may have OUT masked in -- * this means that the data is set on *return* from the system call -- or * IN (meaning that the data is passed *into* the system call). 
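 *
 * (Editorial example, not in the original header.)  A decoded_syscalls[]
 * entry in syscalls.c pairs one of these flavours with a direction bit,
 * e.g. the entry this revision adds for cap_rights_limit(2):
 *
 *	{ .name = "cap_rights_limit", .ret_type = 1, .nargs = 2,
 *	  .args = { { Int, 0 }, { CapRights, 1 } } },
 *
 * print_arg() recovers the base type with (type & ARG_MASK), while the
 * OUT bit marks data that is only decoded once the call has returned.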
*/ /* * $FreeBSD$ */ enum Argtype { None = 1, Hex, Octal, Int, UInt, LongHex, Name, Ptr, Stat, Ioctl, Quad, Signal, Sockaddr, StringArray, Timespec, Timeval, Itimerval, Pollfd, Fd_set, Sigaction, Fcntl, Mprot, Mmapflags, Whence, Readlinkres, Sigset, Sigprocmask, StatFs, Kevent, Sockdomain, Socktype, Open, - Fcntlflag, Rusage, BinString, Shutdown, Resource, Rlimit, Timeval2, - Pathconf, Rforkflags, ExitStatus, Waitoptions, Idtype, Procctl, + Fcntlflag, Rusage, RusageWho, BinString, Shutdown, Resource, Rlimit, + Timeval2, Pathconf, Rforkflags, ExitStatus, Waitoptions, Idtype, Procctl, LinuxSockArgs, Umtxop, Atfd, Atflags, Timespec2, Accessmode, Long, Sysarch, ExecArgs, ExecEnv, PipeFds, QuadHex, Utrace, IntArray, Pipe2, CapFcntlRights, Fadvice, FileFlags, Flockop, Getfsstatmode, Kldsymcmd, Kldunloadflags, Sizet, Madvice, Socklent, Sockprotocol, Sockoptlevel, - Sockoptname, Msgflags, + Sockoptname, Msgflags, CapRights, PUInt, CloudABIAdvice, CloudABIClockID, CloudABIFDSFlags, CloudABIFDStat, CloudABIFileStat, CloudABIFileType, CloudABIFSFlags, CloudABILookup, CloudABIMFlags, CloudABIMProt, CloudABIMSFlags, CloudABIOFlags, CloudABISDFlags, CloudABISignal, CloudABISockStat, CloudABISSFlags, CloudABITimestamp, CloudABIULFlags, CloudABIWhence }; #define ARG_MASK 0xff #define OUT 0x100 #define IN /*0x20*/0 struct syscall_args { enum Argtype type; int offset; }; struct syscall { STAILQ_ENTRY(syscall) entries; const char *name; u_int ret_type; /* 0, 1, or 2 return values */ u_int nargs; /* actual number of meaningful arguments */ /* Hopefully, no syscalls with > 10 args */ struct syscall_args args[10]; struct timespec time; /* Time spent for this call */ int ncalls; /* Number of calls */ int nerror; /* Number of calls that returned with error */ bool unknown; /* Unknown system call */ }; struct syscall *get_syscall(struct threadinfo *, u_int, u_int); char *print_arg(struct syscall_args *, unsigned long*, long *, struct trussinfo *); /* * Linux Socket defines */ #define LINUX_SOCKET 1 #define LINUX_BIND 2 #define LINUX_CONNECT 3 #define LINUX_LISTEN 4 #define LINUX_ACCEPT 5 #define LINUX_GETSOCKNAME 6 #define LINUX_GETPEERNAME 7 #define LINUX_SOCKETPAIR 8 #define LINUX_SEND 9 #define LINUX_RECV 10 #define LINUX_SENDTO 11 #define LINUX_RECVFROM 12 #define LINUX_SHUTDOWN 13 #define LINUX_SETSOCKOPT 14 #define LINUX_GETSOCKOPT 15 #define LINUX_SENDMSG 16 #define LINUX_RECVMSG 17 #define PAD_(t) (sizeof(register_t) <= sizeof(t) ? \ 0 : sizeof(register_t) - sizeof(t)) #if BYTE_ORDER == LITTLE_ENDIAN #define PADL_(t) 0 #define PADR_(t) PAD_(t) #else #define PADL_(t) PAD_(t) #define PADR_(t) 0 #endif typedef int l_int; typedef uint32_t l_ulong; struct linux_socketcall_args { char what_l_[PADL_(l_int)]; l_int what; char what_r_[PADR_(l_int)]; char args_l_[PADL_(l_ulong)]; l_ulong args; char args_r_[PADR_(l_ulong)]; }; void init_syscalls(void); void print_syscall(struct trussinfo *); void print_syscall_ret(struct trussinfo *, int, long *); void print_summary(struct trussinfo *trussinfo); Index: projects/clang500-import/usr.bin/truss/syscalls.c =================================================================== --- projects/clang500-import/usr.bin/truss/syscalls.c (revision 319548) +++ projects/clang500-import/usr.bin/truss/syscalls.c (revision 319549) @@ -1,2192 +1,2224 @@ /* * Copyright 1997 Sean Eric Fagan * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1.
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Sean Eric Fagan * 4. Neither the name of the author may be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * This file has routines used to print out system calls and their * arguments. */ +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "truss.h" #include "extern.h" #include "syscall.h" /* * This should probably be in its own file, sorted alphabetically. 
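 *
 * (Editorial note, not in the original source.)  Each entry below pairs a
 * syscall name with per-argument decoders.  This revision also retypes
 * getrusage()'s first argument from a plain Int to the new RusageWho type,
 * so that truss prints the 'who' value symbolically (RUSAGE_SELF and
 * friends) rather than as a bare integer, matching the SYS_getrusage
 * decoding added to kdump above.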
*/ static struct syscall decoded_syscalls[] = { /* Native ABI */ + { .name = "__cap_rights_get", .ret_type = 1, .nargs = 3, + .args = { { Int, 0 }, { Int, 1 }, { CapRights | OUT, 2 } } }, { .name = "__getcwd", .ret_type = 1, .nargs = 2, .args = { { Name | OUT, 0 }, { Int, 1 } } }, { .name = "_umtx_op", .ret_type = 1, .nargs = 5, .args = { { Ptr, 0 }, { Umtxop, 1 }, { LongHex, 2 }, { Ptr, 3 }, { Ptr, 4 } } }, { .name = "accept", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { Sockaddr | OUT, 1 }, { Ptr | OUT, 2 } } }, { .name = "access", .ret_type = 1, .nargs = 2, .args = { { Name | IN, 0 }, { Accessmode, 1 } } }, { .name = "bind", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { Sockaddr | IN, 1 }, { Socklent, 2 } } }, { .name = "bindat", .ret_type = 1, .nargs = 4, .args = { { Atfd, 0 }, { Int, 1 }, { Sockaddr | IN, 2 }, { Int, 3 } } }, { .name = "break", .ret_type = 1, .nargs = 1, .args = { { Ptr, 0 } } }, { .name = "cap_fcntls_get", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { CapFcntlRights | OUT, 1 } } }, { .name = "cap_fcntls_limit", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { CapFcntlRights, 1 } } }, + { .name = "cap_getmode", .ret_type = 1, .nargs = 1, + .args = { { PUInt | OUT, 0 } } }, + { .name = "cap_rights_limit", .ret_type = 1, .nargs = 2, + .args = { { Int, 0 }, { CapRights, 1 } } }, { .name = "chdir", .ret_type = 1, .nargs = 1, .args = { { Name, 0 } } }, { .name = "chflags", .ret_type = 1, .nargs = 2, .args = { { Name | IN, 0 }, { FileFlags, 1 } } }, { .name = "chflagsat", .ret_type = 1, .nargs = 4, .args = { { Atfd, 0 }, { Name | IN, 1 }, { FileFlags, 2 }, { Atflags, 3 } } }, { .name = "chmod", .ret_type = 1, .nargs = 2, .args = { { Name, 0 }, { Octal, 1 } } }, { .name = "chown", .ret_type = 1, .nargs = 3, .args = { { Name, 0 }, { Int, 1 }, { Int, 2 } } }, { .name = "chroot", .ret_type = 1, .nargs = 1, .args = { { Name, 0 } } }, { .name = "clock_gettime", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { Timespec | OUT, 1 } } }, { .name = "close", .ret_type = 1, .nargs = 1, .args = { { Int, 0 } } }, { .name = "connect", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { Sockaddr | IN, 1 }, { Socklent, 2 } } }, { .name = "connectat", .ret_type = 1, .nargs = 4, .args = { { Atfd, 0 }, { Int, 1 }, { Sockaddr | IN, 2 }, { Int, 3 } } }, { .name = "eaccess", .ret_type = 1, .nargs = 2, .args = { { Name | IN, 0 }, { Accessmode, 1 } } }, { .name = "execve", .ret_type = 1, .nargs = 3, .args = { { Name | IN, 0 }, { ExecArgs | IN, 1 }, { ExecEnv | IN, 2 } } }, { .name = "exit", .ret_type = 0, .nargs = 1, .args = { { Hex, 0 } } }, { .name = "faccessat", .ret_type = 1, .nargs = 4, .args = { { Atfd, 0 }, { Name | IN, 1 }, { Accessmode, 2 }, { Atflags, 3 } } }, { .name = "fchflags", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { FileFlags, 1 } } }, { .name = "fchmod", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { Octal, 1 } } }, { .name = "fchmodat", .ret_type = 1, .nargs = 4, .args = { { Atfd, 0 }, { Name, 1 }, { Octal, 2 }, { Atflags, 3 } } }, { .name = "fchown", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { Int, 1 }, { Int, 2 } } }, { .name = "fchownat", .ret_type = 1, .nargs = 5, .args = { { Atfd, 0 }, { Name, 1 }, { Int, 2 }, { Int, 3 }, { Atflags, 4 } } }, { .name = "fcntl", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { Fcntl, 1 }, { Fcntlflag, 2 } } }, { .name = "flock", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { Flockop, 1 } } }, { .name = "fstat", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { Stat | OUT, 1 } } }, { .name = 
"fstatat", .ret_type = 1, .nargs = 4, .args = { { Atfd, 0 }, { Name | IN, 1 }, { Stat | OUT, 2 }, { Atflags, 3 } } }, { .name = "fstatfs", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { StatFs | OUT, 1 } } }, { .name = "ftruncate", .ret_type = 1, .nargs = 2, .args = { { Int | IN, 0 }, { QuadHex | IN, 1 } } }, { .name = "futimens", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { Timespec2 | IN, 1 } } }, { .name = "futimes", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { Timeval2 | IN, 1 } } }, { .name = "futimesat", .ret_type = 1, .nargs = 3, .args = { { Atfd, 0 }, { Name | IN, 1 }, { Timeval2 | IN, 2 } } }, { .name = "getfsstat", .ret_type = 1, .nargs = 3, .args = { { Ptr, 0 }, { Long, 1 }, { Getfsstatmode, 2 } } }, { .name = "getitimer", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { Itimerval | OUT, 2 } } }, { .name = "getpeername", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { Sockaddr | OUT, 1 }, { Ptr | OUT, 2 } } }, { .name = "getpgid", .ret_type = 1, .nargs = 1, .args = { { Int, 0 } } }, { .name = "getrlimit", .ret_type = 1, .nargs = 2, .args = { { Resource, 0 }, { Rlimit | OUT, 1 } } }, { .name = "getrusage", .ret_type = 1, .nargs = 2, - .args = { { Int, 0 }, { Rusage | OUT, 1 } } }, + .args = { { RusageWho, 0 }, { Rusage | OUT, 1 } } }, { .name = "getsid", .ret_type = 1, .nargs = 1, .args = { { Int, 0 } } }, { .name = "getsockname", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { Sockaddr | OUT, 1 }, { Ptr | OUT, 2 } } }, { .name = "getsockopt", .ret_type = 1, .nargs = 5, .args = { { Int, 0 }, { Sockoptlevel, 1 }, { Sockoptname, 2 }, { Ptr | OUT, 3 }, { Ptr | OUT, 4 } } }, { .name = "gettimeofday", .ret_type = 1, .nargs = 2, .args = { { Timeval | OUT, 0 }, { Ptr, 1 } } }, { .name = "ioctl", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { Ioctl, 1 }, { Ptr, 2 } } }, { .name = "kevent", .ret_type = 1, .nargs = 6, .args = { { Int, 0 }, { Kevent, 1 }, { Int, 2 }, { Kevent | OUT, 3 }, { Int, 4 }, { Timespec, 5 } } }, { .name = "kill", .ret_type = 1, .nargs = 2, .args = { { Int | IN, 0 }, { Signal | IN, 1 } } }, { .name = "kldfind", .ret_type = 1, .nargs = 1, .args = { { Name | IN, 0 } } }, { .name = "kldfirstmod", .ret_type = 1, .nargs = 1, .args = { { Int, 0 } } }, { .name = "kldload", .ret_type = 1, .nargs = 1, .args = { { Name | IN, 0 } } }, { .name = "kldnext", .ret_type = 1, .nargs = 1, .args = { { Int, 0 } } }, { .name = "kldstat", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { Ptr, 1 } } }, { .name = "kldsym", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { Kldsymcmd, 1 }, { Ptr, 2 } } }, { .name = "kldunload", .ret_type = 1, .nargs = 1, .args = { { Int, 0 } } }, { .name = "kldunloadf", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { Kldunloadflags, 1 } } }, { .name = "kse_release", .ret_type = 0, .nargs = 1, .args = { { Timespec, 0 } } }, { .name = "lchflags", .ret_type = 1, .nargs = 2, .args = { { Name | IN, 0 }, { FileFlags, 1 } } }, { .name = "lchmod", .ret_type = 1, .nargs = 2, .args = { { Name, 0 }, { Octal, 1 } } }, { .name = "lchown", .ret_type = 1, .nargs = 3, .args = { { Name, 0 }, { Int, 1 }, { Int, 2 } } }, { .name = "link", .ret_type = 1, .nargs = 2, .args = { { Name, 0 }, { Name, 1 } } }, { .name = "linkat", .ret_type = 1, .nargs = 5, .args = { { Atfd, 0 }, { Name, 1 }, { Atfd, 2 }, { Name, 3 }, { Atflags, 4 } } }, { .name = "listen", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { Int, 1 } } }, { .name = "lseek", .ret_type = 2, .nargs = 3, .args = { { Int, 0 }, { QuadHex, 1 }, { Whence, 2 } } }, { .name = "lstat", .ret_type = 
1, .nargs = 2, .args = { { Name | IN, 0 }, { Stat | OUT, 1 } } }, { .name = "lutimes", .ret_type = 1, .nargs = 2, .args = { { Name | IN, 0 }, { Timeval2 | IN, 1 } } }, { .name = "madvise", .ret_type = 1, .nargs = 3, .args = { { Ptr, 0 }, { Sizet, 1 }, { Madvice, 2 } } }, { .name = "mkdir", .ret_type = 1, .nargs = 2, .args = { { Name, 0 }, { Octal, 1 } } }, { .name = "mkdirat", .ret_type = 1, .nargs = 3, .args = { { Atfd, 0 }, { Name, 1 }, { Octal, 2 } } }, { .name = "mkfifo", .ret_type = 1, .nargs = 2, .args = { { Name, 0 }, { Octal, 1 } } }, { .name = "mkfifoat", .ret_type = 1, .nargs = 3, .args = { { Atfd, 0 }, { Name, 1 }, { Octal, 2 } } }, { .name = "mknod", .ret_type = 1, .nargs = 3, .args = { { Name, 0 }, { Octal, 1 }, { Int, 2 } } }, { .name = "mknodat", .ret_type = 1, .nargs = 4, .args = { { Atfd, 0 }, { Name, 1 }, { Octal, 2 }, { Int, 3 } } }, { .name = "mmap", .ret_type = 1, .nargs = 6, .args = { { Ptr, 0 }, { Sizet, 1 }, { Mprot, 2 }, { Mmapflags, 3 }, { Int, 4 }, { QuadHex, 5 } } }, { .name = "modfind", .ret_type = 1, .nargs = 1, .args = { { Name | IN, 0 } } }, { .name = "mount", .ret_type = 1, .nargs = 4, .args = { { Name, 0 }, { Name, 1 }, { Int, 2 }, { Ptr, 3 } } }, { .name = "mprotect", .ret_type = 1, .nargs = 3, .args = { { Ptr, 0 }, { Sizet, 1 }, { Mprot, 2 } } }, { .name = "munmap", .ret_type = 1, .nargs = 2, .args = { { Ptr, 0 }, { Sizet, 1 } } }, { .name = "nanosleep", .ret_type = 1, .nargs = 1, .args = { { Timespec, 0 } } }, { .name = "open", .ret_type = 1, .nargs = 3, .args = { { Name | IN, 0 }, { Open, 1 }, { Octal, 2 } } }, { .name = "openat", .ret_type = 1, .nargs = 4, .args = { { Atfd, 0 }, { Name | IN, 1 }, { Open, 2 }, { Octal, 3 } } }, { .name = "pathconf", .ret_type = 1, .nargs = 2, .args = { { Name | IN, 0 }, { Pathconf, 1 } } }, { .name = "pipe", .ret_type = 1, .nargs = 1, .args = { { PipeFds | OUT, 0 } } }, { .name = "pipe2", .ret_type = 1, .nargs = 2, .args = { { Ptr, 0 }, { Pipe2, 1 } } }, { .name = "poll", .ret_type = 1, .nargs = 3, .args = { { Pollfd, 0 }, { Int, 1 }, { Int, 2 } } }, { .name = "posix_fadvise", .ret_type = 1, .nargs = 4, .args = { { Int, 0 }, { QuadHex, 1 }, { QuadHex, 2 }, { Fadvice, 3 } } }, { .name = "posix_openpt", .ret_type = 1, .nargs = 1, .args = { { Open, 0 } } }, { .name = "procctl", .ret_type = 1, .nargs = 4, .args = { { Idtype, 0 }, { Quad, 1 }, { Procctl, 2 }, { Ptr, 3 } } }, { .name = "read", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { BinString | OUT, 1 }, { Sizet, 2 } } }, { .name = "readlink", .ret_type = 1, .nargs = 3, .args = { { Name, 0 }, { Readlinkres | OUT, 1 }, { Sizet, 2 } } }, { .name = "readlinkat", .ret_type = 1, .nargs = 4, .args = { { Atfd, 0 }, { Name, 1 }, { Readlinkres | OUT, 2 }, { Sizet, 3 } } }, { .name = "recvfrom", .ret_type = 1, .nargs = 6, .args = { { Int, 0 }, { BinString | OUT, 1 }, { Sizet, 2 }, { Msgflags, 3 }, { Sockaddr | OUT, 4 }, { Ptr | OUT, 5 } } }, { .name = "recvmsg", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { Ptr, 1 }, { Msgflags, 2 } } }, { .name = "rename", .ret_type = 1, .nargs = 2, .args = { { Name, 0 }, { Name, 1 } } }, { .name = "renameat", .ret_type = 1, .nargs = 4, .args = { { Atfd, 0 }, { Name, 1 }, { Atfd, 2 }, { Name, 3 } } }, { .name = "rfork", .ret_type = 1, .nargs = 1, .args = { { Rforkflags, 0 } } }, { .name = "rmdir", .ret_type = 1, .nargs = 1, .args = { { Name, 0 } } }, { .name = "sctp_generic_recvmsg", .ret_type = 1, .nargs = 7, .args = { { Int, 0 }, { Ptr | IN, 1 }, { Int, 2 }, { Sockaddr | OUT, 3 }, { Ptr | OUT, 4 }, { Ptr | OUT, 5 }, { Ptr | OUT, 6 } 
} }, { .name = "sctp_generic_sendmsg", .ret_type = 1, .nargs = 7, .args = { { Int, 0 }, { BinString | IN, 1 }, { Int, 2 }, { Sockaddr | IN, 3 }, { Socklent, 4 }, { Ptr | IN, 5 }, { Msgflags, 6 } } }, { .name = "select", .ret_type = 1, .nargs = 5, .args = { { Int, 0 }, { Fd_set, 1 }, { Fd_set, 2 }, { Fd_set, 3 }, { Timeval, 4 } } }, { .name = "sendmsg", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { Ptr, 1 }, { Msgflags, 2 } } }, { .name = "sendto", .ret_type = 1, .nargs = 6, .args = { { Int, 0 }, { BinString | IN, 1 }, { Sizet, 2 }, { Msgflags, 3 }, { Sockaddr | IN, 4 }, { Socklent | IN, 5 } } }, { .name = "setitimer", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { Itimerval, 1 }, { Itimerval | OUT, 2 } } }, { .name = "setrlimit", .ret_type = 1, .nargs = 2, .args = { { Resource, 0 }, { Rlimit | IN, 1 } } }, { .name = "setsockopt", .ret_type = 1, .nargs = 5, .args = { { Int, 0 }, { Sockoptlevel, 1 }, { Sockoptname, 2 }, { Ptr | IN, 3 }, { Socklent, 4 } } }, { .name = "shutdown", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { Shutdown, 1 } } }, { .name = "sigaction", .ret_type = 1, .nargs = 3, .args = { { Signal, 0 }, { Sigaction | IN, 1 }, { Sigaction | OUT, 2 } } }, { .name = "sigpending", .ret_type = 1, .nargs = 1, .args = { { Sigset | OUT, 0 } } }, { .name = "sigprocmask", .ret_type = 1, .nargs = 3, .args = { { Sigprocmask, 0 }, { Sigset, 1 }, { Sigset | OUT, 2 } } }, { .name = "sigqueue", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { Signal, 1 }, { LongHex, 2 } } }, { .name = "sigreturn", .ret_type = 1, .nargs = 1, .args = { { Ptr, 0 } } }, { .name = "sigsuspend", .ret_type = 1, .nargs = 1, .args = { { Sigset | IN, 0 } } }, { .name = "sigtimedwait", .ret_type = 1, .nargs = 3, .args = { { Sigset | IN, 0 }, { Ptr, 1 }, { Timespec | IN, 2 } } }, { .name = "sigwait", .ret_type = 1, .nargs = 2, .args = { { Sigset | IN, 0 }, { Ptr, 1 } } }, { .name = "sigwaitinfo", .ret_type = 1, .nargs = 2, .args = { { Sigset | IN, 0 }, { Ptr, 1 } } }, { .name = "socket", .ret_type = 1, .nargs = 3, .args = { { Sockdomain, 0 }, { Socktype, 1 }, { Sockprotocol, 2 } } }, { .name = "stat", .ret_type = 1, .nargs = 2, .args = { { Name | IN, 0 }, { Stat | OUT, 1 } } }, { .name = "statfs", .ret_type = 1, .nargs = 2, .args = { { Name | IN, 0 }, { StatFs | OUT, 1 } } }, { .name = "symlink", .ret_type = 1, .nargs = 2, .args = { { Name, 0 }, { Name, 1 } } }, { .name = "symlinkat", .ret_type = 1, .nargs = 3, .args = { { Name, 0 }, { Atfd, 1 }, { Name, 2 } } }, { .name = "sysarch", .ret_type = 1, .nargs = 2, .args = { { Sysarch, 0 }, { Ptr, 1 } } }, { .name = "thr_kill", .ret_type = 1, .nargs = 2, .args = { { Long, 0 }, { Signal, 1 } } }, { .name = "thr_self", .ret_type = 1, .nargs = 1, .args = { { Ptr, 0 } } }, { .name = "truncate", .ret_type = 1, .nargs = 2, .args = { { Name | IN, 0 }, { QuadHex | IN, 1 } } }, #if 0 /* Does not exist */ { .name = "umount", .ret_type = 1, .nargs = 2, .args = { { Name, 0 }, { Int, 2 } } }, #endif { .name = "unlink", .ret_type = 1, .nargs = 1, .args = { { Name, 0 } } }, { .name = "unlinkat", .ret_type = 1, .nargs = 3, .args = { { Atfd, 0 }, { Name, 1 }, { Atflags, 2 } } }, { .name = "unmount", .ret_type = 1, .nargs = 2, .args = { { Name, 0 }, { Int, 1 } } }, { .name = "utimensat", .ret_type = 1, .nargs = 4, .args = { { Atfd, 0 }, { Name | IN, 1 }, { Timespec2 | IN, 2 }, { Atflags, 3 } } }, { .name = "utimes", .ret_type = 1, .nargs = 2, .args = { { Name | IN, 0 }, { Timeval2 | IN, 1 } } }, { .name = "utrace", .ret_type = 1, .nargs = 1, .args = { { Utrace, 0 } } }, { .name = 
"wait4", .ret_type = 1, .nargs = 4, .args = { { Int, 0 }, { ExitStatus | OUT, 1 }, { Waitoptions, 2 }, { Rusage | OUT, 3 } } }, { .name = "wait6", .ret_type = 1, .nargs = 6, .args = { { Idtype, 0 }, { Quad, 1 }, { ExitStatus | OUT, 2 }, { Waitoptions, 3 }, { Rusage | OUT, 4 }, { Ptr, 5 } } }, { .name = "write", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { BinString | IN, 1 }, { Sizet, 2 } } }, /* Linux ABI */ { .name = "linux_access", .ret_type = 1, .nargs = 2, .args = { { Name, 0 }, { Accessmode, 1 } } }, { .name = "linux_execve", .ret_type = 1, .nargs = 3, .args = { { Name | IN, 0 }, { ExecArgs | IN, 1 }, { ExecEnv | IN, 2 } } }, { .name = "linux_lseek", .ret_type = 2, .nargs = 3, .args = { { Int, 0 }, { Int, 1 }, { Whence, 2 } } }, { .name = "linux_mkdir", .ret_type = 1, .nargs = 2, .args = { { Name | IN, 0 }, { Int, 1 } } }, { .name = "linux_newfstat", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { Ptr | OUT, 1 } } }, { .name = "linux_newstat", .ret_type = 1, .nargs = 2, .args = { { Name | IN, 0 }, { Ptr | OUT, 1 } } }, { .name = "linux_open", .ret_type = 1, .nargs = 3, .args = { { Name, 0 }, { Hex, 1 }, { Octal, 2 } } }, { .name = "linux_readlink", .ret_type = 1, .nargs = 3, .args = { { Name, 0 }, { Name | OUT, 1 }, { Sizet, 2 } } }, { .name = "linux_socketcall", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { LinuxSockArgs, 1 } } }, { .name = "linux_stat64", .ret_type = 1, .nargs = 2, .args = { { Name | IN, 0 }, { Ptr | OUT, 1 } } }, /* CloudABI system calls. */ { .name = "cloudabi_sys_clock_res_get", .ret_type = 1, .nargs = 1, .args = { { CloudABIClockID, 0 } } }, { .name = "cloudabi_sys_clock_time_get", .ret_type = 1, .nargs = 2, .args = { { CloudABIClockID, 0 }, { CloudABITimestamp, 1 } } }, { .name = "cloudabi_sys_condvar_signal", .ret_type = 1, .nargs = 3, .args = { { Ptr, 0 }, { CloudABIMFlags, 1 }, { UInt, 2 } } }, { .name = "cloudabi_sys_fd_close", .ret_type = 1, .nargs = 1, .args = { { Int, 0 } } }, { .name = "cloudabi_sys_fd_create1", .ret_type = 1, .nargs = 1, .args = { { CloudABIFileType, 0 } } }, { .name = "cloudabi_sys_fd_create2", .ret_type = 1, .nargs = 2, .args = { { CloudABIFileType, 0 }, { PipeFds | OUT, 0 } } }, { .name = "cloudabi_sys_fd_datasync", .ret_type = 1, .nargs = 1, .args = { { Int, 0 } } }, { .name = "cloudabi_sys_fd_dup", .ret_type = 1, .nargs = 1, .args = { { Int, 0 } } }, { .name = "cloudabi_sys_fd_replace", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { Int, 1 } } }, { .name = "cloudabi_sys_fd_seek", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { Int, 1 }, { CloudABIWhence, 2 } } }, { .name = "cloudabi_sys_fd_stat_get", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { CloudABIFDStat | OUT, 1 } } }, { .name = "cloudabi_sys_fd_stat_put", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { CloudABIFDStat | IN, 1 }, { ClouduABIFDSFlags, 2 } } }, { .name = "cloudabi_sys_fd_sync", .ret_type = 1, .nargs = 1, .args = { { Int, 0 } } }, { .name = "cloudabi_sys_file_advise", .ret_type = 1, .nargs = 4, .args = { { Int, 0 }, { Int, 1 }, { Int, 2 }, { CloudABIAdvice, 3 } } }, { .name = "cloudabi_sys_file_allocate", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { Int, 1 }, { Int, 2 } } }, { .name = "cloudabi_sys_file_create", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { BinString | IN, 1 }, { CloudABIFileType, 3 } } }, { .name = "cloudabi_sys_file_link", .ret_type = 1, .nargs = 4, .args = { { CloudABILookup, 0 }, { BinString | IN, 1 }, { Int, 3 }, { BinString | IN, 4 } } }, { .name = "cloudabi_sys_file_open", .ret_type = 1, .nargs = 
4, .args = { { Int, 0 }, { BinString | IN, 1 }, { CloudABIOFlags, 3 }, { CloudABIFDStat | IN, 4 } } }, { .name = "cloudabi_sys_file_readdir", .ret_type = 1, .nargs = 4, .args = { { Int, 0 }, { BinString | OUT, 1 }, { Int, 2 }, { Int, 3 } } }, { .name = "cloudabi_sys_file_readlink", .ret_type = 1, .nargs = 4, .args = { { Int, 0 }, { BinString | IN, 1 }, { BinString | OUT, 3 }, { Int, 4 } } }, { .name = "cloudabi_sys_file_rename", .ret_type = 1, .nargs = 4, .args = { { Int, 0 }, { BinString | IN, 1 }, { Int, 3 }, { BinString | IN, 4 } } }, { .name = "cloudabi_sys_file_stat_fget", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { CloudABIFileStat | OUT, 1 } } }, { .name = "cloudabi_sys_file_stat_fput", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { CloudABIFileStat | IN, 1 }, { CloudABIFSFlags, 2 } } }, { .name = "cloudabi_sys_file_stat_get", .ret_type = 1, .nargs = 3, .args = { { CloudABILookup, 0 }, { BinString | IN, 1 }, { CloudABIFileStat | OUT, 3 } } }, { .name = "cloudabi_sys_file_stat_put", .ret_type = 1, .nargs = 4, .args = { { CloudABILookup, 0 }, { BinString | IN, 1 }, { CloudABIFileStat | IN, 3 }, { CloudABIFSFlags, 4 } } }, { .name = "cloudabi_sys_file_symlink", .ret_type = 1, .nargs = 3, .args = { { BinString | IN, 0 }, { Int, 2 }, { BinString | IN, 3 } } }, { .name = "cloudabi_sys_file_unlink", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { BinString | IN, 1 }, { CloudABIULFlags, 3 } } }, { .name = "cloudabi_sys_lock_unlock", .ret_type = 1, .nargs = 2, .args = { { Ptr, 0 }, { CloudABIMFlags, 1 } } }, { .name = "cloudabi_sys_mem_advise", .ret_type = 1, .nargs = 3, .args = { { Ptr, 0 }, { Int, 1 }, { CloudABIAdvice, 2 } } }, { .name = "cloudabi_sys_mem_lock", .ret_type = 1, .nargs = 2, .args = { { Ptr, 0 }, { Int, 1 } } }, { .name = "cloudabi_sys_mem_map", .ret_type = 1, .nargs = 6, .args = { { Ptr, 0 }, { Int, 1 }, { CloudABIMProt, 2 }, { CloudABIMFlags, 3 }, { Int, 4 }, { Int, 5 } } }, { .name = "cloudabi_sys_mem_protect", .ret_type = 1, .nargs = 3, .args = { { Ptr, 0 }, { Int, 1 }, { CloudABIMProt, 2 } } }, { .name = "cloudabi_sys_mem_sync", .ret_type = 1, .nargs = 3, .args = { { Ptr, 0 }, { Int, 1 }, { CloudABIMSFlags, 2 } } }, { .name = "cloudabi_sys_mem_unlock", .ret_type = 1, .nargs = 2, .args = { { Ptr, 0 }, { Int, 1 } } }, { .name = "cloudabi_sys_mem_unmap", .ret_type = 1, .nargs = 2, .args = { { Ptr, 0 }, { Int, 1 } } }, { .name = "cloudabi_sys_proc_exec", .ret_type = 1, .nargs = 5, .args = { { Int, 0 }, { BinString | IN, 1 }, { Int, 2 }, { IntArray, 3 }, { Int, 4 } } }, { .name = "cloudabi_sys_proc_exit", .ret_type = 1, .nargs = 1, .args = { { Int, 0 } } }, { .name = "cloudabi_sys_proc_fork", .ret_type = 1, .nargs = 0 }, { .name = "cloudabi_sys_proc_raise", .ret_type = 1, .nargs = 1, .args = { { CloudABISignal, 0 } } }, { .name = "cloudabi_sys_random_get", .ret_type = 1, .nargs = 2, .args = { { BinString | OUT, 0 }, { Int, 1 } } }, { .name = "cloudabi_sys_sock_accept", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { CloudABISockStat | OUT, 1 } } }, { .name = "cloudabi_sys_sock_bind", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { Int, 1 }, { BinString | IN, 2 } } }, { .name = "cloudabi_sys_sock_connect", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { Int, 1 }, { BinString | IN, 2 } } }, { .name = "cloudabi_sys_sock_listen", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { Int, 1 } } }, { .name = "cloudabi_sys_sock_shutdown", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { CloudABISDFlags, 1 } } }, { .name = "cloudabi_sys_sock_stat_get", 
.ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { CloudABISockStat | OUT, 1 }, { CloudABISSFlags, 2 } } }, { .name = "cloudabi_sys_thread_exit", .ret_type = 1, .nargs = 2, .args = { { Ptr, 0 }, { CloudABIMFlags, 1 } } }, { .name = "cloudabi_sys_thread_yield", .ret_type = 1, .nargs = 0 }, { .name = 0 }, }; static STAILQ_HEAD(, syscall) syscalls; /* Xlat idea taken from strace */ struct xlat { int val; const char *str; }; #define X(a) { a, #a }, #define XEND { 0, NULL } static struct xlat kevent_filters[] = { X(EVFILT_READ) X(EVFILT_WRITE) X(EVFILT_AIO) X(EVFILT_VNODE) X(EVFILT_PROC) X(EVFILT_SIGNAL) X(EVFILT_TIMER) X(EVFILT_PROCDESC) X(EVFILT_FS) X(EVFILT_LIO) X(EVFILT_USER) X(EVFILT_SENDFILE) XEND }; static struct xlat kevent_flags[] = { X(EV_ADD) X(EV_DELETE) X(EV_ENABLE) X(EV_DISABLE) X(EV_ONESHOT) X(EV_CLEAR) X(EV_RECEIPT) X(EV_DISPATCH) X(EV_FORCEONESHOT) X(EV_DROP) X(EV_FLAG1) X(EV_ERROR) X(EV_EOF) XEND }; static struct xlat kevent_user_ffctrl[] = { X(NOTE_FFNOP) X(NOTE_FFAND) X(NOTE_FFOR) X(NOTE_FFCOPY) XEND }; static struct xlat kevent_rdwr_fflags[] = { X(NOTE_LOWAT) X(NOTE_FILE_POLL) XEND }; static struct xlat kevent_vnode_fflags[] = { X(NOTE_DELETE) X(NOTE_WRITE) X(NOTE_EXTEND) X(NOTE_ATTRIB) X(NOTE_LINK) X(NOTE_RENAME) X(NOTE_REVOKE) XEND }; static struct xlat kevent_proc_fflags[] = { X(NOTE_EXIT) X(NOTE_FORK) X(NOTE_EXEC) X(NOTE_TRACK) X(NOTE_TRACKERR) X(NOTE_CHILD) XEND }; static struct xlat kevent_timer_fflags[] = { X(NOTE_SECONDS) X(NOTE_MSECONDS) X(NOTE_USECONDS) X(NOTE_NSECONDS) XEND }; static struct xlat poll_flags[] = { X(POLLSTANDARD) X(POLLIN) X(POLLPRI) X(POLLOUT) X(POLLERR) X(POLLHUP) X(POLLNVAL) X(POLLRDNORM) X(POLLRDBAND) X(POLLWRBAND) X(POLLINIGNEOF) XEND }; static struct xlat sigaction_flags[] = { X(SA_ONSTACK) X(SA_RESTART) X(SA_RESETHAND) X(SA_NOCLDSTOP) X(SA_NODEFER) X(SA_NOCLDWAIT) X(SA_SIGINFO) XEND }; static struct xlat pathconf_arg[] = { X(_PC_LINK_MAX) X(_PC_MAX_CANON) X(_PC_MAX_INPUT) X(_PC_NAME_MAX) X(_PC_PATH_MAX) X(_PC_PIPE_BUF) X(_PC_CHOWN_RESTRICTED) X(_PC_NO_TRUNC) X(_PC_VDISABLE) X(_PC_ASYNC_IO) X(_PC_PRIO_IO) X(_PC_SYNC_IO) X(_PC_ALLOC_SIZE_MIN) X(_PC_FILESIZEBITS) X(_PC_REC_INCR_XFER_SIZE) X(_PC_REC_MAX_XFER_SIZE) X(_PC_REC_MIN_XFER_SIZE) X(_PC_REC_XFER_ALIGN) X(_PC_SYMLINK_MAX) X(_PC_ACL_EXTENDED) X(_PC_ACL_PATH_MAX) X(_PC_CAP_PRESENT) X(_PC_INF_PRESENT) X(_PC_MAC_PRESENT) X(_PC_ACL_NFS4) X(_PC_MIN_HOLE_SIZE) XEND }; static struct xlat at_flags[] = { X(AT_EACCESS) X(AT_SYMLINK_NOFOLLOW) X(AT_SYMLINK_FOLLOW) X(AT_REMOVEDIR) XEND }; static struct xlat sysarch_ops[] = { #if defined(__i386__) || defined(__amd64__) X(I386_GET_LDT) X(I386_SET_LDT) X(I386_GET_IOPERM) X(I386_SET_IOPERM) X(I386_VM86) X(I386_GET_FSBASE) X(I386_SET_FSBASE) X(I386_GET_GSBASE) X(I386_SET_GSBASE) X(I386_GET_XFPUSTATE) X(AMD64_GET_FSBASE) X(AMD64_SET_FSBASE) X(AMD64_GET_GSBASE) X(AMD64_SET_GSBASE) X(AMD64_GET_XFPUSTATE) #endif XEND }; static struct xlat linux_socketcall_ops[] = { X(LINUX_SOCKET) X(LINUX_BIND) X(LINUX_CONNECT) X(LINUX_LISTEN) X(LINUX_ACCEPT) X(LINUX_GETSOCKNAME) X(LINUX_GETPEERNAME) X(LINUX_SOCKETPAIR) X(LINUX_SEND) X(LINUX_RECV) X(LINUX_SENDTO) X(LINUX_RECVFROM) X(LINUX_SHUTDOWN) X(LINUX_SETSOCKOPT) X(LINUX_GETSOCKOPT) X(LINUX_SENDMSG) X(LINUX_RECVMSG) XEND }; #undef X #define X(a) { CLOUDABI_##a, #a }, static struct xlat cloudabi_advice[] = { X(ADVICE_DONTNEED) X(ADVICE_NOREUSE) X(ADVICE_NORMAL) X(ADVICE_RANDOM) X(ADVICE_SEQUENTIAL) X(ADVICE_WILLNEED) XEND }; static struct xlat cloudabi_clockid[] = { X(CLOCK_MONOTONIC) X(CLOCK_PROCESS_CPUTIME_ID) 
X(CLOCK_REALTIME) X(CLOCK_THREAD_CPUTIME_ID) XEND }; static struct xlat cloudabi_errno[] = { X(E2BIG) X(EACCES) X(EADDRINUSE) X(EADDRNOTAVAIL) X(EAFNOSUPPORT) X(EAGAIN) X(EALREADY) X(EBADF) X(EBADMSG) X(EBUSY) X(ECANCELED) X(ECHILD) X(ECONNABORTED) X(ECONNREFUSED) X(ECONNRESET) X(EDEADLK) X(EDESTADDRREQ) X(EDOM) X(EDQUOT) X(EEXIST) X(EFAULT) X(EFBIG) X(EHOSTUNREACH) X(EIDRM) X(EILSEQ) X(EINPROGRESS) X(EINTR) X(EINVAL) X(EIO) X(EISCONN) X(EISDIR) X(ELOOP) X(EMFILE) X(EMLINK) X(EMSGSIZE) X(EMULTIHOP) X(ENAMETOOLONG) X(ENETDOWN) X(ENETRESET) X(ENETUNREACH) X(ENFILE) X(ENOBUFS) X(ENODEV) X(ENOENT) X(ENOEXEC) X(ENOLCK) X(ENOLINK) X(ENOMEM) X(ENOMSG) X(ENOPROTOOPT) X(ENOSPC) X(ENOSYS) X(ENOTCONN) X(ENOTDIR) X(ENOTEMPTY) X(ENOTRECOVERABLE) X(ENOTSOCK) X(ENOTSUP) X(ENOTTY) X(ENXIO) X(EOVERFLOW) X(EOWNERDEAD) X(EPERM) X(EPIPE) X(EPROTO) X(EPROTONOSUPPORT) X(EPROTOTYPE) X(ERANGE) X(EROFS) X(ESPIPE) X(ESRCH) X(ESTALE) X(ETIMEDOUT) X(ETXTBSY) X(EXDEV) X(ENOTCAPABLE) XEND }; static struct xlat cloudabi_fdflags[] = { X(FDFLAG_APPEND) X(FDFLAG_DSYNC) X(FDFLAG_NONBLOCK) X(FDFLAG_RSYNC) X(FDFLAG_SYNC) XEND }; static struct xlat cloudabi_fdsflags[] = { X(FDSTAT_FLAGS) X(FDSTAT_RIGHTS) XEND }; static struct xlat cloudabi_filetype[] = { X(FILETYPE_UNKNOWN) X(FILETYPE_BLOCK_DEVICE) X(FILETYPE_CHARACTER_DEVICE) X(FILETYPE_DIRECTORY) X(FILETYPE_FIFO) X(FILETYPE_POLL) X(FILETYPE_PROCESS) X(FILETYPE_REGULAR_FILE) X(FILETYPE_SHARED_MEMORY) X(FILETYPE_SOCKET_DGRAM) X(FILETYPE_SOCKET_SEQPACKET) X(FILETYPE_SOCKET_STREAM) X(FILETYPE_SYMBOLIC_LINK) XEND }; static struct xlat cloudabi_fsflags[] = { X(FILESTAT_ATIM) X(FILESTAT_ATIM_NOW) X(FILESTAT_MTIM) X(FILESTAT_MTIM_NOW) X(FILESTAT_SIZE) XEND }; static struct xlat cloudabi_mflags[] = { X(MAP_ANON) X(MAP_FIXED) X(MAP_PRIVATE) X(MAP_SHARED) XEND }; static struct xlat cloudabi_mprot[] = { X(PROT_EXEC) X(PROT_WRITE) X(PROT_READ) XEND }; static struct xlat cloudabi_msflags[] = { X(MS_ASYNC) X(MS_INVALIDATE) X(MS_SYNC) XEND }; static struct xlat cloudabi_oflags[] = { X(O_CREAT) X(O_DIRECTORY) X(O_EXCL) X(O_TRUNC) XEND }; static struct xlat cloudabi_sa_family[] = { X(AF_UNSPEC) X(AF_INET) X(AF_INET6) X(AF_UNIX) XEND }; static struct xlat cloudabi_sdflags[] = { X(SHUT_RD) X(SHUT_WR) XEND }; static struct xlat cloudabi_signal[] = { X(SIGABRT) X(SIGALRM) X(SIGBUS) X(SIGCHLD) X(SIGCONT) X(SIGFPE) X(SIGHUP) X(SIGILL) X(SIGINT) X(SIGKILL) X(SIGPIPE) X(SIGQUIT) X(SIGSEGV) X(SIGSTOP) X(SIGSYS) X(SIGTERM) X(SIGTRAP) X(SIGTSTP) X(SIGTTIN) X(SIGTTOU) X(SIGURG) X(SIGUSR1) X(SIGUSR2) X(SIGVTALRM) X(SIGXCPU) X(SIGXFSZ) XEND }; static struct xlat cloudabi_ssflags[] = { X(SOCKSTAT_CLEAR_ERROR) XEND }; static struct xlat cloudabi_ssstate[] = { X(SOCKSTATE_ACCEPTCONN) XEND }; static struct xlat cloudabi_ulflags[] = { X(UNLINK_REMOVEDIR) XEND }; static struct xlat cloudabi_whence[] = { X(WHENCE_CUR) X(WHENCE_END) X(WHENCE_SET) XEND }; #undef X #undef XEND /* * Searches an xlat array for a value, and returns it if found. Otherwise * return a string representation. */ static const char * lookup(struct xlat *xlat, int val, int base) { static char tmp[16]; for (; xlat->str != NULL; xlat++) if (xlat->val == val) return (xlat->str); switch (base) { case 8: sprintf(tmp, "0%o", val); break; case 16: sprintf(tmp, "0x%x", val); break; case 10: sprintf(tmp, "%u", val); break; default: errx(1,"Unknown lookup base"); break; } return (tmp); } static const char * xlookup(struct xlat *xlat, int val) { return (lookup(xlat, val, 16)); } /* * Searches an xlat array containing bitfield values. 
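Each bit that * matches a table entry is printed by name, joined with '|'.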
Remaining bits * set after removing the known ones are printed at the end: * IN|0x400. */ static char * xlookup_bits(struct xlat *xlat, int val) { int len, rem; static char str[512]; len = 0; rem = val; for (; xlat->str != NULL; xlat++) { if ((xlat->val & rem) == xlat->val) { /* * Don't print the "all-bits-zero" string unless all * bits are really zero. */ if (xlat->val == 0 && val != 0) continue; len += sprintf(str + len, "%s|", xlat->str); rem &= ~(xlat->val); } } /* * If we have leftover bits or didn't match anything, print * the remainder. */ if (rem || len == 0) len += sprintf(str + len, "0x%x", rem); if (len && str[len - 1] == '|') len--; str[len] = 0; return (str); } static void print_integer_arg(const char *(*decoder)(int), FILE *fp, int value) { const char *str; str = decoder(value); if (str != NULL) fputs(str, fp); else fprintf(fp, "%d", value); } static void print_mask_arg(bool (*decoder)(FILE *, int, int *), FILE *fp, int value) { int rem; if (!decoder(fp, value, &rem)) fprintf(fp, "0x%x", rem); else if (rem != 0) fprintf(fp, "|0x%x", rem); } static void print_mask_arg32(bool (*decoder)(FILE *, uint32_t, uint32_t *), FILE *fp, uint32_t value) { uint32_t rem; if (!decoder(fp, value, &rem)) fprintf(fp, "0x%x", rem); else if (rem != 0) fprintf(fp, "|0x%x", rem); } #ifndef __LP64__ /* * Add argument padding to subsequent system calls after a Quad * syscall argument as needed. This used to be done by hand in the * decoded_syscalls table which was ugly and error-prone. It is * simpler to do the fixup of offsets at initialization time than when * decoding arguments. */ static void quad_fixup(struct syscall *sc) { int offset, prev; u_int i; offset = 0; prev = -1; for (i = 0; i < sc->nargs; i++) { /* This arg type is a dummy that doesn't use offset. */ if ((sc->args[i].type & ARG_MASK) == PipeFds) continue; assert(prev < sc->args[i].offset); prev = sc->args[i].offset; sc->args[i].offset += offset; switch (sc->args[i].type & ARG_MASK) { case Quad: case QuadHex: #ifdef __powerpc__ /* * 64-bit arguments on 32-bit powerpc must be * 64-bit aligned. If the current offset is * not aligned, the calling convention inserts * a 32-bit pad argument that should be skipped. */ if (sc->args[i].offset % 2 == 1) { sc->args[i].offset++; offset++; } #endif offset++; default: break; } } } #endif void init_syscalls(void) { struct syscall *sc; STAILQ_INIT(&syscalls); for (sc = decoded_syscalls; sc->name != NULL; sc++) { #ifndef __LP64__ quad_fixup(sc); #endif STAILQ_INSERT_HEAD(&syscalls, sc, entries); } } static struct syscall * find_syscall(struct procabi *abi, u_int number) { struct extra_syscall *es; if (number < nitems(abi->syscalls)) return (abi->syscalls[number]); STAILQ_FOREACH(es, &abi->extra_syscalls, entries) { if (es->number == number) return (es->sc); } return (NULL); } static void add_syscall(struct procabi *abi, u_int number, struct syscall *sc) { struct extra_syscall *es; if (number < nitems(abi->syscalls)) { assert(abi->syscalls[number] == NULL); abi->syscalls[number] = sc; } else { es = malloc(sizeof(*es)); es->sc = sc; es->number = number; STAILQ_INSERT_TAIL(&abi->extra_syscalls, es, entries); } } /* * If/when the list gets big, it might be desirable to do it * as a hash table or binary search.
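* Note that get_syscall() below caches each successful lookup in the * per-ABI table via add_syscall(), so the linear scan of this list only * happens the first time a given syscall number is seen.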
*/ struct syscall * get_syscall(struct threadinfo *t, u_int number, u_int nargs) { struct syscall *sc; const char *name; char *new_name; u_int i; sc = find_syscall(t->proc->abi, number); if (sc != NULL) return (sc); name = sysdecode_syscallname(t->proc->abi->abi, number); if (name == NULL) { asprintf(&new_name, "#%d", number); name = new_name; } else new_name = NULL; STAILQ_FOREACH(sc, &syscalls, entries) { if (strcmp(name, sc->name) == 0) { add_syscall(t->proc->abi, number, sc); free(new_name); return (sc); } } /* It is unknown. Add it into the list. */ #if DEBUG fprintf(stderr, "unknown syscall %s -- setting args to %d\n", name, nargs); #endif sc = calloc(1, sizeof(struct syscall)); sc->name = name; if (new_name != NULL) sc->unknown = true; sc->ret_type = 1; sc->nargs = nargs; for (i = 0; i < nargs; i++) { sc->args[i].offset = i; /* Treat all unknown arguments as LongHex. */ sc->args[i].type = LongHex; } STAILQ_INSERT_HEAD(&syscalls, sc, entries); add_syscall(t->proc->abi, number, sc); return (sc); } /* * Copy a fixed amount of bytes from the process. */ static int get_struct(pid_t pid, void *offset, void *buf, int len) { struct ptrace_io_desc iorequest; iorequest.piod_op = PIOD_READ_D; iorequest.piod_offs = offset; iorequest.piod_addr = buf; iorequest.piod_len = len; if (ptrace(PT_IO, pid, (caddr_t)&iorequest, 0) < 0) return (-1); return (0); } #define MAXSIZE 4096 /* * Copy a string from the process. Note that it is * expected to be a C string, but if max is set, it will * only get that much. */ static char * get_string(pid_t pid, void *addr, int max) { struct ptrace_io_desc iorequest; char *buf, *nbuf; size_t offset, size, totalsize; offset = 0; if (max) size = max + 1; else { /* Read up to the end of the current page. */ size = PAGE_SIZE - ((uintptr_t)addr % PAGE_SIZE); if (size > MAXSIZE) size = MAXSIZE; } totalsize = size; buf = malloc(totalsize); if (buf == NULL) return (NULL); for (;;) { iorequest.piod_op = PIOD_READ_D; iorequest.piod_offs = (char *)addr + offset; iorequest.piod_addr = buf + offset; iorequest.piod_len = size; if (ptrace(PT_IO, pid, (caddr_t)&iorequest, 0) < 0) { free(buf); return (NULL); } if (memchr(buf + offset, '\0', size) != NULL) return (buf); offset += size; if (totalsize < MAXSIZE && max == 0) { size = MAXSIZE - totalsize; if (size > PAGE_SIZE) size = PAGE_SIZE; nbuf = realloc(buf, totalsize + size); if (nbuf == NULL) { buf[totalsize - 1] = '\0'; return (buf); } buf = nbuf; totalsize += size; } else { buf[totalsize - 1] = '\0'; return (buf); } } } static const char * strsig2(int sig) { static char tmp[32]; const char *signame; signame = sysdecode_signal(sig); if (signame == NULL) { snprintf(tmp, sizeof(tmp), "%d", sig); signame = tmp; } return (signame); } static void print_kevent(FILE *fp, struct kevent *ke, int input) { switch (ke->filter) { case EVFILT_READ: case EVFILT_WRITE: case EVFILT_VNODE: case EVFILT_PROC: case EVFILT_TIMER: case EVFILT_PROCDESC: fprintf(fp, "%ju", (uintmax_t)ke->ident); break; case EVFILT_SIGNAL: fputs(strsig2(ke->ident), fp); break; default: fprintf(fp, "%p", (void *)ke->ident); } fprintf(fp, ",%s,%s,", xlookup(kevent_filters, ke->filter), xlookup_bits(kevent_flags, ke->flags)); switch (ke->filter) { case EVFILT_READ: case EVFILT_WRITE: fputs(xlookup_bits(kevent_rdwr_fflags, ke->fflags), fp); break; case EVFILT_VNODE: fputs(xlookup_bits(kevent_vnode_fflags, ke->fflags), fp); break; case EVFILT_PROC: case EVFILT_PROCDESC: fputs(xlookup_bits(kevent_proc_fflags, ke->fflags), fp); break; case EVFILT_TIMER: 
fputs(xlookup_bits(kevent_timer_fflags, ke->fflags), fp); break; case EVFILT_USER: { int ctrl, data; ctrl = ke->fflags & NOTE_FFCTRLMASK; data = ke->fflags & NOTE_FFLAGSMASK; if (input) { fputs(xlookup(kevent_user_ffctrl, ctrl), fp); if (ke->fflags & NOTE_TRIGGER) fputs("|NOTE_TRIGGER", fp); if (data != 0) fprintf(fp, "|%#x", data); } else { fprintf(fp, "%#x", data); } break; } default: fprintf(fp, "%#x", ke->fflags); } fprintf(fp, ",%p,%p", (void *)ke->data, (void *)ke->udata); } static void print_utrace(FILE *fp, void *utrace_addr, size_t len) { unsigned char *utrace_buffer; fprintf(fp, "{ "); if (sysdecode_utrace(fp, utrace_addr, len)) { fprintf(fp, " }"); return; } utrace_buffer = utrace_addr; fprintf(fp, "%zu:", len); while (len--) fprintf(fp, " %02x", *utrace_buffer++); fprintf(fp, " }"); } /* * Converts a syscall argument into a string. Said string is * allocated via malloc(), so needs to be free()'d. sc is * a pointer to the syscall description (see above); args is * an array of all of the system call arguments. */ char * print_arg(struct syscall_args *sc, unsigned long *args, long *retval, struct trussinfo *trussinfo) { FILE *fp; char *tmp; size_t tmplen; pid_t pid; fp = open_memstream(&tmp, &tmplen); pid = trussinfo->curthread->proc->pid; switch (sc->type & ARG_MASK) { case Hex: fprintf(fp, "0x%x", (int)args[sc->offset]); break; case Octal: fprintf(fp, "0%o", (int)args[sc->offset]); break; case Int: fprintf(fp, "%d", (int)args[sc->offset]); break; case UInt: fprintf(fp, "%u", (unsigned int)args[sc->offset]); break; + case PUInt: { + unsigned int val; + + if (get_struct(pid, (void *)args[sc->offset], &val, + sizeof(val)) == 0) + fprintf(fp, "{ %u }", val); + else + fprintf(fp, "0x%lx", args[sc->offset]); + break; + } case LongHex: fprintf(fp, "0x%lx", args[sc->offset]); break; case Long: fprintf(fp, "%ld", args[sc->offset]); break; case Sizet: fprintf(fp, "%zu", (size_t)args[sc->offset]); break; case Name: { /* NULL-terminated string. */ char *tmp2; tmp2 = get_string(pid, (void*)args[sc->offset], 0); fprintf(fp, "\"%s\"", tmp2); free(tmp2); break; } case BinString: { /* * Binary block of data that might have printable characters. * XXX If type|OUT, assume that the length is the syscall's * return value. Otherwise, assume that the length of the block * is in the next syscall argument. */ int max_string = trussinfo->strsize; char tmp2[max_string + 1], *tmp3; int len; int truncated = 0; if (sc->type & OUT) len = retval[0]; else len = args[sc->offset + 1]; /* * Don't print more than max_string characters, to avoid word * wrap. If we have to truncate put some ... after the string. */ if (len > max_string) { len = max_string; truncated = 1; } if (len && get_struct(pid, (void*)args[sc->offset], &tmp2, len) != -1) { tmp3 = malloc(len * 4 + 1); while (len) { if (strvisx(tmp3, tmp2, len, VIS_CSTYLE|VIS_TAB|VIS_NL) <= max_string) break; len--; truncated = 1; } fprintf(fp, "\"%s\"%s", tmp3, truncated ? "..." : ""); free(tmp3); } else { fprintf(fp, "0x%lx", args[sc->offset]); } break; } case ExecArgs: case ExecEnv: case StringArray: { uintptr_t addr; union { char *strarray[0]; char buf[PAGE_SIZE]; } u; char *string; size_t len; u_int first, i; /* * Only parse argv[] and environment arrays from exec calls * if requested. */ if (((sc->type & ARG_MASK) == ExecArgs && (trussinfo->flags & EXECVEARGS) == 0) || ((sc->type & ARG_MASK) == ExecEnv && (trussinfo->flags & EXECVEENVS) == 0)) { fprintf(fp, "0x%lx", args[sc->offset]); break; } /* * Read a page of pointers at a time. 
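Each chunk is copied from the traced * process with a single PT_IO request (see get_struct()).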
Punt if the top-level * pointer is not aligned. Note that the first read is of * a partial page. */ addr = args[sc->offset]; if (addr % sizeof(char *) != 0) { fprintf(fp, "0x%lx", args[sc->offset]); break; } len = PAGE_SIZE - (addr & PAGE_MASK); if (get_struct(pid, (void *)addr, u.buf, len) == -1) { fprintf(fp, "0x%lx", args[sc->offset]); break; } fputc('[', fp); first = 1; i = 0; while (u.strarray[i] != NULL) { string = get_string(pid, u.strarray[i], 0); fprintf(fp, "%s \"%s\"", first ? "" : ",", string); free(string); first = 0; i++; if (i == len / sizeof(char *)) { addr += len; len = PAGE_SIZE; if (get_struct(pid, (void *)addr, u.buf, len) == -1) { fprintf(fp, ", "); break; } i = 0; } } fputs(" ]", fp); break; } #ifdef __LP64__ case Quad: fprintf(fp, "%ld", args[sc->offset]); break; case QuadHex: fprintf(fp, "0x%lx", args[sc->offset]); break; #else case Quad: case QuadHex: { unsigned long long ll; #if _BYTE_ORDER == _LITTLE_ENDIAN ll = (unsigned long long)args[sc->offset + 1] << 32 | args[sc->offset]; #else ll = (unsigned long long)args[sc->offset] << 32 | args[sc->offset + 1]; #endif if ((sc->type & ARG_MASK) == Quad) fprintf(fp, "%lld", ll); else fprintf(fp, "0x%llx", ll); break; } #endif case Ptr: fprintf(fp, "0x%lx", args[sc->offset]); break; case Readlinkres: { char *tmp2; if (retval[0] == -1) break; tmp2 = get_string(pid, (void*)args[sc->offset], retval[0]); fprintf(fp, "\"%s\"", tmp2); free(tmp2); break; } case Ioctl: { const char *temp; unsigned long cmd; cmd = args[sc->offset]; temp = sysdecode_ioctlname(cmd); if (temp) fputs(temp, fp); else { fprintf(fp, "0x%lx { IO%s%s 0x%lx('%c'), %lu, %lu }", cmd, cmd & IOC_OUT ? "R" : "", cmd & IOC_IN ? "W" : "", IOCGROUP(cmd), isprint(IOCGROUP(cmd)) ? (char)IOCGROUP(cmd) : '?', cmd & 0xFF, IOCPARM_LEN(cmd)); } break; } case Timespec: { struct timespec ts; if (get_struct(pid, (void *)args[sc->offset], &ts, sizeof(ts)) != -1) fprintf(fp, "{ %jd.%09ld }", (intmax_t)ts.tv_sec, ts.tv_nsec); else fprintf(fp, "0x%lx", args[sc->offset]); break; } case Timespec2: { struct timespec ts[2]; const char *sep; unsigned int i; if (get_struct(pid, (void *)args[sc->offset], &ts, sizeof(ts)) != -1) { fputs("{ ", fp); sep = ""; for (i = 0; i < nitems(ts); i++) { fputs(sep, fp); sep = ", "; switch (ts[i].tv_nsec) { case UTIME_NOW: fprintf(fp, "UTIME_NOW"); break; case UTIME_OMIT: fprintf(fp, "UTIME_OMIT"); break; default: fprintf(fp, "%jd.%09ld", (intmax_t)ts[i].tv_sec, ts[i].tv_nsec); break; } } fputs(" }", fp); } else fprintf(fp, "0x%lx", args[sc->offset]); break; } case Timeval: { struct timeval tv; if (get_struct(pid, (void *)args[sc->offset], &tv, sizeof(tv)) != -1) fprintf(fp, "{ %jd.%06ld }", (intmax_t)tv.tv_sec, tv.tv_usec); else fprintf(fp, "0x%lx", args[sc->offset]); break; } case Timeval2: { struct timeval tv[2]; if (get_struct(pid, (void *)args[sc->offset], &tv, sizeof(tv)) != -1) fprintf(fp, "{ %jd.%06ld, %jd.%06ld }", (intmax_t)tv[0].tv_sec, tv[0].tv_usec, (intmax_t)tv[1].tv_sec, tv[1].tv_usec); else fprintf(fp, "0x%lx", args[sc->offset]); break; } case Itimerval: { struct itimerval itv; if (get_struct(pid, (void *)args[sc->offset], &itv, sizeof(itv)) != -1) fprintf(fp, "{ %jd.%06ld, %jd.%06ld }", (intmax_t)itv.it_interval.tv_sec, itv.it_interval.tv_usec, (intmax_t)itv.it_value.tv_sec, itv.it_value.tv_usec); else fprintf(fp, "0x%lx", args[sc->offset]); break; } case LinuxSockArgs: { struct linux_socketcall_args largs; if (get_struct(pid, (void *)args[sc->offset], (void *)&largs, sizeof(largs)) != -1) fprintf(fp, "{ %s, 0x%lx }", 
lookup(linux_socketcall_ops, largs.what, 10), (long unsigned int)largs.args); else fprintf(fp, "0x%lx", args[sc->offset]); break; } case Pollfd: { /* * XXX: A Pollfd argument expects the /next/ syscall argument * to be the number of fds in the array. This matches the poll * syscall. */ struct pollfd *pfd; int numfds = args[sc->offset + 1]; size_t bytes = sizeof(struct pollfd) * numfds; int i; if ((pfd = malloc(bytes)) == NULL) err(1, "Cannot malloc %zu bytes for pollfd array", bytes); if (get_struct(pid, (void *)args[sc->offset], pfd, bytes) != -1) { fputs("{", fp); for (i = 0; i < numfds; i++) { fprintf(fp, " %d/%s", pfd[i].fd, xlookup_bits(poll_flags, pfd[i].events)); } fputs(" }", fp); } else { fprintf(fp, "0x%lx", args[sc->offset]); } free(pfd); break; } case Fd_set: { /* * XXX: A Fd_set argument expects the /first/ syscall argument * to be the number of fds in the array. This matches the * select syscall. */ fd_set *fds; int numfds = args[0]; size_t bytes = _howmany(numfds, _NFDBITS) * _NFDBITS; int i; if ((fds = malloc(bytes)) == NULL) err(1, "Cannot malloc %zu bytes for fd_set array", bytes); if (get_struct(pid, (void *)args[sc->offset], fds, bytes) != -1) { fputs("{", fp); for (i = 0; i < numfds; i++) { if (FD_ISSET(i, fds)) fprintf(fp, " %d", i); } fputs(" }", fp); } else fprintf(fp, "0x%lx", args[sc->offset]); free(fds); break; } case Signal: fputs(strsig2(args[sc->offset]), fp); break; case Sigset: { long sig; sigset_t ss; int i, first; sig = args[sc->offset]; if (get_struct(pid, (void *)args[sc->offset], (void *)&ss, sizeof(ss)) == -1) { fprintf(fp, "0x%lx", args[sc->offset]); break; } fputs("{ ", fp); first = 1; for (i = 1; i < sys_nsig; i++) { if (sigismember(&ss, i)) { fprintf(fp, "%s%s", !first ? "|" : "", strsig2(i)); first = 0; } } if (!first) fputc(' ', fp); fputc('}', fp); break; } case Sigprocmask: print_integer_arg(sysdecode_sigprocmask_how, fp, args[sc->offset]); break; case Fcntlflag: /* XXX: Output depends on the value of the previous argument. */ if (sysdecode_fcntl_arg_p(args[sc->offset - 1])) sysdecode_fcntl_arg(fp, args[sc->offset - 1], args[sc->offset], 16); break; case Open: print_mask_arg(sysdecode_open_flags, fp, args[sc->offset]); break; case Fcntl: print_integer_arg(sysdecode_fcntl_cmd, fp, args[sc->offset]); break; case Mprot: print_mask_arg(sysdecode_mmap_prot, fp, args[sc->offset]); break; case Mmapflags: print_mask_arg(sysdecode_mmap_flags, fp, args[sc->offset]); break; case Whence: print_integer_arg(sysdecode_whence, fp, args[sc->offset]); break; case Sockdomain: print_integer_arg(sysdecode_socketdomain, fp, args[sc->offset]); break; case Socktype: print_mask_arg(sysdecode_socket_type, fp, args[sc->offset]); break; case Shutdown: print_integer_arg(sysdecode_shutdown_how, fp, args[sc->offset]); break; case Resource: print_integer_arg(sysdecode_rlimit, fp, args[sc->offset]); break; + case RusageWho: + print_integer_arg(sysdecode_getrusage_who, fp, args[sc->offset]); + break; case Pathconf: fputs(xlookup(pathconf_arg, args[sc->offset]), fp); break; case Rforkflags: print_mask_arg(sysdecode_rfork_flags, fp, args[sc->offset]); break; case Sockaddr: { char addr[64]; struct sockaddr_in *lsin; struct sockaddr_in6 *lsin6; struct sockaddr_un *sun; struct sockaddr *sa; socklen_t len; u_char *q; if (args[sc->offset] == 0) { fputs("NULL", fp); break; } /* * Extract the address length from the next argument. If * this is an output sockaddr (OUT is set), then the * next argument is a pointer to a socklen_t. 
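(This is the value-result * convention used by calls such as accept(2) and getpeername(2).)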
Otherwise * the next argument contains a socklen_t by value. */ if (sc->type & OUT) { if (get_struct(pid, (void *)args[sc->offset + 1], &len, sizeof(len)) == -1) { fprintf(fp, "0x%lx", args[sc->offset]); break; } } else len = args[sc->offset + 1]; /* If the length is too small, just bail. */ if (len < sizeof(*sa)) { fprintf(fp, "0x%lx", args[sc->offset]); break; } sa = calloc(1, len); if (get_struct(pid, (void *)args[sc->offset], sa, len) == -1) { free(sa); fprintf(fp, "0x%lx", args[sc->offset]); break; } switch (sa->sa_family) { case AF_INET: if (len < sizeof(*lsin)) goto sockaddr_short; lsin = (struct sockaddr_in *)(void *)sa; inet_ntop(AF_INET, &lsin->sin_addr, addr, sizeof(addr)); fprintf(fp, "{ AF_INET %s:%d }", addr, htons(lsin->sin_port)); break; case AF_INET6: if (len < sizeof(*lsin6)) goto sockaddr_short; lsin6 = (struct sockaddr_in6 *)(void *)sa; inet_ntop(AF_INET6, &lsin6->sin6_addr, addr, sizeof(addr)); fprintf(fp, "{ AF_INET6 [%s]:%d }", addr, htons(lsin6->sin6_port)); break; case AF_UNIX: sun = (struct sockaddr_un *)sa; fprintf(fp, "{ AF_UNIX \"%.*s\" }", (int)(len - offsetof(struct sockaddr_un, sun_path)), sun->sun_path); break; default: sockaddr_short: fprintf(fp, "{ sa_len = %d, sa_family = %d, sa_data = {", (int)sa->sa_len, (int)sa->sa_family); for (q = (u_char *)sa->sa_data; q < (u_char *)sa + len; q++) fprintf(fp, "%s 0x%02x", q == (u_char *)sa->sa_data ? "" : ",", *q); fputs(" } }", fp); } free(sa); break; } case Sigaction: { struct sigaction sa; if (get_struct(pid, (void *)args[sc->offset], &sa, sizeof(sa)) != -1) { fputs("{ ", fp); if (sa.sa_handler == SIG_DFL) fputs("SIG_DFL", fp); else if (sa.sa_handler == SIG_IGN) fputs("SIG_IGN", fp); else fprintf(fp, "%p", sa.sa_handler); fprintf(fp, " %s ss_t }", xlookup_bits(sigaction_flags, sa.sa_flags)); } else fprintf(fp, "0x%lx", args[sc->offset]); break; } case Kevent: { /* * XXX XXX: The size of the array is determined by either the * next syscall argument, or by the syscall return value, * depending on which argument number we are. This matches the * kevent syscall, but luckily that's the only syscall that uses * them. 
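* Concretely: for the changelist (offset 1) the length is the next * syscall argument, while for the returned eventlist (offset 3) it is * the syscall's return value.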
*/ struct kevent *ke; int numevents = -1; size_t bytes; int i; if (sc->offset == 1) numevents = args[sc->offset+1]; else if (sc->offset == 3 && retval[0] != -1) numevents = retval[0]; if (numevents >= 0) { bytes = sizeof(struct kevent) * numevents; if ((ke = malloc(bytes)) == NULL) err(1, "Cannot malloc %zu bytes for kevent array", bytes); } else ke = NULL; if (numevents >= 0 && get_struct(pid, (void *)args[sc->offset], ke, bytes) != -1) { fputc('{', fp); for (i = 0; i < numevents; i++) { fputc(' ', fp); print_kevent(fp, &ke[i], sc->offset == 1); } fputs(" }", fp); } else { fprintf(fp, "0x%lx", args[sc->offset]); } free(ke); break; } case Stat: { struct stat st; if (get_struct(pid, (void *)args[sc->offset], &st, sizeof(st)) != -1) { char mode[12]; strmode(st.st_mode, mode); fprintf(fp, "{ mode=%s,inode=%ju,size=%jd,blksize=%ld }", mode, (uintmax_t)st.st_ino, (intmax_t)st.st_size, (long)st.st_blksize); } else { fprintf(fp, "0x%lx", args[sc->offset]); } break; } case StatFs: { unsigned int i; struct statfs buf; if (get_struct(pid, (void *)args[sc->offset], &buf, sizeof(buf)) != -1) { char fsid[17]; bzero(fsid, sizeof(fsid)); if (buf.f_fsid.val[0] != 0 || buf.f_fsid.val[1] != 0) { for (i = 0; i < sizeof(buf.f_fsid); i++) snprintf(&fsid[i*2], sizeof(fsid) - (i*2), "%02x", ((u_char *)&buf.f_fsid)[i]); } fprintf(fp, "{ fstypename=%s,mntonname=%s,mntfromname=%s," "fsid=%s }", buf.f_fstypename, buf.f_mntonname, buf.f_mntfromname, fsid); } else fprintf(fp, "0x%lx", args[sc->offset]); break; } case Rusage: { struct rusage ru; if (get_struct(pid, (void *)args[sc->offset], &ru, sizeof(ru)) != -1) { fprintf(fp, "{ u=%jd.%06ld,s=%jd.%06ld,in=%ld,out=%ld }", (intmax_t)ru.ru_utime.tv_sec, ru.ru_utime.tv_usec, (intmax_t)ru.ru_stime.tv_sec, ru.ru_stime.tv_usec, ru.ru_inblock, ru.ru_oublock); } else fprintf(fp, "0x%lx", args[sc->offset]); break; } case Rlimit: { struct rlimit rl; if (get_struct(pid, (void *)args[sc->offset], &rl, sizeof(rl)) != -1) { fprintf(fp, "{ cur=%ju,max=%ju }", (uintmax_t)rl.rlim_cur, (uintmax_t)rl.rlim_max); } else fprintf(fp, "0x%lx", args[sc->offset]); break; } case ExitStatus: { int status; if (get_struct(pid, (void *)args[sc->offset], &status, sizeof(status)) != -1) { fputs("{ ", fp); if (WIFCONTINUED(status)) fputs("CONTINUED", fp); else if (WIFEXITED(status)) fprintf(fp, "EXITED,val=%d", WEXITSTATUS(status)); else if (WIFSIGNALED(status)) fprintf(fp, "SIGNALED,sig=%s%s", strsig2(WTERMSIG(status)), WCOREDUMP(status) ? ",cored" : ""); else fprintf(fp, "STOPPED,sig=%s", strsig2(WSTOPSIG(status))); fputs(" }", fp); } else fprintf(fp, "0x%lx", args[sc->offset]); break; } case Waitoptions: print_mask_arg(sysdecode_wait6_options, fp, args[sc->offset]); break; case Idtype: print_integer_arg(sysdecode_idtype, fp, args[sc->offset]); break; case Procctl: print_integer_arg(sysdecode_procctl_cmd, fp, args[sc->offset]); break; case Umtxop: print_integer_arg(sysdecode_umtx_op, fp, args[sc->offset]); break; case Atfd: print_integer_arg(sysdecode_atfd, fp, args[sc->offset]); break; case Atflags: fputs(xlookup_bits(at_flags, args[sc->offset]), fp); break; case Accessmode: print_mask_arg(sysdecode_access_mode, fp, args[sc->offset]); break; case Sysarch: fputs(xlookup(sysarch_ops, args[sc->offset]), fp); break; case PipeFds: /* * The pipe() system call in the kernel returns its * two file descriptors via return values. However, * the interface exposed by libc is that pipe() * accepts a pointer to an array of descriptors.
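(At this point the two * new descriptors are available in retval[0] and retval[1].)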
* Format the output to match the libc API by printing * the returned file descriptors as a fake argument. * * Overwrite the first retval to signal a successful * return as well. */ fprintf(fp, "{ %ld, %ld }", retval[0], retval[1]); retval[0] = 0; break; case Utrace: { size_t len; void *utrace_addr; len = args[sc->offset + 1]; utrace_addr = calloc(1, len); if (get_struct(pid, (void *)args[sc->offset], (void *)utrace_addr, len) != -1) print_utrace(fp, utrace_addr, len); else fprintf(fp, "0x%lx", args[sc->offset]); free(utrace_addr); break; } case IntArray: { int descriptors[16]; unsigned long i, ndescriptors; bool truncated; ndescriptors = args[sc->offset + 1]; truncated = false; if (ndescriptors > nitems(descriptors)) { ndescriptors = nitems(descriptors); truncated = true; } if (get_struct(pid, (void *)args[sc->offset], descriptors, ndescriptors * sizeof(descriptors[0])) != -1) { fprintf(fp, "{"); for (i = 0; i < ndescriptors; i++) fprintf(fp, i == 0 ? " %d" : ", %d", descriptors[i]); fprintf(fp, truncated ? ", ... }" : " }"); } else fprintf(fp, "0x%lx", args[sc->offset]); break; } case Pipe2: print_mask_arg(sysdecode_pipe2_flags, fp, args[sc->offset]); break; case CapFcntlRights: { uint32_t rights; if (sc->type & OUT) { if (get_struct(pid, (void *)args[sc->offset], &rights, sizeof(rights)) == -1) { fprintf(fp, "0x%lx", args[sc->offset]); break; } } else rights = args[sc->offset]; print_mask_arg32(sysdecode_cap_fcntlrights, fp, rights); break; } case Fadvice: print_integer_arg(sysdecode_fadvice, fp, args[sc->offset]); break; case FileFlags: { fflags_t rem; if (!sysdecode_fileflags(fp, args[sc->offset], &rem)) fprintf(fp, "0x%x", rem); else if (rem != 0) fprintf(fp, "|0x%x", rem); break; } case Flockop: print_mask_arg(sysdecode_flock_operation, fp, args[sc->offset]); break; case Getfsstatmode: print_integer_arg(sysdecode_getfsstat_mode, fp, args[sc->offset]); break; case Kldsymcmd: print_integer_arg(sysdecode_kldsym_cmd, fp, args[sc->offset]); break; case Kldunloadflags: print_integer_arg(sysdecode_kldunload_flags, fp, args[sc->offset]); break; case Madvice: print_integer_arg(sysdecode_madvice, fp, args[sc->offset]); break; case Socklent: fprintf(fp, "%u", (socklen_t)args[sc->offset]); break; case Sockprotocol: { const char *temp; int domain, protocol; domain = args[sc->offset - 2]; protocol = args[sc->offset]; if (protocol == 0) { fputs("0", fp); } else { temp = sysdecode_socket_protocol(domain, protocol); if (temp) { fputs(temp, fp); } else { fprintf(fp, "%d", protocol); } } break; } case Sockoptlevel: print_integer_arg(sysdecode_sockopt_level, fp, args[sc->offset]); break; case Sockoptname: { const char *temp; int level, name; level = args[sc->offset - 1]; name = args[sc->offset]; temp = sysdecode_sockopt_name(level, name); if (temp) { fputs(temp, fp); } else { fprintf(fp, "%d", name); } break; } case Msgflags: print_mask_arg(sysdecode_msg_flags, fp, args[sc->offset]); break; + case CapRights: { + cap_rights_t rights; + + if (get_struct(pid, (void *)args[sc->offset], &rights, + sizeof(rights)) != -1) { + fputs("{ ", fp); + sysdecode_cap_rights(fp, &rights); + fputs(" }", fp); + } else + fprintf(fp, "0x%lx", args[sc->offset]); + break; + } case CloudABIAdvice: fputs(xlookup(cloudabi_advice, args[sc->offset]), fp); break; case CloudABIClockID: fputs(xlookup(cloudabi_clockid, args[sc->offset]), fp); break; case ClouduABIFDSFlags: fputs(xlookup_bits(cloudabi_fdsflags, args[sc->offset]), fp); break; case CloudABIFDStat: { cloudabi_fdstat_t fds; if (get_struct(pid, (void *)args[sc->offset], 
&fds, sizeof(fds)) != -1) { fprintf(fp, "{ %s, ", xlookup(cloudabi_filetype, fds.fs_filetype)); fprintf(fp, "%s, ... }", xlookup_bits(cloudabi_fdflags, fds.fs_flags)); } else fprintf(fp, "0x%lx", args[sc->offset]); break; } case CloudABIFileStat: { cloudabi_filestat_t fsb; if (get_struct(pid, (void *)args[sc->offset], &fsb, sizeof(fsb)) != -1) fprintf(fp, "{ %s, %ju }", xlookup(cloudabi_filetype, fsb.st_filetype), (uintmax_t)fsb.st_size); else fprintf(fp, "0x%lx", args[sc->offset]); break; } case CloudABIFileType: fputs(xlookup(cloudabi_filetype, args[sc->offset]), fp); break; case CloudABIFSFlags: fputs(xlookup_bits(cloudabi_fsflags, args[sc->offset]), fp); break; case CloudABILookup: if ((args[sc->offset] & CLOUDABI_LOOKUP_SYMLINK_FOLLOW) != 0) fprintf(fp, "%d|LOOKUP_SYMLINK_FOLLOW", (int)args[sc->offset]); else fprintf(fp, "%d", (int)args[sc->offset]); break; case CloudABIMFlags: fputs(xlookup_bits(cloudabi_mflags, args[sc->offset]), fp); break; case CloudABIMProt: fputs(xlookup_bits(cloudabi_mprot, args[sc->offset]), fp); break; case CloudABIMSFlags: fputs(xlookup_bits(cloudabi_msflags, args[sc->offset]), fp); break; case CloudABIOFlags: fputs(xlookup_bits(cloudabi_oflags, args[sc->offset]), fp); break; case CloudABISDFlags: fputs(xlookup_bits(cloudabi_sdflags, args[sc->offset]), fp); break; case CloudABISignal: fputs(xlookup(cloudabi_signal, args[sc->offset]), fp); break; case CloudABISockStat: { cloudabi_sockstat_t ss; if (get_struct(pid, (void *)args[sc->offset], &ss, sizeof(ss)) != -1) { fprintf(fp, "{ %s, ", xlookup( cloudabi_sa_family, ss.ss_sockname.sa_family)); fprintf(fp, "%s, ", xlookup( cloudabi_sa_family, ss.ss_peername.sa_family)); fprintf(fp, "%s, ", xlookup( cloudabi_errno, ss.ss_error)); fprintf(fp, "%s }", xlookup_bits( cloudabi_ssstate, ss.ss_state)); } else fprintf(fp, "0x%lx", args[sc->offset]); break; } case CloudABISSFlags: fputs(xlookup_bits(cloudabi_ssflags, args[sc->offset]), fp); break; case CloudABITimestamp: fprintf(fp, "%lu.%09lus", args[sc->offset] / 1000000000, args[sc->offset] % 1000000000); break; case CloudABIULFlags: fputs(xlookup_bits(cloudabi_ulflags, args[sc->offset]), fp); break; case CloudABIWhence: fputs(xlookup(cloudabi_whence, args[sc->offset]), fp); break; default: errx(1, "Invalid argument type %d", sc->type & ARG_MASK); } fclose(fp); return (tmp); } /* * Print (to outfile) the system call and its arguments. */ void print_syscall(struct trussinfo *trussinfo) { struct threadinfo *t; const char *name; char **s_args; int i, len, nargs; t = trussinfo->curthread; name = t->cs.sc->name; nargs = t->cs.nargs; s_args = t->cs.s_args; len = print_line_prefix(trussinfo); len += fprintf(trussinfo->outfile, "%s(", name); for (i = 0; i < nargs; i++) { if (s_args[i] != NULL) len += fprintf(trussinfo->outfile, "%s", s_args[i]); else len += fprintf(trussinfo->outfile, "<missing argument>"); len += fprintf(trussinfo->outfile, "%s", i < (nargs - 1) ?
"," : ""); } len += fprintf(trussinfo->outfile, ")"); for (i = 0; i < 6 - (len / 8); i++) fprintf(trussinfo->outfile, "\t"); } void print_syscall_ret(struct trussinfo *trussinfo, int errorp, long *retval) { struct timespec timediff; struct threadinfo *t; struct syscall *sc; int error; t = trussinfo->curthread; sc = t->cs.sc; if (trussinfo->flags & COUNTONLY) { timespecsubt(&t->after, &t->before, &timediff); timespecadd(&sc->time, &timediff, &sc->time); sc->ncalls++; if (errorp) sc->nerror++; return; } print_syscall(trussinfo); fflush(trussinfo->outfile); if (retval == NULL) { /* * This system call resulted in the current thread's exit, * so there is no return value or error to display. */ fprintf(trussinfo->outfile, "\n"); return; } if (errorp) { error = sysdecode_abi_to_freebsd_errno(t->proc->abi->abi, retval[0]); fprintf(trussinfo->outfile, " ERR#%ld '%s'\n", retval[0], error == INT_MAX ? "Unknown error" : strerror(error)); } #ifndef __LP64__ else if (sc->ret_type == 2) { off_t off; #if _BYTE_ORDER == _LITTLE_ENDIAN off = (off_t)retval[1] << 32 | retval[0]; #else off = (off_t)retval[0] << 32 | retval[1]; #endif fprintf(trussinfo->outfile, " = %jd (0x%jx)\n", (intmax_t)off, (intmax_t)off); } #endif else fprintf(trussinfo->outfile, " = %ld (0x%lx)\n", retval[0], retval[0]); } void print_summary(struct trussinfo *trussinfo) { struct timespec total = {0, 0}; struct syscall *sc; int ncall, nerror; fprintf(trussinfo->outfile, "%-20s%15s%8s%8s\n", "syscall", "seconds", "calls", "errors"); ncall = nerror = 0; STAILQ_FOREACH(sc, &syscalls, entries) if (sc->ncalls) { fprintf(trussinfo->outfile, "%-20s%5jd.%09ld%8d%8d\n", sc->name, (intmax_t)sc->time.tv_sec, sc->time.tv_nsec, sc->ncalls, sc->nerror); timespecadd(&total, &sc->time, &total); ncall += sc->ncalls; nerror += sc->nerror; } fprintf(trussinfo->outfile, "%20s%15s%8s%8s\n", "", "-------------", "-------", "-------"); fprintf(trussinfo->outfile, "%-20s%5jd.%09ld%8d%8d\n", "", (intmax_t)total.tv_sec, total.tv_nsec, ncall, nerror); } Index: projects/clang500-import/usr.sbin/bhyve/Makefile =================================================================== --- projects/clang500-import/usr.sbin/bhyve/Makefile (revision 319548) +++ projects/clang500-import/usr.sbin/bhyve/Makefile (revision 319549) @@ -1,72 +1,80 @@ # # $FreeBSD$ # +.include + PROG= bhyve PACKAGE= bhyve DEBUG_FLAGS= -g -O0 MAN= bhyve.8 BHYVE_SYSDIR?=${SRCTOP} SRCS= \ atkbdc.c \ acpi.c \ bhyvegc.c \ bhyverun.c \ block_if.c \ bootrom.c \ console.c \ consport.c \ dbgport.c \ fwctl.c \ inout.c \ ioapic.c \ mem.c \ mevent.c \ mptbl.c \ pci_ahci.c \ pci_e82545.c \ pci_emul.c \ pci_fbuf.c \ pci_hostbridge.c \ pci_irq.c \ pci_lpc.c \ pci_passthru.c \ pci_virtio_block.c \ pci_virtio_console.c \ pci_virtio_net.c \ pci_virtio_rnd.c \ pci_uart.c \ pci_xhci.c \ pm.c \ post.c \ ps2kbd.c \ ps2mouse.c \ rfb.c \ rtc.c \ smbiostbl.c \ sockstream.c \ task_switch.c \ uart_emul.c \ usb_emul.c \ usb_mouse.c \ virtio.c \ vga.c \ xmsr.c \ spinup_ap.c .PATH: ${BHYVE_SYSDIR}/sys/amd64/vmm SRCS+= vmm_instruction_emul.c LIBADD= vmmapi md pthread z + +.if ${MK_OPENSSL} == "no" +CFLAGS+=-DNO_OPENSSL +.else +LIBADD+= crypto +.endif CFLAGS+= -I${BHYVE_SYSDIR}/sys/dev/e1000 CFLAGS+= -I${BHYVE_SYSDIR}/sys/dev/mii CFLAGS+= -I${BHYVE_SYSDIR}/sys/dev/usb/controller WARNS?= 2 .include Index: projects/clang500-import/usr.sbin/bhyve/bhyve.8 =================================================================== --- projects/clang500-import/usr.sbin/bhyve/bhyve.8 (revision 319548) +++ 
projects/clang500-import/usr.sbin/bhyve/bhyve.8 (revision 319549) @@ -1,499 +1,504 @@ .\" Copyright (c) 2013 Peter Grehan .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" $FreeBSD$ .\" -.Dd May 3, 2017 +.Dd June 2, 2017 .Dt BHYVE 8 .Os .Sh NAME .Nm bhyve .Nd "run a guest operating system inside a virtual machine" .Sh SYNOPSIS .Nm .Op Fl abehuwxACHPSWY .Op Fl c Ar numcpus .Op Fl g Ar gdbport .Op Fl l Ar lpcdev Ns Op , Ns Ar conf .Op Fl m Ar memsize Ns Op Ar K|k|M|m|G|g|T|t .Op Fl p Ar vcpu:hostcpu .Op Fl s Ar slot,emulation Ns Op , Ns Ar conf .Op Fl U Ar uuid .Ar vmname .Sh DESCRIPTION .Nm is a hypervisor that runs guest operating systems inside a virtual machine. .Pp Parameters such as the number of virtual CPUs, amount of guest memory, and I/O connectivity can be specified with command-line parameters. .Pp If not using a boot ROM, the guest operating system must be loaded with .Xr bhyveload 8 or a similar boot loader before running .Nm . Otherwise, it is enough to run .Nm with a boot ROM of choice. .Pp .Nm runs until the guest operating system reboots or an unhandled hypervisor exit is detected. .Sh OPTIONS .Bl -tag -width 10n .It Fl a The guest's local APIC is configured in xAPIC mode. The xAPIC mode is the default setting, so this option is redundant. It will be deprecated in a future version. .It Fl A Generate ACPI tables. Required for .Fx Ns /amd64 guests. .It Fl b Enable a low-level console device supported by .Fx kernels compiled with .Cd "device bvmconsole" . This option will be deprecated in a future version. .It Fl c Ar numcpus Number of guest virtual CPUs. The default is 1 and the maximum is 16. .It Fl C Include guest memory in core file. .It Fl e Force .Nm to exit when a guest issues an access to an I/O port that is not emulated. This is intended for debug purposes. .It Fl g Ar gdbport For .Fx kernels compiled with .Cd "device bvmdebug" , allow a remote kernel kgdb to be relayed to the guest kernel gdb stub via a local IPv4 address and this port. This option will be deprecated in a future version. .It Fl h Print help message and exit. .It Fl H Yield the virtual CPU thread when a HLT instruction is detected. If this option is not specified, virtual CPUs will use 100% of a host CPU.
.It Fl l Ar lpcdev Ns Op , Ns Ar conf Allow devices behind the LPC PCI-ISA bridge to be configured. The only supported devices are the TTY-class devices .Ar com1 and .Ar com2 and the boot ROM device .Ar bootrom . .It Fl m Ar memsize Ns Op Ar K|k|M|m|G|g|T|t Guest physical memory size in bytes. This must be the same size that was given to .Xr bhyveload 8 . .Pp The size argument may be suffixed with one of K, M, G or T (either upper or lower case) to indicate a multiple of kilobytes, megabytes, gigabytes, or terabytes. If no suffix is given, the value is assumed to be in megabytes. .Pp .Ar memsize defaults to 256M. .It Fl p Ar vcpu:hostcpu Pin guest's virtual CPU .Em vcpu to .Em hostcpu . .It Fl P Force the guest virtual CPU to exit when a PAUSE instruction is detected. .It Fl s Ar slot,emulation Ns Op , Ns Ar conf Configure a virtual PCI slot and function. .Pp .Nm provides PCI bus emulation and virtual devices that can be attached to slots on the bus. There are 32 available slots, with the option of providing up to 8 functions per slot. .Bl -tag -width 10n .It Ar slot .Ar pcislot[:function] .Ar bus:pcislot:function .Pp The .Ar pcislot value is 0 to 31. The optional .Ar function value is 0 to 7. The optional .Ar bus value is 0 to 255. If not specified, the .Ar function value defaults to 0. If not specified, the .Ar bus value defaults to 0. .It Ar emulation .Bl -tag -width 10n .It Li hostbridge | Li amd_hostbridge .Pp Provide a simple host bridge. This is usually configured at slot 0, and is required by most guest operating systems. The .Li amd_hostbridge emulation is identical but uses a PCI vendor ID of .Li AMD . .It Li passthru PCI pass-through device. .It Li virtio-net Virtio network interface. .It Li virtio-blk Virtio block storage interface. .It Li virtio-rnd Virtio RNG interface. .It Li virtio-console Virtio console interface, which exposes multiple ports to the guest in the form of simple char devices for simple IO between the guest and host userspaces. .It Li ahci AHCI controller attached to arbitrary devices. .It Li ahci-cd AHCI controller attached to an ATAPI CD/DVD. .It Li ahci-hd AHCI controller attached to a SATA hard-drive. .It Li e1000 Intel e82545 network interface. .It Li uart PCI 16550 serial device. .It Li lpc LPC PCI-ISA bridge with COM1 and COM2 16550 serial ports and a boot ROM. The LPC bridge emulation can only be configured on bus 0. .It Li fbuf Raw framebuffer device attached to VNC server. .It Li xhci eXtensible Host Controller Interface (xHCI) USB controller. .El .It Op Ar conf This optional parameter describes the backend for device emulations. If .Ar conf is not specified, the device emulation has no backend and can be considered unconnected. .Pp Network devices: .Bl -tag -width 10n .It Ar tapN Ns Op , Ns Ar mac=xx:xx:xx:xx:xx:xx .It Ar vmnetN Ns Op , Ns Ar mac=xx:xx:xx:xx:xx:xx .Pp If .Ar mac is not specified, the MAC address is derived from a fixed OUI and the remaining bytes from an MD5 hash of the slot and function numbers and the device name. .Pp The MAC address is an ASCII string in .Xr ethers 5 format. .El .Pp Block storage devices: .Bl -tag -width 10n .It Pa /filename Ns Oo , Ns Ar block-device-options Oc .It Pa /dev/xxx Ns Oo , Ns Ar block-device-options Oc .El .Pp The .Ar block-device-options are: .Bl -tag -width 8n .It Li nocache Open the file with .Dv O_DIRECT . .It Li direct Open the file using .Dv O_SYNC . .It Li ro Force the file to be opened read-only. 
.It Li sectorsize= Ns Ar logical Ns Oo / Ns Ar physical Oc Specify the logical and physical sector sizes of the emulated disk. The physical sector size is optional and is equal to the logical sector size if not explicitly specified. .El .Pp TTY devices: .Bl -tag -width 10n .It Li stdio Connect the serial port to the standard input and output of the .Nm process. .It Pa /dev/xxx Use the host TTY device for serial port I/O. .El .Pp Boot ROM device: .Bl -tag -width 10n .It Pa romfile Map .Ar romfile in the guest address space reserved for boot firmware. .El .Pp Pass-through devices: .Bl -tag -width 10n .It Ns Ar slot Ns / Ns Ar bus Ns / Ns Ar function Connect to a PCI device on the host at the selector described by .Ar slot , .Ar bus , and .Ar function numbers. .El .Pp Guest memory must be wired using the .Fl S option when a pass-through device is configured. .Pp The host device must have been reserved at boot-time using the .Va pptdev loader variable as described in .Xr vmm 4 . .Pp Virtio console devices: .Bl -tag -width 10n .It Li port1= Ns Pa /path/to/port1.sock Ns ,anotherport= Ns Pa ... A maximum of 16 ports per device can be created. Every port is named and corresponds to a Unix domain socket created by .Nm . .Nm accepts at most one connection per port at a time. .Pp Limitations: .Bl -bullet -offset 2n .It Due to lack of destructors in .Nm , sockets on the filesystem must be cleaned up manually after .Nm exits. .It There is currently no way to use the "console port" feature or to resize a console port. .It Emergency write is advertised, but is a no-op at present. .El .El .Pp Framebuffer devices: .Bl -tag -width 10n -.It Oo rfb= Ns Oo Ar IP: Oc Ns Ar port Oc Ns Oo ,w= Ns Ar width Oc Ns Oo ,h= Ns Ar height Oc Ns Oo ,vga= Ns Ar vgaconf Oc Ns Oo Ns ,wait Oc +.It Oo rfb= Ns Oo Ar IP: Oc Ns Ar port Oc Ns Oo ,w= Ns Ar width Oc Ns Oo ,h= Ns Ar height Oc Ns Oo ,vga= Ns Ar vgaconf Oc Ns Oo Ns ,wait Oc Ns Oo ,password= Ns Ar password Oc .Bl -tag -width 8n .It Ar IP:port An .Ar IP address and a .Ar port VNC should listen on. The default is to listen on the localhost IPv4 address, using the default VNC port 5900. Listening on an IPv6 address is not supported. .It Ar width No and Ar height A display resolution, width and height, respectively. If not specified, a default resolution of 1024x768 pixels will be used. The minimum supported resolution is 640x480 pixels and the maximum is 1920x1200 pixels. .It Ar vgaconf Possible values for this option are .Dq io (default), .Dq on , and .Dq off . PCI graphics cards have a dual personality in that they are standard PCI devices with BAR addressing, but may also implicitly decode legacy VGA I/O space .Pq Ad 0x3c0-3df and memory space .Pq 64KB at Ad 0xA0000 . The default .Dq io option should be used for guests that attempt to issue BIOS calls which result in I/O port queries, and fail to boot if I/O decode is disabled. .Pp The .Dq on option should be used along with the CSM BIOS capability in UEFI to boot traditional BIOS guests that require the legacy VGA I/O and memory regions to be available. .Pp The .Dq off option should be used for UEFI guests that assume a VGA adapter is present if they detect the I/O ports. An example of such a guest is .Ox in UEFI mode. .Pp Please refer to the .Nm .Fx wiki page .Pq Lk https://wiki.freebsd.org/bhyve for configuration notes for particular guests. .It wait Instruct .Nm to boot only upon the initiation of a VNC connection, simplifying the installation of operating systems that require immediate keyboard input.
This can be removed for post-installation use. +.It password +Set the password used to authenticate VNC clients. +This type of authentication is known to be cryptographically weak and is not +intended for use on untrusted networks. +Many implementations will want to use stronger security, such as running +the session over an encrypted channel provided by IPsec or SSH. .El .El .Pp xHCI USB devices: .Bl -tag -width 10n .It Li tablet A USB tablet device which provides precise cursor synchronization when using VNC. .El .El .It Fl S Wire guest memory. .It Fl u RTC keeps UTC time. .It Fl U Ar uuid Set the universally unique identifier .Pq UUID in the guest's System Management BIOS System Information structure. By default a UUID is generated from the host's hostname and .Ar vmname . .It Fl w Ignore accesses to unimplemented Model Specific Registers (MSRs). This is intended for debug purposes. .It Fl W Force virtio PCI device emulations to use MSI interrupts instead of MSI-X interrupts. .It Fl x The guest's local APIC is configured in x2APIC mode. .It Fl Y Disable MPtable generation. .It Ar vmname Alphanumeric name of the guest. This should be the same as that created by .Xr bhyveload 8 . .El .Sh SIGNAL HANDLING .Nm deals with the following signals: .Pp .Bl -tag -width indent -compact .It SIGTERM Trigger ACPI poweroff for a VM .El .Sh EXIT STATUS Exit status indicates how the VM was terminated: .Pp .Bl -tag -width indent -compact .It 0 rebooted .It 1 powered off .It 2 halted .It 3 triple fault .El .Sh EXAMPLES If not using a boot ROM, the guest operating system must have been loaded with .Xr bhyveload 8 or a similar boot loader before .Xr bhyve 4 can be run; when a boot ROM is used, no separate boot loader is needed. .Pp To run a virtual machine with 1GB of memory, two virtual CPUs, a virtio block device backed by the .Pa /my/image filesystem image, and a serial port for the console: .Bd -literal -offset indent bhyve -c 2 -s 0,hostbridge -s 1,lpc -s 2,virtio-blk,/my/image \\ -l com1,stdio -A -H -P -m 1G vm1 .Ed .Pp Run a 24GB single-CPU virtual machine with three network ports, one of which has a MAC address specified: .Bd -literal -offset indent bhyve -s 0,hostbridge -s 1,lpc -s 2:0,virtio-net,tap0 \\ -s 2:1,virtio-net,tap1 \\ -s 2:2,virtio-net,tap2,mac=00:be:fa:76:45:00 \\ -s 3,virtio-blk,/my/image -l com1,stdio \\ -A -H -P -m 24G bigvm .Ed .Pp Run an 8GB quad-CPU virtual machine with 8 AHCI SATA disks, an AHCI ATAPI CD-ROM, a single virtio network port, an AMD hostbridge, and the console port connected to an .Xr nmdm 4 null-modem device. .Bd -literal -offset indent bhyve -c 4 \\ -s 0,amd_hostbridge -s 1,lpc \\ -s 1:0,ahci,hd:/images/disk.1,hd:/images/disk.2,\\ hd:/images/disk.3,hd:/images/disk.4,\\ hd:/images/disk.5,hd:/images/disk.6,\\ hd:/images/disk.7,hd:/images/disk.8,\\ cd:/images/install.iso \\ -s 3,virtio-net,tap0 \\ -l com1,/dev/nmdm0A \\ -A -H -P -m 8G .Ed .Pp Run a UEFI virtual machine with a display resolution of 800 by 600 pixels that can be accessed via VNC at 0.0.0.0:5900: .Bd -literal -offset indent bhyve -c 2 -m 4G -w -H \\ -s 0,hostbridge \\ -s 3,ahci-cd,/path/to/uefi-OS-install.iso \\ -s 4,ahci-hd,disk.img \\ -s 5,virtio-net,tap0 \\ -s 29,fbuf,tcp=0.0.0.0:5900,w=800,h=600,wait \\ -s 30,xhci,tablet \\ -s 31,lpc -l com1,stdio \\ -l bootrom,/usr/local/share/uefi-firmware/BHYVE_UEFI.fd \\ uefivm .Ed .Sh SEE ALSO .Xr bhyve 4 , .Xr nmdm 4 , .Xr vmm 4 , .Xr ethers 5 , .Xr bhyvectl 8 , .Xr bhyveload 8 .Sh HISTORY .Nm first appeared in .Fx 10.0 .
.Sh AUTHORS .An Neel Natu Aq Mt neel@freebsd.org .An Peter Grehan Aq Mt grehan@freebsd.org Index: projects/clang500-import/usr.sbin/bhyve/pci_fbuf.c =================================================================== --- projects/clang500-import/usr.sbin/bhyve/pci_fbuf.c (revision 319548) +++ projects/clang500-import/usr.sbin/bhyve/pci_fbuf.c (revision 319549) @@ -1,424 +1,426 @@ /*- * Copyright (c) 2015 Nahanni Systems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include "bhyvegc.h" #include "bhyverun.h" #include "console.h" #include "inout.h" #include "pci_emul.h" #include "rfb.h" #include "vga.h" /* * bhyve Framebuffer device emulation. * BAR0 points to the current mode information. * BAR1 is the 32-bit framebuffer address. 
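 * (Descriptive note: BAR0 is backed by struct pci_fbuf_softc.memregs below,
 * i.e. fbsize, width, height, depth and refreshrate; guest writes to the
 * width and height registers drive the VGA/VESA mode switching handled in
 * pci_fbuf_write().)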
* * -s <slot>,fbuf,wait,vga=on|io|off,rfb=<ip>:port,w=width,h=height */ static int fbuf_debug = 1; #define DEBUG_INFO 1 #define DEBUG_VERBOSE 4 #define DPRINTF(level, params) if (level <= fbuf_debug) printf params #define KB (1024UL) #define MB (1024 * 1024UL) #define DMEMSZ 128 #define FB_SIZE (16*MB) #define COLS_MAX 1920 #define ROWS_MAX 1200 #define COLS_DEFAULT 1024 #define ROWS_DEFAULT 768 #define COLS_MIN 640 #define ROWS_MIN 480 struct pci_fbuf_softc { struct pci_devinst *fsc_pi; struct { uint32_t fbsize; uint16_t width; uint16_t height; uint16_t depth; uint16_t refreshrate; uint8_t reserved[116]; } __packed memregs; /* rfb server */ char *rfb_host; + char *rfb_password; int rfb_port; int rfb_wait; int vga_enabled; int vga_full; uint32_t fbaddr; char *fb_base; uint16_t gc_width; uint16_t gc_height; void *vgasc; struct bhyvegc_image *gc_image; }; static struct pci_fbuf_softc *fbuf_sc; #define PCI_FBUF_MSI_MSGS 4 static void pci_fbuf_usage(char *opt) { fprintf(stderr, "Invalid fbuf emulation \"%s\"\r\n", opt); fprintf(stderr, "fbuf: {wait,}{vga=on|io|off,}rfb=<ip>:port\r\n"); } static void pci_fbuf_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx, uint64_t offset, int size, uint64_t value) { struct pci_fbuf_softc *sc; uint8_t *p; assert(baridx == 0); sc = pi->pi_arg; DPRINTF(DEBUG_VERBOSE, ("fbuf wr: offset 0x%lx, size: %d, value: 0x%lx\n", offset, size, value)); if (offset + size > DMEMSZ) { printf("fbuf: write too large, offset %ld size %d\n", offset, size); return; } p = (uint8_t *)&sc->memregs + offset; switch (size) { case 1: *p = value; break; case 2: *(uint16_t *)p = value; break; case 4: *(uint32_t *)p = value; break; case 8: *(uint64_t *)p = value; break; default: printf("fbuf: write unknown size %d\n", size); break; } if (!sc->gc_image->vgamode && sc->memregs.width == 0 && sc->memregs.height == 0) { DPRINTF(DEBUG_INFO, ("switching to VGA mode\r\n")); sc->gc_image->vgamode = 1; sc->gc_width = 0; sc->gc_height = 0; } else if (sc->gc_image->vgamode && sc->memregs.width != 0 && sc->memregs.height != 0) { DPRINTF(DEBUG_INFO, ("switching to VESA mode\r\n")); sc->gc_image->vgamode = 0; } } uint64_t pci_fbuf_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx, uint64_t offset, int size) { struct pci_fbuf_softc *sc; uint8_t *p; uint64_t value; assert(baridx == 0); sc = pi->pi_arg; if (offset + size > DMEMSZ) { printf("fbuf: read too large, offset %ld size %d\n", offset, size); return (0); } p = (uint8_t *)&sc->memregs + offset; value = 0; switch (size) { case 1: value = *p; break; case 2: value = *(uint16_t *)p; break; case 4: value = *(uint32_t *)p; break; case 8: value = *(uint64_t *)p; break; default: printf("fbuf: read unknown size %d\n", size); break; } DPRINTF(DEBUG_VERBOSE, ("fbuf rd: offset 0x%lx, size: %d, value: 0x%lx\n", offset, size, value)); return (value); } static int pci_fbuf_parse_opts(struct pci_fbuf_softc *sc, char *opts) { char *uopts, *xopts, *config; char *tmpstr; int ret; ret = 0; uopts = strdup(opts); for (xopts = strtok(uopts, ","); xopts != NULL; xopts = strtok(NULL, ",")) { if (strcmp(xopts, "wait") == 0) { sc->rfb_wait = 1; continue; } if ((config = strchr(xopts, '=')) == NULL) { pci_fbuf_usage(xopts); ret = -1; goto done; } *config++ = '\0'; DPRINTF(DEBUG_VERBOSE, ("pci_fbuf option %s = %s\r\n", xopts, config)); if (!strcmp(xopts, "tcp") || !strcmp(xopts, "rfb")) { /* parse host-ip:port */ tmpstr = strsep(&config, ":"); if (!config) sc->rfb_port = atoi(tmpstr); else { sc->rfb_port = atoi(config); sc->rfb_host = tmpstr; } } else if
(!strcmp(xopts, "vga")) { if (!strcmp(config, "off")) { sc->vga_enabled = 0; } else if (!strcmp(config, "io")) { sc->vga_enabled = 1; sc->vga_full = 0; } else if (!strcmp(config, "on")) { sc->vga_enabled = 1; sc->vga_full = 1; } else { pci_fbuf_usage(opts); ret = -1; goto done; } } else if (!strcmp(xopts, "w")) { sc->memregs.width = atoi(config); if (sc->memregs.width > COLS_MAX) { pci_fbuf_usage(xopts); ret = -1; goto done; } else if (sc->memregs.width == 0) sc->memregs.width = 1920; } else if (!strcmp(xopts, "h")) { sc->memregs.height = atoi(config); if (sc->memregs.height > ROWS_MAX) { pci_fbuf_usage(xopts); ret = -1; goto done; } else if (sc->memregs.height == 0) sc->memregs.height = 1080; - + } else if (!strcmp(xopts, "password")) { + sc->rfb_password = config; } else { pci_fbuf_usage(xopts); ret = -1; goto done; } } done: return (ret); } extern void vga_render(struct bhyvegc *gc, void *arg); void pci_fbuf_render(struct bhyvegc *gc, void *arg) { struct pci_fbuf_softc *sc; sc = arg; if (sc->vga_full && sc->gc_image->vgamode) { /* TODO: mode switching to vga and vesa should use the special * EFI-bhyve protocol port. */ vga_render(gc, sc->vgasc); return; } if (sc->gc_width != sc->memregs.width || sc->gc_height != sc->memregs.height) { bhyvegc_resize(gc, sc->memregs.width, sc->memregs.height); sc->gc_width = sc->memregs.width; sc->gc_height = sc->memregs.height; } return; } static int pci_fbuf_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts) { int error, prot; struct pci_fbuf_softc *sc; if (fbuf_sc != NULL) { fprintf(stderr, "Only one frame buffer device is allowed.\n"); return (-1); } sc = calloc(1, sizeof(struct pci_fbuf_softc)); pi->pi_arg = sc; /* initialize config space */ pci_set_cfgdata16(pi, PCIR_DEVICE, 0x40FB); pci_set_cfgdata16(pi, PCIR_VENDOR, 0xFB5D); pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_DISPLAY); pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_DISPLAY_VGA); error = pci_emul_alloc_bar(pi, 0, PCIBAR_MEM32, DMEMSZ); assert(error == 0); error = pci_emul_alloc_bar(pi, 1, PCIBAR_MEM32, FB_SIZE); assert(error == 0); error = pci_emul_add_msicap(pi, PCI_FBUF_MSI_MSGS); assert(error == 0); sc->fbaddr = pi->pi_bar[1].addr; sc->memregs.fbsize = FB_SIZE; sc->memregs.width = COLS_DEFAULT; sc->memregs.height = ROWS_DEFAULT; sc->memregs.depth = 32; sc->vga_enabled = 1; sc->vga_full = 0; sc->fsc_pi = pi; error = pci_fbuf_parse_opts(sc, opts); if (error != 0) goto done; /* XXX until VGA rendering is enabled */ if (sc->vga_full != 0) { fprintf(stderr, "pci_fbuf: VGA rendering not enabled"); goto done; } sc->fb_base = vm_create_devmem(ctx, VM_FRAMEBUFFER, "framebuffer", FB_SIZE); if (sc->fb_base == MAP_FAILED) { error = -1; goto done; } DPRINTF(DEBUG_INFO, ("fbuf frame buffer base: %p [sz %lu]\r\n", sc->fb_base, FB_SIZE)); /* * Map the framebuffer into the guest address space. * XXX This may fail if the BAR is different than a prior * run. In this case flag the error. This will be fixed * when a change_memseg api is available. 
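 * (For example, if a previous run of the same VM programmed BAR1 to a
 * different address, the vm_mmap_memseg() call below fails and the user
 * must destroy and recreate the VM, as the error message suggests.)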
*/ prot = PROT_READ | PROT_WRITE; if (vm_mmap_memseg(ctx, sc->fbaddr, VM_FRAMEBUFFER, 0, FB_SIZE, prot) != 0) { fprintf(stderr, "pci_fbuf: mapseg failed - try deleting VM and restarting\n"); error = -1; goto done; } console_init(sc->memregs.width, sc->memregs.height, sc->fb_base); console_fb_register(pci_fbuf_render, sc); if (sc->vga_enabled) sc->vgasc = vga_init(!sc->vga_full); sc->gc_image = console_get_image(); fbuf_sc = sc; memset((void *)sc->fb_base, 0, FB_SIZE); - error = rfb_init(sc->rfb_host, sc->rfb_port, sc->rfb_wait); + error = rfb_init(sc->rfb_host, sc->rfb_port, sc->rfb_wait, sc->rfb_password); done: if (error) free(sc); return (error); } struct pci_devemu pci_fbuf = { .pe_emu = "fbuf", .pe_init = pci_fbuf_init, .pe_barwrite = pci_fbuf_write, .pe_barread = pci_fbuf_read }; PCI_EMUL_SET(pci_fbuf); Property changes on: projects/clang500-import/usr.sbin/bhyve/pci_fbuf.c ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/usr.sbin/bhyve/pci_fbuf.c:r317808-319547 Index: projects/clang500-import/usr.sbin/bhyve/rfb.c =================================================================== --- projects/clang500-import/usr.sbin/bhyve/rfb.c (revision 319548) +++ projects/clang500-import/usr.sbin/bhyve/rfb.c (revision 319549) @@ -1,941 +1,1041 @@ /*- * Copyright (c) 2015 Tycho Nightingale * Copyright (c) 2015 Leon Dang * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include <sys/param.h> #ifndef WITHOUT_CAPSICUM #include <sys/capsicum.h> #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "bhyvegc.h" #include "console.h" #include "rfb.h" #include "sockstream.h" +#ifndef NO_OPENSSL +#include <openssl/des.h> +#endif + static int rfb_debug = 0; #define DPRINTF(params) if (rfb_debug) printf params #define WPRINTF(params) printf params +#define AUTH_LENGTH 16 +#define PASSWD_LENGTH 8 + +#define SECURITY_TYPE_NONE 1 +#define SECURITY_TYPE_VNC_AUTH 2 + +#define AUTH_FAILED_UNAUTH 1 +#define AUTH_FAILED_ERROR 2 + struct rfb_softc { int sfd; pthread_t tid; int cfd; int width, height; - bool enc_raw_ok; - bool enc_zlib_ok; - bool enc_resize_ok; + char *password; + bool enc_raw_ok; + bool enc_zlib_ok; + bool enc_resize_ok; + z_stream zstream; uint8_t *zbuf; int zbuflen; int conn_wait; int sending; pthread_mutex_t mtx; pthread_cond_t cond; int hw_crc; uint32_t *crc; /* WxH crc cells */ uint32_t *crc_tmp; /* buffer to store single crc row */ int crc_width, crc_height; }; struct rfb_pixfmt { uint8_t bpp; uint8_t depth; uint8_t bigendian; uint8_t truecolor; uint16_t red_max; uint16_t green_max; uint16_t blue_max; uint8_t red_shift; uint8_t green_shift; uint8_t blue_shift; uint8_t pad[3]; }; struct rfb_srvr_info { uint16_t width; uint16_t height; struct rfb_pixfmt pixfmt; uint32_t namelen; }; struct rfb_pixfmt_msg { uint8_t type; uint8_t pad[3]; struct rfb_pixfmt pixfmt; }; #define RFB_ENCODING_RAW 0 #define RFB_ENCODING_ZLIB 6 #define RFB_ENCODING_RESIZE -223 #define RFB_MAX_WIDTH 2000 #define RFB_MAX_HEIGHT 1200 #define RFB_ZLIB_BUFSZ RFB_MAX_WIDTH*RFB_MAX_HEIGHT*4 /* percentage changes to screen before sending the entire screen */ #define RFB_SEND_ALL_THRESH 25 struct rfb_enc_msg { uint8_t type; uint8_t pad; uint16_t numencs; }; struct rfb_updt_msg { uint8_t type; uint8_t incremental; uint16_t x; uint16_t y; uint16_t width; uint16_t height; }; struct rfb_key_msg { uint8_t type; uint8_t down; uint16_t pad; uint32_t code; }; struct rfb_ptr_msg { uint8_t type; uint8_t button; uint16_t x; uint16_t y; }; struct rfb_srvr_updt_msg { uint8_t type; uint8_t pad; uint16_t numrects; }; struct rfb_srvr_rect_hdr { uint16_t x; uint16_t y; uint16_t width; uint16_t height; uint32_t encoding; }; struct rfb_cuttext_msg { uint8_t type; uint8_t padding[3]; uint32_t length; }; static void rfb_send_server_init_msg(int cfd) { struct bhyvegc_image *gc_image; struct rfb_srvr_info sinfo; gc_image = console_get_image(); sinfo.width = htons(gc_image->width); sinfo.height = htons(gc_image->height); sinfo.pixfmt.bpp = 32; sinfo.pixfmt.depth = 32; sinfo.pixfmt.bigendian = 0; sinfo.pixfmt.truecolor = 1; sinfo.pixfmt.red_max = htons(255); sinfo.pixfmt.green_max = htons(255); sinfo.pixfmt.blue_max = htons(255); sinfo.pixfmt.red_shift = 16; sinfo.pixfmt.green_shift = 8; sinfo.pixfmt.blue_shift = 0; sinfo.namelen = htonl(strlen("bhyve")); (void)stream_write(cfd, &sinfo, sizeof(sinfo)); (void)stream_write(cfd, "bhyve", strlen("bhyve")); } static void rfb_send_resize_update_msg(struct rfb_softc *rc, int cfd) { struct rfb_srvr_updt_msg supdt_msg; - struct rfb_srvr_rect_hdr srect_hdr; + struct rfb_srvr_rect_hdr srect_hdr; /* Number of rectangles: 1 */ supdt_msg.type = 0; supdt_msg.pad = 0; supdt_msg.numrects = htons(1); stream_write(cfd, &supdt_msg, sizeof(struct rfb_srvr_updt_msg)); /* Rectangle header */ srect_hdr.x = htons(0); srect_hdr.y = htons(0); srect_hdr.width =
htons(rc->width); srect_hdr.height = htons(rc->height); srect_hdr.encoding = htonl(RFB_ENCODING_RESIZE); stream_write(cfd, &srect_hdr, sizeof(struct rfb_srvr_rect_hdr)); } static void rfb_recv_set_pixfmt_msg(struct rfb_softc *rc, int cfd) { struct rfb_pixfmt_msg pixfmt_msg; (void)stream_read(cfd, ((void *)&pixfmt_msg)+1, sizeof(pixfmt_msg)-1); } static void rfb_recv_set_encodings_msg(struct rfb_softc *rc, int cfd) { struct rfb_enc_msg enc_msg; int i; uint32_t encoding; assert((sizeof(enc_msg) - 1) == 3); (void)stream_read(cfd, ((void *)&enc_msg)+1, sizeof(enc_msg)-1); for (i = 0; i < htons(enc_msg.numencs); i++) { (void)stream_read(cfd, &encoding, sizeof(encoding)); switch (htonl(encoding)) { case RFB_ENCODING_RAW: rc->enc_raw_ok = true; break; case RFB_ENCODING_ZLIB: rc->enc_zlib_ok = true; deflateInit(&rc->zstream, Z_BEST_SPEED); break; case RFB_ENCODING_RESIZE: rc->enc_resize_ok = true; break; } } } /* * Calculate CRC32 using SSE4.2; Intel or AMD Bulldozer+ CPUs only */ static __inline uint32_t fast_crc32(void *buf, int len, uint32_t crcval) { uint32_t q = len / sizeof(uint32_t); uint32_t *p = (uint32_t *)buf; while (q--) { asm volatile ( ".byte 0xf2, 0xf, 0x38, 0xf1, 0xf1;" :"=S" (crcval) :"0" (crcval), "c" (*p) ); p++; } return (crcval); } static int rfb_send_rect(struct rfb_softc *rc, int cfd, struct bhyvegc_image *gc, int x, int y, int w, int h) { struct rfb_srvr_updt_msg supdt_msg; struct rfb_srvr_rect_hdr srect_hdr; unsigned long zlen; ssize_t nwrite, total; int err; uint32_t *p; uint8_t *zbufp; /* * Send a single rectangle of the given x, y, w h dimensions. */ /* Number of rectangles: 1 */ supdt_msg.type = 0; supdt_msg.pad = 0; supdt_msg.numrects = htons(1); nwrite = stream_write(cfd, &supdt_msg, sizeof(struct rfb_srvr_updt_msg)); if (nwrite <= 0) return (nwrite); /* Rectangle header */ srect_hdr.x = htons(x); srect_hdr.y = htons(y); srect_hdr.width = htons(w); srect_hdr.height = htons(h); h = y + h; w *= sizeof(uint32_t); if (rc->enc_zlib_ok) { zbufp = rc->zbuf; rc->zstream.total_in = 0; rc->zstream.total_out = 0; for (p = &gc->data[y * gc->width + x]; y < h; y++) { rc->zstream.next_in = (Bytef *)p; rc->zstream.avail_in = w; rc->zstream.next_out = (Bytef *)zbufp; rc->zstream.avail_out = RFB_ZLIB_BUFSZ + 16 - rc->zstream.total_out; rc->zstream.data_type = Z_BINARY; /* Compress with zlib */ err = deflate(&rc->zstream, Z_SYNC_FLUSH); if (err != Z_OK) { WPRINTF(("zlib[rect] deflate err: %d\n", err)); rc->enc_zlib_ok = false; deflateEnd(&rc->zstream); goto doraw; } zbufp = rc->zbuf + rc->zstream.total_out; p += gc->width; } srect_hdr.encoding = htonl(RFB_ENCODING_ZLIB); nwrite = stream_write(cfd, &srect_hdr, sizeof(struct rfb_srvr_rect_hdr)); if (nwrite <= 0) return (nwrite); zlen = htonl(rc->zstream.total_out); nwrite = stream_write(cfd, &zlen, sizeof(uint32_t)); if (nwrite <= 0) return (nwrite); return (stream_write(cfd, rc->zbuf, rc->zstream.total_out)); } doraw: total = 0; zbufp = rc->zbuf; for (p = &gc->data[y * gc->width + x]; y < h; y++) { memcpy(zbufp, p, w); zbufp += w; total += w; p += gc->width; } srect_hdr.encoding = htonl(RFB_ENCODING_RAW); nwrite = stream_write(cfd, &srect_hdr, sizeof(struct rfb_srvr_rect_hdr)); if (nwrite <= 0) return (nwrite); total = stream_write(cfd, rc->zbuf, total); return (total); } static int rfb_send_all(struct rfb_softc *rc, int cfd, struct bhyvegc_image *gc) { struct rfb_srvr_updt_msg supdt_msg; struct rfb_srvr_rect_hdr srect_hdr; ssize_t nwrite; unsigned long zlen; int err; /* * Send the whole thing */ /* Number of rectangles: 1 */ 
supdt_msg.type = 0; supdt_msg.pad = 0; supdt_msg.numrects = htons(1); nwrite = stream_write(cfd, &supdt_msg, sizeof(struct rfb_srvr_updt_msg)); if (nwrite <= 0) return (nwrite); /* Rectangle header */ srect_hdr.x = 0; srect_hdr.y = 0; srect_hdr.width = htons(gc->width); srect_hdr.height = htons(gc->height); if (rc->enc_zlib_ok) { rc->zstream.next_in = (Bytef *)gc->data; rc->zstream.avail_in = gc->width * gc->height * sizeof(uint32_t); rc->zstream.next_out = (Bytef *)rc->zbuf; rc->zstream.avail_out = RFB_ZLIB_BUFSZ + 16; rc->zstream.data_type = Z_BINARY; rc->zstream.total_in = 0; rc->zstream.total_out = 0; /* Compress with zlib */ err = deflate(&rc->zstream, Z_SYNC_FLUSH); if (err != Z_OK) { WPRINTF(("zlib deflate err: %d\n", err)); rc->enc_zlib_ok = false; deflateEnd(&rc->zstream); goto doraw; } srect_hdr.encoding = htonl(RFB_ENCODING_ZLIB); nwrite = stream_write(cfd, &srect_hdr, sizeof(struct rfb_srvr_rect_hdr)); if (nwrite <= 0) return (nwrite); zlen = htonl(rc->zstream.total_out); nwrite = stream_write(cfd, &zlen, sizeof(uint32_t)); if (nwrite <= 0) return (nwrite); return (stream_write(cfd, rc->zbuf, rc->zstream.total_out)); } doraw: srect_hdr.encoding = htonl(RFB_ENCODING_RAW); nwrite = stream_write(cfd, &srect_hdr, sizeof(struct rfb_srvr_rect_hdr)); if (nwrite <= 0) return (nwrite); nwrite = stream_write(cfd, gc->data, gc->width * gc->height * sizeof(uint32_t)); return (nwrite); } #define PIX_PER_CELL 32 #define PIXCELL_SHIFT 5 #define PIXCELL_MASK 0x1F static int rfb_send_screen(struct rfb_softc *rc, int cfd, int all) { struct bhyvegc_image *gc_image; ssize_t nwrite; int x, y; int celly, cellwidth; int xcells, ycells; int w, h; uint32_t *p; int rem_x, rem_y; /* remainder for resolutions not x32 pixels ratio */ int retval; uint32_t *crc_p, *orig_crc; int changes; console_refresh(); gc_image = console_get_image(); pthread_mutex_lock(&rc->mtx); if (rc->sending) { pthread_mutex_unlock(&rc->mtx); return (1); } rc->sending = 1; pthread_mutex_unlock(&rc->mtx); retval = 0; if (all) { retval = rfb_send_all(rc, cfd, gc_image); goto done; } /* * Calculate the checksum for each 32x32 cell. Send each that * has changed since the last scan. */ /* Resolution changed */ rc->crc_width = gc_image->width; rc->crc_height = gc_image->height; w = rc->crc_width; h = rc->crc_height; xcells = howmany(rc->crc_width, PIX_PER_CELL); ycells = howmany(rc->crc_height, PIX_PER_CELL); rem_x = w & PIXCELL_MASK; rem_y = h & PIXCELL_MASK; if (!rem_y) rem_y = PIX_PER_CELL; p = gc_image->data; /* * Go through all cells and calculate crc. If significant number * of changes, then send entire screen. * crc_tmp is dual purpose: to store the new crc and to flag as * a cell that has changed. 
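 * For example, at the default 1024x768 resolution there are
 * (1024/32) x (768/32) = 32 x 24 = 768 cells, so with
 * RFB_SEND_ALL_THRESH at 25 a full-screen update is sent once 192 or
 * more cells have changed in one scan.
 */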
*/ crc_p = rc->crc_tmp - xcells; orig_crc = rc->crc - xcells; changes = 0; memset(rc->crc_tmp, 0, sizeof(uint32_t) * xcells * ycells); for (y = 0; y < h; y++) { if ((y & PIXCELL_MASK) == 0) { crc_p += xcells; orig_crc += xcells; } for (x = 0; x < xcells; x++) { if (rc->hw_crc) crc_p[x] = fast_crc32(p, PIX_PER_CELL * sizeof(uint32_t), crc_p[x]); else crc_p[x] = (uint32_t)crc32(crc_p[x], (Bytef *)p, PIX_PER_CELL * sizeof(uint32_t)); p += PIX_PER_CELL; /* check for crc delta if last row in cell */ if ((y & PIXCELL_MASK) == PIXCELL_MASK || y == (h-1)) { if (orig_crc[x] != crc_p[x]) { orig_crc[x] = crc_p[x]; crc_p[x] = 1; changes++; } else { crc_p[x] = 0; } } } if (rem_x) { if (rc->hw_crc) crc_p[x] = fast_crc32(p, rem_x * sizeof(uint32_t), crc_p[x]); else crc_p[x] = (uint32_t)crc32(crc_p[x], (Bytef *)p, rem_x * sizeof(uint32_t)); p += rem_x; if ((y & PIXCELL_MASK) == PIXCELL_MASK || y == (h-1)) { if (orig_crc[x] != crc_p[x]) { orig_crc[x] = crc_p[x]; crc_p[x] = 1; changes++; } else { crc_p[x] = 0; } } } } /* If number of changes is > THRESH percent, send the whole screen */ if (((changes * 100) / (xcells * ycells)) >= RFB_SEND_ALL_THRESH) { retval = rfb_send_all(rc, cfd, gc_image); goto done; } /* Go through all cells, and send only changed ones */ crc_p = rc->crc_tmp; for (y = 0; y < h; y += PIX_PER_CELL) { /* previous cell's row */ celly = (y >> PIXCELL_SHIFT); /* Delta check crc to previous set */ for (x = 0; x < xcells; x++) { if (*crc_p++ == 0) continue; if (x == (xcells - 1) && rem_x > 0) cellwidth = rem_x; else cellwidth = PIX_PER_CELL; nwrite = rfb_send_rect(rc, cfd, gc_image, x * PIX_PER_CELL, celly * PIX_PER_CELL, cellwidth, y + PIX_PER_CELL >= h ? rem_y : PIX_PER_CELL); if (nwrite <= 0) { retval = nwrite; goto done; } } } retval = 1; done: pthread_mutex_lock(&rc->mtx); rc->sending = 0; pthread_mutex_unlock(&rc->mtx); return (retval); } static void rfb_recv_update_msg(struct rfb_softc *rc, int cfd, int discardonly) { struct rfb_updt_msg updt_msg; struct bhyvegc_image *gc_image; (void)stream_read(cfd, ((void *)&updt_msg) + 1 , sizeof(updt_msg) - 1); console_refresh(); gc_image = console_get_image(); updt_msg.x = htons(updt_msg.x); updt_msg.y = htons(updt_msg.y); updt_msg.width = htons(updt_msg.width); updt_msg.height = htons(updt_msg.height); if (updt_msg.width != gc_image->width || updt_msg.height != gc_image->height) { rc->width = gc_image->width; rc->height = gc_image->height; if (rc->enc_resize_ok) rfb_send_resize_update_msg(rc, cfd); } if (discardonly) return; rfb_send_screen(rc, cfd, 1); } static void rfb_recv_key_msg(struct rfb_softc *rc, int cfd) { struct rfb_key_msg key_msg; (void)stream_read(cfd, ((void *)&key_msg) + 1, sizeof(key_msg) - 1); console_key_event(key_msg.down, htonl(key_msg.code)); } static void rfb_recv_ptr_msg(struct rfb_softc *rc, int cfd) { struct rfb_ptr_msg ptr_msg; (void)stream_read(cfd, ((void *)&ptr_msg) + 1, sizeof(ptr_msg) - 1); console_ptr_event(ptr_msg.button, htons(ptr_msg.x), htons(ptr_msg.y)); } static void rfb_recv_cuttext_msg(struct rfb_softc *rc, int cfd) { struct rfb_cuttext_msg ct_msg; unsigned char buf[32]; int len; len = stream_read(cfd, ((void *)&ct_msg) + 1, sizeof(ct_msg) - 1); ct_msg.length = htonl(ct_msg.length); while (ct_msg.length > 0) { len = stream_read(cfd, buf, ct_msg.length > sizeof(buf) ? 
sizeof(buf) : ct_msg.length); ct_msg.length -= len; } } static int64_t timeval_delta(struct timeval *prev, struct timeval *now) { int64_t n1, n2; n1 = now->tv_sec * 1000000 + now->tv_usec; n2 = prev->tv_sec * 1000000 + prev->tv_usec; return (n1 - n2); } static void * rfb_wr_thr(void *arg) { struct rfb_softc *rc; fd_set rfds; struct timeval tv; struct timeval prev_tv; int64_t tdiff; int cfd; int err; rc = arg; cfd = rc->cfd; prev_tv.tv_sec = 0; prev_tv.tv_usec = 0; while (rc->cfd >= 0) { FD_ZERO(&rfds); FD_SET(cfd, &rfds); tv.tv_sec = 0; tv.tv_usec = 10000; err = select(cfd+1, &rfds, NULL, NULL, &tv); if (err < 0) return (NULL); /* Determine if it's time to push screen; ~24hz */ gettimeofday(&tv, NULL); tdiff = timeval_delta(&prev_tv, &tv); if (tdiff > 40000) { prev_tv.tv_sec = tv.tv_sec; prev_tv.tv_usec = tv.tv_usec; if (rfb_send_screen(rc, cfd, 0) <= 0) { return (NULL); } } else { /* sleep */ usleep(40000 - tdiff); } } return (NULL); } void rfb_handle(struct rfb_softc *rc, int cfd) { const char *vbuf = "RFB 003.008\n"; unsigned char buf[80]; + unsigned char *message; + +#ifndef NO_OPENSSL + unsigned char challenge[AUTH_LENGTH]; + unsigned char keystr[PASSWD_LENGTH]; + unsigned char crypt_expected[AUTH_LENGTH]; + + DES_key_schedule ks; + int i; +#endif + pthread_t tid; - uint32_t sres; + uint32_t sres; int len; rc->cfd = cfd; /* 1a. Send server version */ stream_write(cfd, vbuf, strlen(vbuf)); /* 1b. Read client version */ len = read(cfd, buf, sizeof(buf)); - /* 2a. Send security type 'none' */ + /* 2a. Send security type */ buf[0] = 1; - buf[1] = 1; /* none */ +#ifndef NO_OPENSSL + if (rc->password) + buf[1] = SECURITY_TYPE_VNC_AUTH; + else + buf[1] = SECURITY_TYPE_NONE; +#else + buf[1] = SECURITY_TYPE_NONE; +#endif + stream_write(cfd, buf, 2); - /* 2b. Read agreed security type */ len = stream_read(cfd, buf, 1); - /* 2c. Write back a status of 0 */ - sres = 0; + /* 2c. Do VNC authentication */ + switch (buf[0]) { + case SECURITY_TYPE_NONE: + sres = 0; + break; + case SECURITY_TYPE_VNC_AUTH: + /* + * The client encrypts the challenge with DES, using a password + * supplied by the user as the key. + * To form the key, the password is truncated to + * eight characters, or padded with null bytes on the right. + * The client then sends the resulting 16-byte response. + */ +#ifndef NO_OPENSSL + strncpy(keystr, rc->password, PASSWD_LENGTH); + + /* VNC clients encrypt the challenge with the bit order of + each byte of the password mirrored (reversed). + Here we flip each byte of the keystr accordingly.
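+ For example, the password byte 0x52 (binary 01010010) becomes
+ 0x4a (binary 01001010) after the nibble swap, bit-pair swap,
+ and adjacent-bit swap performed below.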
+ */ + for (i = 0; i < PASSWD_LENGTH; i++) { + keystr[i] = (keystr[i] & 0xF0) >> 4 + | (keystr[i] & 0x0F) << 4; + keystr[i] = (keystr[i] & 0xCC) >> 2 + | (keystr[i] & 0x33) << 2; + keystr[i] = (keystr[i] & 0xAA) >> 1 + | (keystr[i] & 0x55) << 1; + } + + /* Initialize a 16-byte random challenge */ + arc4random_buf(challenge, sizeof(challenge)); + stream_write(cfd, challenge, AUTH_LENGTH); + + /* Receive the 16-byte challenge response */ + stream_read(cfd, buf, AUTH_LENGTH); + + memcpy(crypt_expected, challenge, AUTH_LENGTH); + + /* Encrypt the Challenge with DES */ + DES_set_key((C_Block *)keystr, &ks); + DES_ecb_encrypt((C_Block *)challenge, + (C_Block *)crypt_expected, &ks, DES_ENCRYPT); + DES_ecb_encrypt((C_Block *)(challenge + PASSWD_LENGTH), + (C_Block *)(crypt_expected + PASSWD_LENGTH), + &ks, DES_ENCRYPT); + + if (memcmp(crypt_expected, buf, AUTH_LENGTH) != 0) { + message = "Auth Failed: Invalid Password."; + sres = htonl(1); + } else + sres = 0; +#else + sres = 0; + WPRINTF(("Auth not supported, no OpenSSL in your system")); +#endif + + break; + } + + /* 2d. Write back a status */ stream_write(cfd, &sres, 4); + if (sres) { + *((uint32_t *) buf) = htonl(strlen(message)); + stream_write(cfd, buf, 4); + stream_write(cfd, message, strlen(message)); + goto done; + } + /* 3a. Read client shared-flag byte */ len = stream_read(cfd, buf, 1); /* 4a. Write server-init info */ rfb_send_server_init_msg(cfd); if (!rc->zbuf) { rc->zbuf = malloc(RFB_ZLIB_BUFSZ + 16); assert(rc->zbuf != NULL); } rfb_send_screen(rc, cfd, 1); pthread_create(&tid, NULL, rfb_wr_thr, rc); pthread_set_name_np(tid, "rfbout"); /* Now read in client requests. 1st byte identifies type */ for (;;) { len = read(cfd, buf, 1); if (len <= 0) { DPRINTF(("rfb client exiting\r\n")); break; } switch (buf[0]) { case 0: rfb_recv_set_pixfmt_msg(rc, cfd); break; case 2: rfb_recv_set_encodings_msg(rc, cfd); break; case 3: rfb_recv_update_msg(rc, cfd, 1); break; case 4: rfb_recv_key_msg(rc, cfd); break; case 5: rfb_recv_ptr_msg(rc, cfd); break; case 6: rfb_recv_cuttext_msg(rc, cfd); break; default: WPRINTF(("rfb unknown cli-code %d!\n", buf[0] & 0xff)); goto done; } } done: rc->cfd = -1; pthread_join(tid, NULL); if (rc->enc_zlib_ok) deflateEnd(&rc->zstream); } static void * rfb_thr(void *arg) { struct rfb_softc *rc; sigset_t set; int cfd; rc = arg; sigemptyset(&set); sigaddset(&set, SIGPIPE); if (pthread_sigmask(SIG_BLOCK, &set, NULL) != 0) { perror("pthread_sigmask"); return (NULL); } for (;;) { rc->enc_raw_ok = false; rc->enc_zlib_ok = false; rc->enc_resize_ok = false; cfd = accept(rc->sfd, NULL, NULL); if (rc->conn_wait) { pthread_mutex_lock(&rc->mtx); pthread_cond_signal(&rc->cond); pthread_mutex_unlock(&rc->mtx); rc->conn_wait = 0; } rfb_handle(rc, cfd); close(cfd); } /* NOTREACHED */ return (NULL); } static int sse42_supported(void) { u_int cpu_registers[4], ecx; do_cpuid(1, cpu_registers); ecx = cpu_registers[2]; return ((ecx & CPUID2_SSE42) != 0); } int -rfb_init(char *hostname, int port, int wait) +rfb_init(char *hostname, int port, int wait, char *password) { struct rfb_softc *rc; struct sockaddr_in sin; int on = 1; #ifndef WITHOUT_CAPSICUM cap_rights_t rights; #endif rc = calloc(1, sizeof(struct rfb_softc)); rc->crc = calloc(howmany(RFB_MAX_WIDTH * RFB_MAX_HEIGHT, 32), sizeof(uint32_t)); rc->crc_tmp = calloc(howmany(RFB_MAX_WIDTH * RFB_MAX_HEIGHT, 32), sizeof(uint32_t)); rc->crc_width = RFB_MAX_WIDTH; rc->crc_height = RFB_MAX_HEIGHT; + + rc->password = password; rc->sfd = socket(AF_INET, SOCK_STREAM, 0); if (rc->sfd < 0) { 
perror("socket"); return (-1); } setsockopt(rc->sfd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)); sin.sin_len = sizeof(sin); sin.sin_family = AF_INET; sin.sin_port = port ? htons(port) : htons(5900); if (hostname && strlen(hostname) > 0) inet_pton(AF_INET, hostname, &(sin.sin_addr)); else sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); if (bind(rc->sfd, (struct sockaddr *)&sin, sizeof(sin)) < 0) { perror("bind"); return (-1); } if (listen(rc->sfd, 1) < 0) { perror("listen"); return (-1); } #ifndef WITHOUT_CAPSICUM cap_rights_init(&rights, CAP_ACCEPT, CAP_EVENT, CAP_READ, CAP_WRITE); if (cap_rights_limit(rc->sfd, &rights) == -1 && errno != ENOSYS) errx(EX_OSERR, "Unable to apply rights for sandbox"); #endif rc->hw_crc = sse42_supported(); rc->conn_wait = wait; if (wait) { pthread_mutex_init(&rc->mtx, NULL); pthread_cond_init(&rc->cond, NULL); } pthread_create(&rc->tid, NULL, rfb_thr, rc); pthread_set_name_np(rc->tid, "rfb"); if (wait) { DPRINTF(("Waiting for rfb client...\n")); pthread_mutex_lock(&rc->mtx); pthread_cond_wait(&rc->cond, &rc->mtx); pthread_mutex_unlock(&rc->mtx); } return (0); } Property changes on: projects/clang500-import/usr.sbin/bhyve/rfb.c ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/usr.sbin/bhyve/rfb.c:r317808-319547 Index: projects/clang500-import/usr.sbin/bhyve/rfb.h =================================================================== --- projects/clang500-import/usr.sbin/bhyve/rfb.h (revision 319548) +++ projects/clang500-import/usr.sbin/bhyve/rfb.h (revision 319549) @@ -1,36 +1,36 @@ /*- * Copyright (c) 2015 Tycho Nightingale * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _RFB_H_ #define _RFB_H_ #define RFB_PORT 5900 -int rfb_init(char *hostname, int port, int wait); +int rfb_init(char *hostname, int port, int wait, char *password); #endif /* _RFB_H_ */ Property changes on: projects/clang500-import/usr.sbin/bhyve/rfb.h ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/usr.sbin/bhyve/rfb.h:r316992-319547 Index: projects/clang500-import/usr.sbin/makefs/ffs/buf.c =================================================================== --- projects/clang500-import/usr.sbin/makefs/ffs/buf.c (revision 319548) +++ projects/clang500-import/usr.sbin/makefs/ffs/buf.c (revision 319549) @@ -1,214 +1,214 @@ /* $NetBSD: buf.c,v 1.13 2004/06/20 22:20:18 jmc Exp $ */ /* * Copyright (c) 2001 Wasabi Systems, Inc. * All rights reserved. * * Written by Luke Mewburn for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include "makefs.h" #include "buf.h" static TAILQ_HEAD(buftailhead,buf) buftail; int bread(struct vnode *vp, daddr_t blkno, int size, struct ucred *u1 __unused, struct buf **bpp) { off_t offset; ssize_t rv; - fsinfo_t *fsinfo = vp->fs; + fsinfo_t *fs = vp->fs; assert (bpp != NULL); if (debug & DEBUG_BUF_BREAD) printf("%s: blkno %lld size %d\n", __func__, (long long)blkno, size); *bpp = getblk(vp, blkno, size, 0, 0, 0); - offset = (*bpp)->b_blkno * fsinfo->sectorsize + fsinfo->offset; + offset = (*bpp)->b_blkno * fs->sectorsize + fs->offset; if (debug & DEBUG_BUF_BREAD) printf("%s: blkno %lld offset %lld bcount %ld\n", __func__, (long long)(*bpp)->b_blkno, (long long) offset, (*bpp)->b_bcount); if (lseek((*bpp)->b_fs->fd, offset, SEEK_SET) == -1) err(1, "%s: lseek %lld (%lld)", __func__, (long long)(*bpp)->b_blkno, (long long)offset); rv = read((*bpp)->b_fs->fd, (*bpp)->b_data, (*bpp)->b_bcount); if (debug & DEBUG_BUF_BREAD) printf("%s: read %ld (%lld) returned %d\n", __func__, (*bpp)->b_bcount, (long long)offset, (int)rv); if (rv == -1) /* read error */ err(1, "%s: read %ld (%lld) returned %d", __func__, (*bpp)->b_bcount, (long long)offset, (int)rv); else if (rv != (*bpp)->b_bcount) /* short read */ err(1, "%s: read %ld (%lld) returned %d", __func__, (*bpp)->b_bcount, (long long)offset, (int)rv); else return (0); } void brelse(struct buf *bp, int u1 __unused) { assert (bp != NULL); assert (bp->b_data != NULL); if (bp->b_lblkno < 0) { /* * XXX don't remove any buffers with negative logical block * numbers (lblkno), so that we retain the mapping * of negative lblkno -> real blkno that ffs_balloc() * sets up. * * if we instead released these buffers, and implemented * ufs_strategy() (and ufs_bmaparray()) and called those * from bread() and bwrite() to convert the lblkno to * a real blkno, we'd add a lot more code & complexity * and reading off disk, for little gain, because this * simple hack works for our purpose. */ bp->b_bcount = 0; return; } TAILQ_REMOVE(&buftail, bp, b_tailq); free(bp->b_data); free(bp); } int bwrite(struct buf *bp) { off_t offset; ssize_t rv; fsinfo_t *fs = bp->b_fs; assert (bp != NULL); offset = bp->b_blkno * fs->sectorsize + fs->offset; if (debug & DEBUG_BUF_BWRITE) printf("bwrite: blkno %lld offset %lld bcount %ld\n", (long long)bp->b_blkno, (long long) offset, bp->b_bcount); if (lseek(bp->b_fs->fd, offset, SEEK_SET) == -1) return (errno); rv = write(bp->b_fs->fd, bp->b_data, bp->b_bcount); if (debug & DEBUG_BUF_BWRITE) printf("bwrite: write %ld (offset %lld) returned %lld\n", bp->b_bcount, (long long)offset, (long long)rv); if (rv == bp->b_bcount) return (0); else if (rv == -1) /* write error */ return (errno); else /* short write ? 
*/ return (EAGAIN); } void bcleanup(void) { struct buf *bp; /* * XXX this really shouldn't be necessary, but i'm curious to * know why there's still some buffers lying around that * aren't brelse()d */ if (TAILQ_EMPTY(&buftail)) return; printf("bcleanup: unflushed buffers:\n"); TAILQ_FOREACH(bp, &buftail, b_tailq) { printf("\tlblkno %10lld blkno %10lld count %6ld bufsize %6ld\n", (long long)bp->b_lblkno, (long long)bp->b_blkno, bp->b_bcount, bp->b_bufsize); } printf("bcleanup: done\n"); } struct buf * getblk(struct vnode *vp, daddr_t blkno, int size, int u1 __unused, int u2 __unused, int u3 __unused) { static int buftailinitted; struct buf *bp; void *n; if (debug & DEBUG_BUF_GETBLK) printf("getblk: blkno %lld size %d\n", (long long)blkno, size); bp = NULL; if (!buftailinitted) { if (debug & DEBUG_BUF_GETBLK) printf("getblk: initialising tailq\n"); TAILQ_INIT(&buftail); buftailinitted = 1; } else { TAILQ_FOREACH(bp, &buftail, b_tailq) { if (bp->b_lblkno != blkno) continue; break; } } if (bp == NULL) { bp = ecalloc(1, sizeof(*bp)); bp->b_bufsize = 0; bp->b_blkno = bp->b_lblkno = blkno; bp->b_fs = vp->fs; bp->b_data = NULL; TAILQ_INSERT_HEAD(&buftail, bp, b_tailq); } bp->b_bcount = size; if (bp->b_data == NULL || bp->b_bcount > bp->b_bufsize) { n = erealloc(bp->b_data, size); memset(n, 0, size); bp->b_data = n; bp->b_bufsize = size; } return (bp); } Index: projects/clang500-import/usr.sbin/newsyslog/tests/legacy_test.sh =================================================================== --- projects/clang500-import/usr.sbin/newsyslog/tests/legacy_test.sh (revision 319548) +++ projects/clang500-import/usr.sbin/newsyslog/tests/legacy_test.sh (revision 319549) @@ -1,540 +1,539 @@ #!/bin/sh # $FreeBSD$ # A regular expression matching the format of an RFC-5424 log line header, # including the timestamp up through the seconds indicator; it does not include # the (optional) timezone offset. RFC5424_FMT='^<[0-9][0-9]*>1 [0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}' # A regular expression matching the format of an RFC-3164 (traditional syslog) # log line header, including the timestamp. RFC3164_FMT='^[A-Z][a-z]{2} [ 0-9][0-9] [0-9]{2}:[0-9]{2}:[0-9]{2}' COUNT=0 TMPDIR=$(pwd)/work if [ $? -ne 0 ]; then echo "$0: Can't create temp dir, exiting..." exit 1 fi # Begin an individual test begin() { COUNT=`expr $COUNT + 1` OK=1 NAME="$1" } # End an individual test end() { local message if [ $OK = 1 ] then message='ok ' else message='not ok ' fi message="$message $COUNT - $NAME" if [ -n "$TODO" ] then message="$message # TODO $TODO" fi echo "$message" } # Make a file that can later be verified mkf() { CN=`basename $1` echo "$CN-$CN" >$1 } # Verify that the file specified is correct ckf() { if [ -f $2 ] && echo "$1-$1" | diff - $2 >/dev/null then ok else notok fi } # Check that a file exists ckfe() { if [ -f $1 ] then ok else notok fi } # Verify that the specified file does not exist # (is not there) cknt() { if [ -r $1 ] then notok else ok fi } # Check if a file is there, depending of if it's supposed to or not - # basically how many log files we are supposed to keep vs. how many we # actually keep. ckntfe() { curcnt=$1 keepcnt=$2 f=$3 if [ $curcnt -le $keepcnt ] then #echo Assuming file there ckfe $f else #echo Assuming file NOT there cknt $f fi } # Verify that the specified file has RFC-5424 rotation messages. 
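# (For example, a header accepted by RFC5424_FMT above looks like
# "<45>1 2017-06-02T12:00:00", while RFC3164_FMT accepts headers such as
# "Jun  2 12:00:00".)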
ckrfc5424() { local lc=$(wc -l $1 | cut -w -f2) local rc=$(grep -cE "${RFC5424_FMT}" $1) if [ "$lc" -eq 0 -o "$rc" -eq 0 -o "$lc" -ne "$rc" ] then notok else ok fi } # Verify that the specified file has RFC-3164 rotation messages. ckrfc3164() { local lc=$(wc -l $1 | cut -w -f2) local rc=$(grep -cE "${RFC3164_FMT}" $1) if [ "$lc" -eq 0 -o "$rc" -eq 0 -o "$lc" -ne "$rc" ] then notok else ok fi } # A part of a test succeeds ok() { : } # A part of a test fails notok() { OK=0 } # Verify that the exit code passed is for unsuccessful termination ckfail() { if [ $1 -gt 0 ] then ok else notok fi } # Verify that the exit code passed is for successful termination ckok() { if [ $1 -eq 0 ] then ok else notok fi } # Check that there are X files which match expr chkfcnt() { cnt=$1; shift if [ $cnt -eq `echo "$@" | wc -w` ] then ok else notok fi } # Check that two strings are alike ckstr() { if [ "$1" = "$2" ] then ok else notok fi } tmpdir_create() { mkdir -p ${TMPDIR}/log ${TMPDIR}/alog cd ${TMPDIR}/log } tmpdir_clean() { cd ${TMPDIR} rm -rf "${TMPDIR}/log" "${TMPDIR}/alog" newsyslog.conf } run_newsyslog() { newsyslog -f ../newsyslog.conf -F -r "$@" } tests_normal_rotate() { ext="$1" dir="$2" if [ -n "$dir" ]; then newsyslog_args=" -a ${dir}" name_postfix="${ext} archive dir" else newsyslog_args="" name_postfix="${ext}" fi tmpdir_create begin "create file ${name_postfix}" -newdir run_newsyslog -C ckfe $LOGFNAME cknt ${dir}${LOGFNAME}.0${ext} end begin "rotate normal 1 ${name_postfix}" run_newsyslog $newsyslog_args ckfe ${LOGFNAME} ckfe ${dir}${LOGFNAME}.0${ext} cknt ${dir}${LOGFNAME}.1${ext} end begin "rotate normal 2 ${name_postfix}" run_newsyslog $newsyslog_args ckfe ${LOGFNAME} ckfe ${dir}${LOGFNAME}.0${ext} ckfe ${dir}${LOGFNAME}.1${ext} cknt ${dir}${LOGFNAME}.2${ext} end begin "rotate normal 3 ${name_postfix}" run_newsyslog $newsyslog_args ckfe ${LOGFNAME} ckfe ${dir}${LOGFNAME}.0${ext} ckfe ${dir}${LOGFNAME}.1${ext} ckfe ${dir}${LOGFNAME}.2${ext} cknt ${dir}${LOGFNAME}.3${ext} end begin "rotate normal 4 ${name_postfix}" run_newsyslog $newsyslog_args ckfe ${LOGFNAME} ckfe ${dir}${LOGFNAME}.0${ext} ckfe ${dir}${LOGFNAME}.1${ext} ckfe ${dir}${LOGFNAME}.2${ext} cknt ${dir}${LOGFNAME}.4${ext} end begin "rotate normal 5 ${name_postfix}" run_newsyslog $newsyslog_args ckfe ${LOGFNAME} ckfe ${dir}${LOGFNAME}.0${ext} ckfe ${dir}${LOGFNAME}.1${ext} ckfe ${dir}${LOGFNAME}.2${ext} cknt ${dir}${LOGFNAME}.4${ext} end # Wait a bit so we can see if the noaction test rotates files sleep 1.1 begin "noaction ${name_postfix}" ofiles=`ls -Tl ${dir}${LOGFNAME}.*${ext} | tr -d '\n'` run_newsyslog ${newsyslog_args} -n >/dev/null ckfe ${LOGFNAME} ckstr "$ofiles" "`ls -lT ${dir}${LOGFNAME}.*${ext} | tr -d '\n'`" end tmpdir_clean } tests_normal_rotate_keepn() { cnt="$1" ext="$2" dir="$3" if [ -n "$dir" ]; then newsyslog_args=" -a ${dir}" name_postfix="${ext} archive dir" else newsyslog_args="" name_postfix="${ext}" fi tmpdir_create begin "create file ${name_postfix}" -newdir run_newsyslog -C ckfe $LOGFNAME cknt ${dir}${LOGFNAME}.0${ext} end begin "rotate normal 1 cnt=$cnt ${name_postfix}" run_newsyslog $newsyslog_args ckfe ${LOGFNAME} ckntfe 1 $cnt ${dir}${LOGFNAME}.0${ext} cknt ${dir}${LOGFNAME}.1${ext} end begin "rotate normal 2 cnt=$cnt ${name_postfix}" run_newsyslog $newsyslog_args ckfe ${LOGFNAME} ckntfe 1 $cnt ${dir}${LOGFNAME}.0${ext} ckntfe 2 $cnt ${dir}${LOGFNAME}.1${ext} cknt ${dir}${LOGFNAME}.2${ext} end begin "rotate normal 3 cnt=$cnt ${name_postfix}" run_newsyslog $newsyslog_args ckfe ${LOGFNAME} ckntfe 1 $cnt 
tests_normal_rotate_keepn() {
	cnt="$1"
	ext="$2"
	dir="$3"

	if [ -n "$dir" ]; then
		newsyslog_args=" -a ${dir}"
		name_postfix="${ext} archive dir"
	else
		newsyslog_args=""
		name_postfix="${ext}"
	fi

	tmpdir_create

	begin "create file ${name_postfix}" -newdir
	run_newsyslog -C
	ckfe $LOGFNAME
	cknt ${dir}${LOGFNAME}.0${ext}
	end

	begin "rotate normal 1 cnt=$cnt ${name_postfix}"
	run_newsyslog $newsyslog_args
	ckfe ${LOGFNAME}
	ckntfe 1 $cnt ${dir}${LOGFNAME}.0${ext}
	cknt ${dir}${LOGFNAME}.1${ext}
	end

	begin "rotate normal 2 cnt=$cnt ${name_postfix}"
	run_newsyslog $newsyslog_args
	ckfe ${LOGFNAME}
	ckntfe 1 $cnt ${dir}${LOGFNAME}.0${ext}
	ckntfe 2 $cnt ${dir}${LOGFNAME}.1${ext}
	cknt ${dir}${LOGFNAME}.2${ext}
	end

	begin "rotate normal 3 cnt=$cnt ${name_postfix}"
	run_newsyslog $newsyslog_args
	ckfe ${LOGFNAME}
	ckntfe 1 $cnt ${dir}${LOGFNAME}.0${ext}
	ckntfe 2 $cnt ${dir}${LOGFNAME}.1${ext}
	ckntfe 3 $cnt ${dir}${LOGFNAME}.2${ext}
	cknt ${dir}${LOGFNAME}.3${ext}
	end

	begin "rotate normal 4 cnt=$cnt ${name_postfix}"
	run_newsyslog $newsyslog_args
	ckfe ${LOGFNAME}
	ckntfe 1 $cnt ${dir}${LOGFNAME}.0${ext}
	ckntfe 2 $cnt ${dir}${LOGFNAME}.1${ext}
	ckntfe 3 $cnt ${dir}${LOGFNAME}.2${ext}
	ckntfe 4 $cnt ${dir}${LOGFNAME}.3${ext}
	cknt ${dir}${LOGFNAME}.4${ext}
	end

	# Wait a bit so we can see if the noaction test rotates files
	sleep 1.1

	begin "noaction ${name_postfix}"
	osum=`md5 ${dir}${LOGFNAME} | tr -d '\n'`
	run_newsyslog ${newsyslog_args} -n >/dev/null
	ckfe ${LOGFNAME}
	ckstr "$osum" "`md5 ${dir}${LOGFNAME} | tr -d '\n'`"
	end

	tmpdir_clean
}

tests_time_rotate() {
	ext="$1"
	dir="$2"

	if [ -n "$dir" ]; then
		newsyslog_args="-t DEFAULT -a ${dir}"
		name_postfix="${ext} archive dir"
	else
		newsyslog_args="-t DEFAULT"
		name_postfix="${ext}"
	fi

	tmpdir_create

	begin "create file ${name_postfix}" -newdir
	run_newsyslog -C ${newsyslog_args}
	ckfe ${LOGFNAME}
	end

	begin "rotate time 1 ${name_postfix}"
	run_newsyslog ${newsyslog_args}
	ckfe ${LOGFNAME}
	chkfcnt 1 ${dir}${LOGFNAME}.*${ext}
	end

	sleep 1.1

-	(
	TODO="rotate time 2-4 fail today; bug 212160"
	begin "rotate time 2 ${name_postfix}"
	run_newsyslog ${newsyslog_args}
	ckfe ${LOGFNAME}
	chkfcnt 2 ${dir}${LOGFNAME}.*${ext}
	end

	sleep 1.1

	begin "rotate time 3 ${name_postfix}"
	run_newsyslog ${newsyslog_args}
	ckfe ${LOGFNAME}
	chkfcnt 3 ${dir}${LOGFNAME}.*${ext}
	end

	sleep 1.1

	begin "rotate time 4 ${name_postfix}"
	run_newsyslog ${newsyslog_args}
	ckfe ${LOGFNAME}
	chkfcnt 3 ${dir}${LOGFNAME}.*${ext}
	end
-	)
+	unset TODO

	begin "noaction ${name_postfix}"
	ofiles=`ls -1 ${dir}${LOGFNAME}.*${ext} | tr -d '\n'`
	run_newsyslog ${newsyslog_args} -n >/dev/null
	ckfe ${LOGFNAME}
	ckstr "$ofiles" "`ls -1 ${dir}${LOGFNAME}.*${ext} | tr -d '\n'`"
	end

	tmpdir_clean
}

tests_rfc5424() {
	ext="$1"
	dir="$2"

	if [ -n "$dir" ]; then
		newsyslog_args=" -a ${dir}"
		name_postfix="${ext} archive dir"
	else
		newsyslog_args=""
		name_postfix="${ext}"
	fi

	tmpdir_create

	begin "RFC-5424 - create file ${name_postfix}" -newdir
	run_newsyslog -C
	ckfe $LOGFNAME
	cknt ${dir}${LOGFNAME}.0${ext}
	ckfe $LOGFNAME5424
	cknt ${dir}${LOGFNAME5424}.0${ext}
	ckrfc3164 ${LOGFNAME}
	ckrfc5424 ${LOGFNAME5424}
	end

	begin "RFC-5424 - rotate normal 1 ${name_postfix}"
	run_newsyslog $newsyslog_args
	ckfe ${LOGFNAME}
	ckfe ${dir}${LOGFNAME}.0${ext}
	ckfe $LOGFNAME5424
	ckfe ${dir}${LOGFNAME5424}.0${ext}
	ckrfc3164 ${LOGFNAME}
	ckrfc3164 ${dir}${LOGFNAME}.0${ext}
	ckrfc5424 ${LOGFNAME5424}
	ckrfc5424 ${dir}${LOGFNAME5424}.0${ext}
	end

	tmpdir_clean
}

-echo 1..126
+echo 1..128

mkdir -p ${TMPDIR}
cd ${TMPDIR}

LOGFNAME=foo.log
LOGFPATH=${TMPDIR}/log/${LOGFNAME}

# Log file for RFC-5424 testing
LOGFNAME5424=foo5424.log
LOGFPATH5424=${TMPDIR}/log/${LOGFNAME5424}

# Normal, no archive dir, keep X files
echo "$LOGFPATH 640 0 * @T00 NC" > newsyslog.conf
tests_normal_rotate_keepn 0
echo "$LOGFPATH 640 1 * @T00 NC" > newsyslog.conf
tests_normal_rotate_keepn 1
echo "$LOGFPATH 640 2 * @T00 NC" > newsyslog.conf
tests_normal_rotate_keepn 2
echo "$LOGFPATH 640 3 * @T00 NC" > newsyslog.conf
tests_normal_rotate_keepn 3

# Normal, no archive dir, keep X files, gz
echo "$LOGFPATH 640 0 * @T00 NCZ" > newsyslog.conf
tests_normal_rotate_keepn 0 ".gz"
echo "$LOGFPATH 640 1 * @T00 NCZ" > newsyslog.conf
tests_normal_rotate_keepn 1 ".gz"
echo "$LOGFPATH 640 2 * @T00 NCZ" > newsyslog.conf
tests_normal_rotate_keepn 2 ".gz"
echo "$LOGFPATH 640 3 * @T00 NCZ" > newsyslog.conf
tests_normal_rotate_keepn 3 ".gz"
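Each generated configuration line follows the standard newsyslog.conf(5) field order: logfilename, mode, count, size, when, flags (owner:group is omitted here). An annotated sketch of the line shape used throughout this suite, comments only:

	# logfilename  mode count size when  flags
	# $LOGFPATH    640  3    *    @T00  NCZ
	#
	#   640   rotated files are created with mode 0640
	#   3     keep three rotated files before deleting the oldest
	#   *     no size-based rotation trigger
	#   @T00  time-based trigger: rotate at hour 00 (midnight)
	#   N     no process needs to be signaled after rotation
	#   C     create the log file if it does not already exist
	#   Z     compress rotated files with gzip (J selects bzip2;
	#         T writes the rotation message in RFC-5424 format,
	#         as exercised by tests_rfc5424)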
# Normal, no archive dir
echo "$LOGFPATH 640 3 * @T00 NC" > newsyslog.conf
tests_normal_rotate
echo "$LOGFPATH 640 3 * @T00 NCZ" > newsyslog.conf
tests_normal_rotate ".gz"
echo "$LOGFPATH 640 3 * @T00 NCJ" > newsyslog.conf
tests_normal_rotate ".bz2"

# Normal, archive dir
echo "$LOGFPATH 640 3 * @T00 NC" > newsyslog.conf
tests_normal_rotate "" "${TMPDIR}/alog/"
echo "$LOGFPATH 640 3 * @T00 NCZ" > newsyslog.conf
tests_normal_rotate ".gz" "${TMPDIR}/alog/"
echo "$LOGFPATH 640 3 * @T00 NCJ" > newsyslog.conf
tests_normal_rotate ".bz2" "${TMPDIR}/alog/"

# Time based, no archive dir
echo "$LOGFPATH 640 3 * @T00 NC" > newsyslog.conf
tests_time_rotate
echo "$LOGFPATH 640 3 * @T00 NCZ" > newsyslog.conf
tests_time_rotate "gz" ""
echo "$LOGFPATH 640 3 * @T00 NCJ" > newsyslog.conf
tests_time_rotate "bz2" ""

# Time based, archive dir
echo "$LOGFPATH 640 3 * @T00 NC" > newsyslog.conf
tests_time_rotate "" "${TMPDIR}/alog/"
echo "$LOGFPATH 640 3 * @T00 NCZ" > newsyslog.conf
tests_time_rotate "gz" "${TMPDIR}/alog/"
echo "$LOGFPATH 640 3 * @T00 NCJ" > newsyslog.conf
tests_time_rotate "bz2" "${TMPDIR}/alog/"

# RFC-5424; Normal, no archive dir
echo "$LOGFPATH5424 640 3 * @T00 NCT" > newsyslog.conf
echo "$LOGFPATH 640 3 * @T00 NC" >> newsyslog.conf
tests_rfc5424

rm -rf "${TMPDIR}"
Index: projects/clang500-import
===================================================================
--- projects/clang500-import	(revision 319548)
+++ projects/clang500-import	(revision 319549)

Property changes on: projects/clang500-import
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /head:r319480-319547
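The script above is a plain TAP producer: it prints its plan (1..128) first, then one numbered result line per begin/end pair. A sketch of a direct run; the source path and the sample output names are assumptions, not captured output:

	# Run from a scratch directory; the script creates and removes ./work itself.
	sh /usr/src/usr.sbin/newsyslog/tests/legacy_test.sh
	# Expected shape of the output:
	#   1..128
	#   ok  1 - create file
	#   ok  2 - rotate normal 1 cnt=0
	#   ...
	# Cases executed while $TODO is set carry a trailing
	# "# TODO rotate time 2-4 fail today; bug 212160" directive,
	# which TAP harnesses treat as expected failures.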