Index: projects/clang700-import/Makefile.inc1 =================================================================== --- projects/clang700-import/Makefile.inc1 (revision 340426) +++ projects/clang700-import/Makefile.inc1 (revision 340427) @@ -1,3353 +1,3353 @@ # # $FreeBSD$ # # Make command line options: # -DNO_CLEANDIR run ${MAKE} clean, instead of ${MAKE} cleandir # -DNO_CLEAN do not clean at all # -DDB_FROM_SRC use the user/group databases in src/etc instead of # the system database when installing. # -DNO_SHARE do not go into share subdir # -DKERNFAST define NO_KERNEL{CONFIG,CLEAN,OBJ} # -DNO_KERNELCONFIG do not run config in ${MAKE} buildkernel # -DNO_KERNELCLEAN do not run ${MAKE} clean in ${MAKE} buildkernel # -DNO_KERNELOBJ do not run ${MAKE} obj in ${MAKE} buildkernel # -DNO_PORTSUPDATE do not update ports in ${MAKE} update # -DNO_ROOT install without using root privilege # -DNO_DOCUPDATE do not update doc in ${MAKE} update # -DWITHOUT_CTF do not run the DTrace CTF conversion tools on built objects # LOCAL_DIRS="list of dirs" to add additional dirs to the SUBDIR list # LOCAL_ITOOLS="list of tools" to add additional tools to the ITOOLS list # LOCAL_LIB_DIRS="list of dirs" to add additional dirs to libraries target # LOCAL_MTREE="list of mtree files" to process to allow local directories # to be created before files are installed # LOCAL_TOOL_DIRS="list of dirs" to add additional dirs to the build-tools # list # LOCAL_XTOOL_DIRS="list of dirs" to add additional dirs to the # cross-tools target # METALOG="path to metadata log" to write permission and ownership # when NO_ROOT is set. (default: ${DESTDIR}/METALOG) # TARGET="machine" to crossbuild world for a different machine type # TARGET_ARCH= may be required when a TARGET supports multiple endians # BUILDENV_SHELL= shell to launch for the buildenv target (def:${SHELL}) # WORLD_FLAGS= additional flags to pass to make(1) during buildworld # KERNEL_FLAGS= additional flags to pass to make(1) during buildkernel # SUBDIR_OVERRIDE="list of dirs" to build rather than everything. # All libraries and includes, and some build tools will still build. # # The intended user-driven targets are: # buildworld - rebuild *everything*, including glue to help do upgrades # installworld- install everything built by "buildworld" # checkworld - run test suite on installed world # doxygen - build API documentation of the kernel # update - convenient way to update your source tree (eg: svn/svnup) # # Standard targets (not defined here) are documented in the makefiles in # /usr/share/mk. These include: # obj depend all install clean cleandepend cleanobj .if !defined(TARGET) || !defined(TARGET_ARCH) .error "Both TARGET and TARGET_ARCH must be defined." .endif .if make(showconfig) || make(test-system-*) _MKSHOWCONFIG= t .endif SRCDIR?= ${.CURDIR} LOCALBASE?= /usr/local # Cross toolchain changes must be in effect before bsd.compiler.mk # so that gets the right CC, and pass CROSS_TOOLCHAIN to submakes. 
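# For example (illustrative values; the descriptor name depends on which
# cross-toolchain port or file is installed), a cross build using an
# external toolchain might be started as:
#
#   make buildworld TARGET=arm64 TARGET_ARCH=aarch64 CROSS_TOOLCHAIN=aarch64-gcc
#
# which makes the block below include
# ${LOCALBASE}/share/toolchains/aarch64-gcc.mk and pass its compiler and
# binutils settings on to the submakes.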
.if defined(CROSS_TOOLCHAIN) .if exists(${LOCALBASE}/share/toolchains/${CROSS_TOOLCHAIN}.mk) .include "${LOCALBASE}/share/toolchains/${CROSS_TOOLCHAIN}.mk" .elif exists(${CROSS_TOOLCHAIN}) .include "${CROSS_TOOLCHAIN}" .else .error CROSS_TOOLCHAIN ${CROSS_TOOLCHAIN} not found .endif CROSSENV+=CROSS_TOOLCHAIN="${CROSS_TOOLCHAIN}" .endif .if defined(CROSS_TOOLCHAIN_PREFIX) CROSS_COMPILER_PREFIX?=${CROSS_TOOLCHAIN_PREFIX} .endif XCOMPILERS= CC CXX CPP .for COMPILER in ${XCOMPILERS} .if defined(CROSS_COMPILER_PREFIX) X${COMPILER}?= ${CROSS_COMPILER_PREFIX}${${COMPILER}} .else X${COMPILER}?= ${${COMPILER}} .endif .endfor # If a full path to an external cross compiler is given, don't build # a cross compiler. .if ${XCC:N${CCACHE_BIN}:M/*} MK_CLANG_BOOTSTRAP= no MK_GCC_BOOTSTRAP= no .endif # Pull in compiler metadata from buildworld/toolchain if possible to avoid # running CC from bsd.compiler.mk. .if make(installworld) || make(install) || make(distributeworld) || \ make(stageworld) .-include "${OBJTOP}/toolchain-metadata.mk" .if !defined(_LOADED_TOOLCHAIN_METADATA) .error A build is required first. You may have the wrong MAKEOBJDIRPREFIX set. .endif .endif # Pull in COMPILER_TYPE and COMPILER_FREEBSD_VERSION early. Pull it from the # tree to be friendlier to foreign OS builds. It's safe to do so unconditionally # here since we will always have the right make, unlike in src/Makefile # Don't include bsd.linker.mk yet until XBINUTILS is handled (after src.opts.mk) _NO_INCLUDE_LINKERMK= t # We also want the X_COMPILER* variables if we are using an external toolchain. _WANT_TOOLCHAIN_CROSS_VARS= t .include "share/mk/bsd.compiler.mk" .undef _NO_INCLUDE_LINKERMK .undef _WANT_TOOLCHAIN_CROSS_VARS # src.opts.mk depends on COMPILER_FEATURES .include "share/mk/src.opts.mk" .if ${TARGET} == ${MACHINE} TARGET_CPUTYPE?=${CPUTYPE} .else TARGET_CPUTYPE?= .endif .if !empty(TARGET_CPUTYPE) _TARGET_CPUTYPE=${TARGET_CPUTYPE} .else _TARGET_CPUTYPE=dummy .endif .if ${TARGET} == "arm" .if ${TARGET_ARCH:Marmv[67]*} != "" && ${TARGET_CPUTYPE:M*soft*} == "" TARGET_ABI= gnueabihf .else TARGET_ABI= gnueabi .endif .endif MACHINE_ABI?= unknown MACHINE_TRIPLE?=${MACHINE_ARCH:S/amd64/x86_64/:C/hf$//:S/mipsn32/mips64/}-${MACHINE_ABI}-freebsd13.0 TARGET_ABI?= unknown TARGET_TRIPLE?= ${TARGET_ARCH:S/amd64/x86_64/:C/hf$//:S/mipsn32/mips64/}-${TARGET_ABI}-freebsd13.0 KNOWN_ARCHES?= aarch64/arm64 \ amd64 \ arm \ armv6/arm \ armv7/arm \ i386 \ mips \ mipsel/mips \ mips64el/mips \ mipsn32el/mips \ mips64/mips \ mipsn32/mips \ mipshf/mips \ mipselhf/mips \ mips64elhf/mips \ mips64hf/mips \ powerpc \ powerpc64/powerpc \ powerpcspe/powerpc \ riscv64/riscv \ riscv64sf/riscv \ sparc64 .if ${TARGET} == ${TARGET_ARCH} _t= ${TARGET} .else _t= ${TARGET_ARCH}/${TARGET} .endif .for _t in ${_t} .if empty(KNOWN_ARCHES:M${_t}) .error Unknown target ${TARGET_ARCH}:${TARGET}. .endif .endfor # If all targets are disabled for system llvm then don't expect it to work # for cross-builds. .if !defined(TOOLS_PREFIX) && ${MK_LLVM_TARGET_ALL} == "no" && \ ${MACHINE} != ${TARGET} && ${MACHINE_ARCH} != ${TARGET_ARCH} && \ !make(showconfig) MK_SYSTEM_COMPILER= no MK_SYSTEM_LINKER= no .endif # Handle external binutils. .if defined(CROSS_TOOLCHAIN_PREFIX) CROSS_BINUTILS_PREFIX?=${CROSS_TOOLCHAIN_PREFIX} .endif # If we do not have a bootstrap binutils (because the in-tree one does not # support the target architecture), provide a default cross-binutils prefix. # This allows riscv64 builds, for example, to automatically use the # riscv64-binutils port or package. 
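# As an illustration of the default chosen below: with TARGET=riscv and
# TARGET_ARCH=riscv64, TARGET_ABI stays "unknown", TARGET_TRIPLE expands to
# riscv64-unknown-freebsd13.0, and CROSS_BINUTILS_PREFIX therefore defaults
# to /usr/local/riscv64-unknown-freebsd13.0/bin/, which is expected to be
# populated by the corresponding binutils port or package.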
.if !make(showconfig) && !defined(_NO_INCLUDE_COMPILERMK) .if !empty(BROKEN_OPTIONS:MBINUTILS_BOOTSTRAP) && \ ${MK_LLD_BOOTSTRAP} == "no" && \ !defined(CROSS_BINUTILS_PREFIX) CROSS_BINUTILS_PREFIX=/usr/local/${TARGET_TRIPLE}/bin/ .if !exists(${CROSS_BINUTILS_PREFIX}) .error In-tree binutils does not support the ${TARGET_ARCH} architecture. Install the ${TARGET_ARCH}-binutils port or package or set CROSS_BINUTILS_PREFIX. .endif .endif .endif XBINUTILS= AS AR LD NM OBJCOPY RANLIB SIZE STRINGS .for BINUTIL in ${XBINUTILS} .if defined(CROSS_BINUTILS_PREFIX) && \ exists(${CROSS_BINUTILS_PREFIX}/${${BINUTIL}}) X${BINUTIL}?= ${CROSS_BINUTILS_PREFIX:C,/*$,,}/${${BINUTIL}} .else X${BINUTIL}?= ${${BINUTIL}} .endif .endfor # If a full path to an external linker is given, don't build lld. .if ${XLD:M/*} MK_LLD_BOOTSTRAP= no .endif # We also want the X_LINKER* variables if we are using an external toolchain. _WANT_TOOLCHAIN_CROSS_VARS= t .include "share/mk/bsd.linker.mk" .undef _WANT_TOOLCHAIN_CROSS_VARS # Begin WITH_SYSTEM_COMPILER / WITH_SYSTEM_LD # WITH_SYSTEM_COMPILER - Pull in needed values and make a decision. # Check if there is a local compiler that can satisfy as an external compiler. # Which compiler is expected to be used? .if ${MK_CLANG_BOOTSTRAP} == "yes" WANT_COMPILER_TYPE= clang .elif ${MK_GCC_BOOTSTRAP} == "yes" WANT_COMPILER_TYPE= gcc .else WANT_COMPILER_TYPE= .endif .if !defined(WANT_COMPILER_FREEBSD_VERSION) && !make(showconfig) && \ !make(test-system-linker) .if ${WANT_COMPILER_TYPE} == "clang" WANT_COMPILER_FREEBSD_VERSION_FILE= lib/clang/freebsd_cc_version.h WANT_COMPILER_FREEBSD_VERSION!= \ awk '$$2 == "FREEBSD_CC_VERSION" {printf("%d\n", $$3)}' \ ${SRCDIR}/${WANT_COMPILER_FREEBSD_VERSION_FILE} || echo unknown WANT_COMPILER_VERSION_FILE= lib/clang/include/clang/Basic/Version.inc WANT_COMPILER_VERSION!= \ awk '$$2 == "CLANG_VERSION" {split($$3, a, "."); print a[1] * 10000 + a[2] * 100 + a[3]}' \ ${SRCDIR}/${WANT_COMPILER_VERSION_FILE} || echo unknown .elif ${WANT_COMPILER_TYPE} == "gcc" WANT_COMPILER_FREEBSD_VERSION_FILE= gnu/usr.bin/cc/cc_tools/freebsd-native.h WANT_COMPILER_FREEBSD_VERSION!= \ awk '$$2 == "FBSD_CC_VER" {printf("%d\n", $$3)}' \ ${SRCDIR}/${WANT_COMPILER_FREEBSD_VERSION_FILE} || echo unknown WANT_COMPILER_VERSION_FILE= contrib/gcc/BASE-VER WANT_COMPILER_VERSION!= \ awk -F. '{print $$1 * 10000 + $$2 * 100 + $$3}' \ ${SRCDIR}/${WANT_COMPILER_VERSION_FILE} || echo unknown .endif .export WANT_COMPILER_FREEBSD_VERSION WANT_COMPILER_VERSION .endif # !defined(WANT_COMPILER_FREEBSD_VERSION) # It needs to be the same revision as we would build for the bootstrap. # If the expected vs CC is different then we can't skip. # GCC cannot be used for cross-arch yet. For clang we pass -target later if # TARGET_ARCH!=MACHINE_ARCH. .if ${MK_SYSTEM_COMPILER} == "yes" && \ defined(WANT_COMPILER_FREEBSD_VERSION) && \ (${MK_CLANG_BOOTSTRAP} == "yes" || ${MK_GCC_BOOTSTRAP} == "yes") && \ !make(xdev*) && \ ${X_COMPILER_TYPE} == ${WANT_COMPILER_TYPE} && \ (${X_COMPILER_TYPE} == "clang" || ${TARGET_ARCH} == ${MACHINE_ARCH}) && \ ${X_COMPILER_VERSION} == ${WANT_COMPILER_VERSION} && \ ${X_COMPILER_FREEBSD_VERSION} == ${WANT_COMPILER_FREEBSD_VERSION} # Everything matches, disable the bootstrap compiler. MK_CLANG_BOOTSTRAP= no MK_GCC_BOOTSTRAP= no USING_SYSTEM_COMPILER= yes .endif # ${WANT_COMPILER_TYPE} == ${COMPILER_TYPE} # WITH_SYSTEM_LD - Pull in needed values and make a decision. # Check if there is a local linker that can satisfy as an external linker. 
# Which linker is expected to be used? .if ${MK_LLD_BOOTSTRAP} == "yes" WANT_LINKER_TYPE= lld .elif ${MK_BINUTILS_BOOTSTRAP} == "yes" # Note that there's no support for bfd in WITH_SYSTEM_LINKER. WANT_LINKER_TYPE= bfd .else WANT_LINKER_TYPE= .endif .if !defined(WANT_LINKER_FREEBSD_VERSION) && !make(showconfig) && \ !make(test-system-compiler) .if ${WANT_LINKER_TYPE} == "lld" WANT_LINKER_FREEBSD_VERSION_FILE= lib/clang/include/lld/Common/Version.inc WANT_LINKER_FREEBSD_VERSION!= \ awk '$$2 == "LLD_REVISION_STRING" {gsub(/"/, "", $$3); print $$3}' \ ${SRCDIR}/${WANT_LINKER_FREEBSD_VERSION_FILE} || echo unknown WANT_LINKER_VERSION_FILE= lib/clang/include/lld/Common/Version.inc WANT_LINKER_VERSION!= \ awk '$$2 == "LLD_VERSION" {split($$3, a, "."); print a[1] * 10000 + a[2] * 100 + a[3]}' \ ${SRCDIR}/${WANT_LINKER_VERSION_FILE} || echo unknown .else WANT_LINKER_FREEBSD_VERSION_FILE= WANT_LINKER_FREEBSD_VERSION= .endif .export WANT_LINKER_FREEBSD_VERSION WANT_LINKER_VERSION .endif # !defined(WANT_LINKER_FREEBSD_VERSION) .if ${MK_SYSTEM_LINKER} == "yes" && \ defined(WANT_LINKER_FREEBSD_VERSION) && \ (${MK_LLD_BOOTSTRAP} == "yes") && \ !make(xdev*) && \ ${X_LINKER_TYPE} == ${WANT_LINKER_TYPE} && \ ${X_LINKER_VERSION} == ${WANT_LINKER_VERSION} && \ ${X_LINKER_FREEBSD_VERSION} == ${WANT_LINKER_FREEBSD_VERSION} # Everything matches, disable the bootstrap linker. MK_LLD_BOOTSTRAP= no USING_SYSTEM_LINKER= yes .endif # ${WANT_LINKER_TYPE} == ${LINKER_TYPE} # WITH_SYSTEM_COMPILER / WITH_SYSTEM_LINKER - Handle defaults and debug. USING_SYSTEM_COMPILER?= no USING_SYSTEM_LINKER?= no TEST_SYSTEM_COMPILER_VARS= \ USING_SYSTEM_COMPILER MK_SYSTEM_COMPILER \ MK_CROSS_COMPILER MK_CLANG_BOOTSTRAP MK_GCC_BOOTSTRAP \ WANT_COMPILER_TYPE WANT_COMPILER_VERSION WANT_COMPILER_VERSION_FILE \ WANT_COMPILER_FREEBSD_VERSION WANT_COMPILER_FREEBSD_VERSION_FILE \ CC COMPILER_TYPE COMPILER_FEATURES COMPILER_VERSION \ COMPILER_FREEBSD_VERSION \ XCC X_COMPILER_TYPE X_COMPILER_FEATURES X_COMPILER_VERSION \ X_COMPILER_FREEBSD_VERSION TEST_SYSTEM_LINKER_VARS= \ USING_SYSTEM_LINKER MK_SYSTEM_LINKER \ MK_LLD_BOOTSTRAP MK_BINUTILS_BOOTSTRAP \ WANT_LINKER_TYPE WANT_LINKER_VERSION WANT_LINKER_VERSION_FILE \ WANT_LINKER_FREEBSD_VERSION WANT_LINKER_FREEBSD_VERSION_FILE \ LD LINKER_TYPE LINKER_FEATURES LINKER_VERSION \ LINKER_FREEBSD_VERSION \ XLD X_LINKER_TYPE X_LINKER_FEATURES X_LINKER_VERSION \ X_LINKER_FREEBSD_VERSION .for _t in compiler linker test-system-${_t}: .PHONY .for v in ${TEST_SYSTEM_${_t:tu}_VARS} ${_+_}@printf "%-35s= %s\n" "${v}" "${${v}}" .endfor .endfor .if (make(buildworld) || make(buildkernel) || make(kernel-toolchain) || \ make(toolchain) || make(_cross-tools)) .if ${USING_SYSTEM_COMPILER} == "yes" .info SYSTEM_COMPILER: Determined that CC=${CC} matches the source tree. Not bootstrapping a cross-compiler. .elif ${MK_CLANG_BOOTSTRAP} == "yes" .info SYSTEM_COMPILER: libclang will be built for bootstrapping a cross-compiler. .endif .if ${USING_SYSTEM_LINKER} == "yes" .info SYSTEM_LINKER: Determined that LD=${LD} matches the source tree. Not bootstrapping a cross-linker. .elif ${MK_LLD_BOOTSTRAP} == "yes" .info SYSTEM_LINKER: libclang will be built for bootstrapping a cross-linker. .endif .endif # End WITH_SYSTEM_COMPILER / WITH_SYSTEM_LD # Store some compiler metadata for use in installworld where we don't # want to invoke CC at all. 
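# The outcome of the WITH_SYSTEM_COMPILER / WITH_SYSTEM_LINKER checks above
# can be inspected with the test-system-compiler and test-system-linker
# targets defined earlier, which print every variable that is compared, e.g.
# (illustrative invocation):
#
#   make test-system-compiler TARGET=amd64 TARGET_ARCH=amd64
#
# Versions are compared in the encoded form produced above, e.g. an in-tree
# clang 7.0.1 is compared as 70001; any mismatch keeps the bootstrap
# compiler or linker enabled.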
_TOOLCHAIN_METADATA_VARS= COMPILER_VERSION \ COMPILER_TYPE \ COMPILER_FEATURES \ COMPILER_FREEBSD_VERSION \ LINKER_VERSION \ LINKER_FEATURES \ LINKER_TYPE \ LINKER_FREEBSD_VERSION toolchain-metadata.mk: .PHONY .META @: > ${.TARGET} @echo ".info Using cached toolchain metadata from build at $$(hostname) on $$(date)" \ > ${.TARGET} @echo "_LOADED_TOOLCHAIN_METADATA=t" >> ${.TARGET} .for v in ${_TOOLCHAIN_METADATA_VARS} @echo "${v}=${${v}}" >> ${.TARGET} @echo "X_${v}=${X_${v}}" >> ${.TARGET} .endfor @echo ".export ${_TOOLCHAIN_METADATA_VARS}" >> ${.TARGET} @echo ".export ${_TOOLCHAIN_METADATA_VARS:C,^,X_,}" >> ${.TARGET} # We must do lib/ and libexec/ before bin/ in case of a mid-install error to # keep the users system reasonably usable. For static->dynamic root upgrades, # we don't want to install a dynamic binary without rtld and the needed # libraries. More commonly, for dynamic root, we don't want to install a # binary that requires a newer library version that hasn't been installed yet. # This ordering is not a guarantee though. The only guarantee of a working # system here would require fine-grained ordering of all components based # on their dependencies. .if !empty(SUBDIR_OVERRIDE) SUBDIR= ${SUBDIR_OVERRIDE} .else SUBDIR= lib libexec # Add LOCAL_LIB_DIRS, but only if they will not be picked up as a SUBDIR # of a LOCAL_DIRS directory. This allows LOCAL_DIRS=foo and # LOCAL_LIB_DIRS=foo/lib to behave as expected. .for _DIR in ${LOCAL_DIRS:M*/} ${LOCAL_DIRS:N*/:S|$|/|} _REDUNDANT_LIB_DIRS+= ${LOCAL_LIB_DIRS:M${_DIR}*} .endfor .for _DIR in ${LOCAL_LIB_DIRS} .if ${_DIR} == ".WAIT" || (empty(_REDUNDANT_LIB_DIRS:M${_DIR}) && exists(${.CURDIR}/${_DIR}/Makefile)) SUBDIR+= ${_DIR} .endif .endfor .if !defined(NO_ROOT) && (make(installworld) || make(install)) # Ensure libraries are installed before progressing. SUBDIR+=.WAIT .endif SUBDIR+=bin .if ${MK_CDDL} != "no" SUBDIR+=cddl .endif SUBDIR+=gnu include .if ${MK_KERBEROS} != "no" SUBDIR+=kerberos5 .endif .if ${MK_RESCUE} != "no" SUBDIR+=rescue .endif SUBDIR+=sbin .if ${MK_CRYPT} != "no" SUBDIR+=secure .endif .if !defined(NO_SHARE) SUBDIR+=share .endif .if ${MK_BOOT} != "no" SUBDIR+=stand .endif SUBDIR+=sys usr.bin usr.sbin .if ${MK_TESTS} != "no" SUBDIR+= tests .endif # Local directories are built in parallel with the base system directories. # Users may insert a .WAIT directive at the beginning or elsewhere within # the LOCAL_DIRS and LOCAL_LIB_DIRS lists as needed. .for _DIR in ${LOCAL_DIRS} .if ${_DIR} == ".WAIT" || exists(${.CURDIR}/${_DIR}/Makefile) SUBDIR+= ${_DIR} .endif .endfor # We must do etc/ last as it hooks into building the man whatis file # by calling 'makedb' in share/man. This is only relevant for # install/distribute so they build the whatis file after every manpage is # installed. .if make(installworld) || make(install) SUBDIR+=.WAIT .endif SUBDIR+=etc .endif # !empty(SUBDIR_OVERRIDE) .if defined(NOCLEAN) .warning NOCLEAN option is deprecated. Use NO_CLEAN instead. NO_CLEAN= ${NOCLEAN} .endif .if defined(NO_CLEANDIR) CLEANDIR= clean cleandepend .else CLEANDIR= cleandir .endif .if defined(WORLDFAST) NO_CLEAN= t NO_OBJWALK= t .endif .if ${MK_META_MODE} == "yes" # If filemon is used then we can rely on the build being incremental-safe. # The .meta files will also track the build command and rebuild should # it change. 
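# A typical way to opt in, assuming the filemon(4) module is available, is
# to set WITH_META_MODE=yes in /etc/src-env.conf and load the module before
# building, for example:
#
#   kldload -n filemon
#   make buildworld
#
# If filemon is not in use ("nofilemon" appears in .MAKE.MODE), the
# incremental guarantee does not hold and NO_CLEAN is not implied below.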
.if empty(.MAKE.MODE:Mnofilemon) NO_CLEAN= t .endif .endif .if defined(NO_OBJWALK) || ${MK_AUTO_OBJ} == "yes" NO_OBJWALK= t NO_KERNELOBJ= t .endif .if !defined(NO_OBJWALK) _obj= obj .endif LOCAL_TOOL_DIRS?= PACKAGEDIR?= ${DESTDIR}/${DISTDIR} .if empty(SHELL:M*csh*) BUILDENV_SHELL?=${SHELL} .else BUILDENV_SHELL?=/bin/sh .endif .if !defined(_MKSHOWCONFIG) .if !defined(SVN_CMD) || empty(SVN_CMD) . for _P in /usr/bin /usr/local/bin . for _S in svn svnlite . if exists(${_P}/${_S}) SVN_CMD= ${_P}/${_S} . endif . endfor . endfor .export SVN_CMD .endif SVNFLAGS?= -r HEAD .if !defined(VCS_REVISION) || empty(VCS_REVISION) .if !defined(SVNVERSION_CMD) || empty(SVNVERSION_CMD) . for _D in ${PATH:S,:, ,g} . if exists(${_D}/svnversion) SVNVERSION_CMD?=${_D}/svnversion . endif . if exists(${_D}/svnliteversion) SVNVERSION_CMD?=${_D}/svnliteversion . endif . endfor .endif _VCS_REVISION?= $$(eval ${SVNVERSION_CMD} ${SRCDIR}) . if !empty(_VCS_REVISION) VCS_REVISION= $$(echo r${_VCS_REVISION}) . endif .export VCS_REVISION .endif .if !defined(OSRELDATE) .if exists(/usr/include/osreldate.h) OSRELDATE!= awk '/^\#define[[:space:]]*__FreeBSD_version/ { print $$3 }' \ /usr/include/osreldate.h .else OSRELDATE= 0 .endif .export OSRELDATE .endif # Set VERSION for CTFMERGE to use via the default CTFFLAGS=-L VERSION. .if !defined(_REVISION) _REVISION!= ${MAKE} -C ${SRCDIR}/release MK_AUTO_OBJ=no -V REVISION .export _REVISION .endif .if !defined(_BRANCH) _BRANCH!= ${MAKE} -C ${SRCDIR}/release MK_AUTO_OBJ=no -V BRANCH .export _BRANCH .endif .if !defined(SRCRELDATE) SRCRELDATE!= awk '/^\#define[[:space:]]*__FreeBSD_version/ { print $$3 }' \ ${SRCDIR}/sys/sys/param.h .export SRCRELDATE .endif .if !defined(VERSION) VERSION= FreeBSD ${_REVISION}-${_BRANCH:C/-p[0-9]+$//} ${TARGET_ARCH} ${SRCRELDATE} .export VERSION .endif .if !defined(PKG_VERSION) .if ${_BRANCH:MSTABLE*} || ${_BRANCH:MCURRENT*} || ${_BRANCH:MALPHA*} TIMENOW= %Y%m%d%H%M%S EXTRA_REVISION= .s${TIMENOW:gmtime} .endif .if ${_BRANCH:M*-p*} EXTRA_REVISION= _${_BRANCH:C/.*-p([0-9]+$)/\1/} .endif PKG_VERSION= ${_REVISION}${EXTRA_REVISION} .endif .endif # !defined(_MKSHOWCONFIG) .if !defined(_MKSHOWCONFIG) _CPUTYPE!= MAKEFLAGS= CPUTYPE=${_TARGET_CPUTYPE} ${MAKE} -f /dev/null \ -m ${.CURDIR}/share/mk MK_AUTO_OBJ=no -V CPUTYPE .if ${_CPUTYPE} != ${_TARGET_CPUTYPE} .error CPUTYPE global should be set with ?=. .endif .endif .if make(buildworld) BUILD_ARCH!= uname -p .if ${MACHINE_ARCH} != ${BUILD_ARCH} .error To cross-build, set TARGET_ARCH. .endif .endif WORLDTMP?= ${OBJTOP}/tmp BPATH= ${CCACHE_WRAPPER_PATH_PFX}${WORLDTMP}/legacy/usr/sbin:${WORLDTMP}/legacy/usr/bin:${WORLDTMP}/legacy/bin XPATH= ${WORLDTMP}/usr/sbin:${WORLDTMP}/usr/bin # When building we want to find the cross tools before the host tools in ${BPATH}. # We also need to add UNIVERSE_TOOLCHAIN_PATH so that we can find the shared # toolchain files (clang, lld, etc.) during make universe/tinderbox STRICTTMPPATH= ${XPATH}:${BPATH}:${UNIVERSE_TOOLCHAIN_PATH} # We should not be using tools from /usr/bin accidentally since this could cause # the build to break on other systems that don't have that tool. For now we # still allow using the old behaviour (inheriting $PATH) if # BUILD_WITH_STRICT_TMPPATH is set to 0 but this will eventually be removed. # Currently strict $PATH can cause build failures and does not work yet with # USING_SYSTEM_LINKER/USING_SYSTEM_COMPILER. Once these issues have been # resolved it will be turned on by default. 
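# For example, the strict behaviour can be requested for a single build
# until it becomes the default:
#
#   make buildworld BUILD_WITH_STRICT_TMPPATH=1
#
# in which case only ${STRICTTMPPATH} (the cross tools, the legacy/bootstrap
# tools and the shared toolchain path) is searched and the host ${PATH} is
# not inherited.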
BUILD_WITH_STRICT_TMPPATH?=0
.if ${BUILD_WITH_STRICT_TMPPATH} != 0
TMPPATH= ${STRICTTMPPATH}
.else
TMPPATH= ${STRICTTMPPATH}:${PATH}
.endif

#
# Avoid running mktemp(1) unless actually needed.
# It may not be functional, e.g., due to new ABI
# when in the middle of installing over this system.
#
.if make(distributeworld) || make(installworld) || make(stageworld)
.if ${BUILD_WITH_STRICT_TMPPATH} != 0
MKTEMP=${WORLDTMP}/legacy/usr/bin/mktemp
.if !exists(${MKTEMP})
.error "mktemp binary doesn't exist in expected location: ${MKTEMP}"
.endif
.else
MKTEMP=mktemp
.endif
INSTALLTMP!= ${MKTEMP} -d -u -t install
.endif

.if make(stagekernel) || make(distributekernel)
TAGS+= kernel
PACKAGE= kernel
.endif

#
# Building a world goes through the following stages
#
# 1. legacy stage [BMAKE]
#	This stage is responsible for creating compatibility
#	shims that are needed by the bootstrap-tools,
#	build-tools and cross-tools stages. These are generally
#	APIs that tools from one of those three stages need to
#	build that aren't present on the host.
# 1. bootstrap-tools stage [BMAKE]
#	This stage is responsible for creating programs that
#	are needed for backward compatibility reasons. They
#	are not built as cross-tools.
# 2. build-tools stage [TMAKE]
#	This stage is responsible for creating the object
#	tree and building any tools that are needed during
#	the build process. Some programs are listed during
#	this phase because they build binaries to generate
#	files needed to build these programs. This stage also
#	builds the 'build-tools' target rather than 'all'.
# 3. cross-tools stage [XMAKE]
#	This stage is responsible for creating any tools that
#	are needed for building the system. A cross-compiler is one
#	of them. This differs from build tools in two ways:
#	1. the 'all' target is built rather than 'build-tools'
#	2. these tools are installed into TMPPATH for stage 4.
# 4. world stage [WMAKE]
#	This stage actually builds the world.
# 5. install stage (optional) [IMAKE]
#	This stage installs a previously built world.
#

BOOTSTRAPPING?= 0
# Keep these in sync
MINIMUM_SUPPORTED_OSREL?= 1002501
MINIMUM_SUPPORTED_REL?= 10.3

# Common environment for world related stages
CROSSENV+= \
	MACHINE_ARCH=${TARGET_ARCH} \
	MACHINE=${TARGET} \
	CPUTYPE=${TARGET_CPUTYPE}

.if ${MK_META_MODE} != "no"
# Don't rebuild build-tools targets during normal build.
CROSSENV+= BUILD_TOOLS_META=.NOMETA .endif .if defined(TARGET_CFLAGS) CROSSENV+= ${TARGET_CFLAGS} .endif BOOTSTRAPPING_OSRELDATE?=${OSRELDATE} # bootstrap-tools stage BMAKEENV= INSTALL="sh ${.CURDIR}/tools/install.sh" \ TOOLS_PREFIX=${TOOLS_PREFIX_UNDEF:U${WORLDTMP}} \ PATH=${BPATH}:${PATH} \ WORLDTMP=${WORLDTMP} \ MAKEFLAGS="-m ${.CURDIR}/tools/build/mk ${.MAKEFLAGS}" # need to keep this in sync with targets/pseudo/bootstrap-tools/Makefile BSARGS= DESTDIR= \ OBJTOP='${WORLDTMP}/obj-tools' \ OBJROOT='$${OBJTOP}/' \ MAKEOBJDIRPREFIX= \ BOOTSTRAPPING=${BOOTSTRAPPING_OSRELDATE} \ BWPHASE=${.TARGET:C,^_,,} \ SSP_CFLAGS= \ MK_HTML=no NO_LINT=yes MK_MAN=no \ -DNO_PIC MK_PROFILE=no -DNO_SHARED \ -DNO_CPU_CFLAGS MK_WARNS=no MK_CTF=no \ MK_CLANG_EXTRAS=no MK_CLANG_FULL=no \ MK_LLDB=no MK_RETPOLINE=no MK_TESTS=no \ MK_INCLUDES=yes BMAKE= \ ${BMAKEENV} ${MAKE} ${WORLD_FLAGS} -f Makefile.inc1 \ ${BSARGS} .if empty(.MAKEOVERRIDES:MMK_LLVM_TARGET_ALL) BMAKE+= MK_LLVM_TARGET_ALL=no .endif # build-tools stage TMAKE= \ ${BMAKEENV} ${MAKE} ${WORLD_FLAGS} -f Makefile.inc1 \ TARGET=${TARGET} TARGET_ARCH=${TARGET_ARCH} \ DESTDIR= \ BOOTSTRAPPING=${BOOTSTRAPPING_OSRELDATE} \ BWPHASE=${.TARGET:C,^_,,} \ SSP_CFLAGS= \ -DNO_LINT \ -DNO_CPU_CFLAGS MK_WARNS=no MK_CTF=no \ MK_CLANG_EXTRAS=no MK_CLANG_FULL=no \ MK_LLDB=no MK_RETPOLINE=no MK_TESTS=no # cross-tools stage # TOOLS_PREFIX set in BMAKE XMAKE= ${BMAKE} \ TARGET=${TARGET} TARGET_ARCH=${TARGET_ARCH} \ MK_GDB=no MK_TESTS=no # kernel-tools stage KTMAKEENV= INSTALL="sh ${.CURDIR}/tools/install.sh" \ PATH=${BPATH}:${PATH} \ WORLDTMP=${WORLDTMP} KTMAKE= \ TOOLS_PREFIX=${TOOLS_PREFIX_UNDEF:U${WORLDTMP}} \ ${KTMAKEENV} ${MAKE} ${WORLD_FLAGS} -f Makefile.inc1 \ DESTDIR= \ OBJTOP='${WORLDTMP}/obj-kernel-tools' \ OBJROOT='$${OBJTOP}/' \ MAKEOBJDIRPREFIX= \ BOOTSTRAPPING=${BOOTSTRAPPING_OSRELDATE} \ SSP_CFLAGS= \ MK_HTML=no -DNO_LINT MK_MAN=no \ -DNO_PIC MK_PROFILE=no -DNO_SHARED \ -DNO_CPU_CFLAGS MK_RETPOLINE=no MK_WARNS=no MK_CTF=no # world stage WMAKEENV= ${CROSSENV} \ INSTALL="sh ${.CURDIR}/tools/install.sh" \ PATH=${TMPPATH} \ SYSROOT=${WORLDTMP} # make hierarchy HMAKE= PATH=${TMPPATH} ${MAKE} LOCAL_MTREE=${LOCAL_MTREE:Q} .if defined(NO_ROOT) HMAKE+= PATH=${TMPPATH} METALOG=${METALOG} -DNO_ROOT .endif CROSSENV+= CC="${XCC} ${XCFLAGS}" CXX="${XCXX} ${XCXXFLAGS} ${XCFLAGS}" \ CPP="${XCPP} ${XCFLAGS}" \ AS="${XAS}" AR="${XAR}" LD="${XLD}" LLVM_LINK="${XLLVM_LINK}" \ NM=${XNM} OBJCOPY="${XOBJCOPY}" \ RANLIB=${XRANLIB} STRINGS=${XSTRINGS} \ SIZE="${XSIZE}" .if defined(CROSS_BINUTILS_PREFIX) && exists(${CROSS_BINUTILS_PREFIX}) # In the case of xdev-build tools, CROSS_BINUTILS_PREFIX won't be a # directory, but the compiler will look in the right place for its # tools so we don't need to tell it where to look. BFLAGS+= -B${CROSS_BINUTILS_PREFIX} .endif # The internal bootstrap compiler has a default sysroot set by TOOLS_PREFIX # and target set by TARGET/TARGET_ARCH. However, there are several needs to # always pass an explicit --sysroot and -target. # - External compiler needs sysroot and target flags. # - External ld needs sysroot. # - To be clear about the use of a sysroot when using the internal compiler. # - Easier debugging. # - Allowing WITH_SYSTEM_COMPILER+WITH_META_MODE to work together due to # the flip-flopping build command when sometimes using external and # sometimes using internal. # - Allow using lld which has no support for default paths. 
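# As an illustration, a hypothetical aarch64 cross build with the in-tree
# clang and no external binutils ends up adding flags along the lines of:
#
#   XCFLAGS+= -target aarch64-unknown-freebsd13.0 --sysroot=${WORLDTMP} \
#	-B${WORLDTMP}/usr/bin
#
# so the external and the bootstrapped toolchain are both given the same
# explicit sysroot and tool search path.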
.if !defined(CROSS_BINUTILS_PREFIX) || !exists(${CROSS_BINUTILS_PREFIX}) BFLAGS+= -B${WORLDTMP}/usr/bin .endif .if ${WANT_COMPILER_TYPE} == gcc || \ (defined(X_COMPILER_TYPE) && ${X_COMPILER_TYPE} == gcc) .elif ${WANT_COMPILER_TYPE} == clang || \ (defined(X_COMPILER_TYPE) && ${X_COMPILER_TYPE} == clang) XCFLAGS+= -target ${TARGET_TRIPLE} .endif XCFLAGS+= --sysroot=${WORLDTMP} .if !empty(BFLAGS) XCFLAGS+= ${BFLAGS} .endif .if ${MK_LIB32} != "no" && (${TARGET_ARCH} == "amd64" || \ ${TARGET_ARCH} == "powerpc64" || ${TARGET_ARCH:Mmips64*} != "") LIBCOMPAT= 32 .include "Makefile.libcompat" .elif ${MK_LIBSOFT} != "no" && ${TARGET_ARCH:Marmv[67]*} != "" LIBCOMPAT= SOFT .include "Makefile.libcompat" .endif # META_MODE normally ignores host file changes since every build updates # timestamps (see NO_META_IGNORE_HOST in sys.mk). There are known times # when the ABI breaks though that we want to force rebuilding WORLDTMP # to get updated host tools. .if ${MK_META_MODE} == "yes" && defined(NO_CLEAN) && \ !defined(NO_META_IGNORE_HOST) && !defined(NO_META_IGNORE_HOST_HEADERS) && \ !defined(_MKSHOWCONFIG) # r318736 - ino64 major ABI breakage META_MODE_BAD_ABI_VERS+= 1200031 .if !defined(OBJDIR_HOST_OSRELDATE) .if exists(${OBJTOP}/host-osreldate.h) OBJDIR_HOST_OSRELDATE!= \ awk '/^\#define[[:space:]]*__FreeBSD_version/ { print $$3 }' \ ${OBJTOP}/host-osreldate.h .elif exists(${WORLDTMP}/usr/include/osreldate.h) OBJDIR_HOST_OSRELDATE= 0 .endif .export OBJDIR_HOST_OSRELDATE .endif # Note that this logic is the opposite of normal BOOTSTRAP handling. We want # to compare the WORLDTMP's OSRELDATE to the host's OSRELDATE. If the WORLDTMP # is older than the ABI-breakage OSRELDATE of the HOST then we rebuild. .if defined(OBJDIR_HOST_OSRELDATE) .for _ver in ${META_MODE_BAD_ABI_VERS} .if ${OSRELDATE} >= ${_ver} && ${OBJDIR_HOST_OSRELDATE} < ${_ver} _meta_mode_need_rebuild= ${_ver} .endif .endfor .if defined(_meta_mode_need_rebuild) .info META_MODE: Rebuilding host tools due to ABI breakage in __FreeBSD_version ${_meta_mode_need_rebuild}. NO_META_IGNORE_HOST_HEADERS= 1 .export NO_META_IGNORE_HOST_HEADERS .endif # defined(_meta_mode_need_rebuild) .endif # defined(OBJDIR_HOST_OSRELDATE) .endif # ${MK_META_MODE} == "yes" && defined(NO_CLEAN) ... # This is only used for META_MODE+filemon to track what the oldest # __FreeBSD_version is in WORLDTMP. This purposely does NOT have # a make dependency on /usr/include/osreldate.h as the file should # only be copied when it is missing or meta mode determines it has changed. # Since host files are normally ignored without NO_META_IGNORE_HOST # the file will never be updated unless that flag is specified. This # allows tracking the oldest osreldate to force rebuilds via # META_MODE_BADABI_REVS above. host-osreldate.h: # DO NOT ADD /usr/include/osreldate.h here @cp -f /usr/include/osreldate.h ${.TARGET} WMAKE= ${WMAKEENV} ${MAKE} ${WORLD_FLAGS} -f Makefile.inc1 \ BWPHASE=${.TARGET:C,^_,,} \ DESTDIR=${WORLDTMP} IMAKEENV= ${CROSSENV} IMAKE= ${IMAKEENV} ${MAKE} -f Makefile.inc1 \ ${IMAKE_INSTALL} ${IMAKE_MTREE} .if empty(.MAKEFLAGS:M-n) IMAKEENV+= PATH=${STRICTTMPPATH}:${INSTALLTMP} \ LD_LIBRARY_PATH=${INSTALLTMP} \ PATH_LOCALE=${INSTALLTMP}/locale IMAKE+= __MAKE_SHELL=${INSTALLTMP}/sh .else IMAKEENV+= PATH=${TMPPATH}:${INSTALLTMP} .endif # When generating install media, do not allow user and group information from # the build host to affect the contents of the distribution. 
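# For example, release media are normally staged without root privileges,
# roughly along these lines (illustrative destination path):
#
#   make distributeworld -DNO_ROOT DESTDIR=/tmp/dist METALOG=/tmp/dist/METALOG
#
# so that ownership and mode information is recorded in the METALOG while
# the user and group databases come from the source tree, as arranged just
# below.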
.if make(distributeworld) || make(distrib-dirs) || make(distribution) DB_FROM_SRC= yes .endif .if defined(DB_FROM_SRC) INSTALLFLAGS+= -N ${.CURDIR}/etc MTREEFLAGS+= -N ${.CURDIR}/etc .endif _INSTALL_DDIR= ${DESTDIR}/${DISTDIR} INSTALL_DDIR= ${_INSTALL_DDIR:S://:/:g:C:/$::} .if defined(NO_ROOT) METALOG?= ${DESTDIR}/${DISTDIR}/METALOG METALOG:= ${METALOG:C,//+,/,g} IMAKE+= -DNO_ROOT METALOG=${METALOG} INSTALLFLAGS+= -U -M ${METALOG} -D ${INSTALL_DDIR} MTREEFLAGS+= -W .endif .if defined(BUILD_PKGS) INSTALLFLAGS+= -h sha256 .endif .if defined(DB_FROM_SRC) || defined(NO_ROOT) IMAKE_INSTALL= INSTALL="install ${INSTALLFLAGS}" IMAKE_MTREE= MTREE_CMD="mtree ${MTREEFLAGS}" .endif DESTDIR_MTREEFLAGS= -deU # When creating worldtmp we don't need to set the directories as owned by root # so we also pass -W WORLDTMP_MTREEFLAGS= -deUW .if defined(NO_ROOT) # When building with -DNO_ROOT we shouldn't be changing the directories # that are created by mtree to be owned by root/wheel. DESTDIR_MTREEFLAGS+= -W .endif MTREE?= mtree .if ${BUILD_WITH_STRICT_TMPPATH} != 0 MTREE= ${WORLDTMP}/legacy/usr/sbin/mtree .endif WORLDTMP_MTREE= ${MTREE} ${WORLDTMP_MTREEFLAGS} DESTDIR_MTREE= ${MTREE} ${DESTDIR_MTREEFLAGS} # kernel stage KMAKEENV= ${WMAKEENV:NSYSROOT=*} KMAKE= ${KMAKEENV} ${MAKE} ${.MAKEFLAGS} ${KERNEL_FLAGS} KERNEL=${INSTKERNNAME} # # buildworld # # Attempt to rebuild the entire system, with reasonable chance of # success, regardless of how old your existing system is. # _sanity_check: .PHONY .MAKE .if ${.CURDIR:C/[^,]//g} != "" # The m4 build of sendmail files doesn't like it if ',' is used # anywhere in the path of it's files. @echo @echo "*** Error: path to source tree contains a comma ','" @echo @false .elif ${.CURDIR:M*\:*} != "" # Using ':' leaks into PATH and breaks finding cross-tools. @echo @echo "*** Error: path to source tree contains a colon ':'" @echo @false .endif # Our current approach to dependency tracking cannot cope with certain source # tree changes, particularly with respect to removing source files and # replacing generated files. Handle these cases here in an ad-hoc fashion. 
_cleanobj_fast_depend_hack: .PHONY # Syscall stubs rewritten in C and obsolete MD assembly implementations # Date SVN Rev Syscalls # 20180404 r332048 sigreturn # 20180405 r332080 shmat # 20180406 r332119 setlogin # 20180411 r332443 exect # 20180525 r334224 vadvise # 20180604 r334626 brk sbrk .for f in brk exect sbrk setlogin shmat sigreturn vadvise @if [ -e "${OBJTOP}/lib/libc/.depend.${f}.o" ] && \ egrep -qw '${f}\.[sS]' ${OBJTOP}/lib/libc/.depend.${f}.o; then \ echo "Removing stale dependencies for ${f} syscall wrappers"; \ rm -f ${OBJTOP}/lib/libc/.depend.${f}.* \ ${LIBCOMPAT:D${LIBCOMPAT_OBJTOP}/lib/libc/.depend.${f}.*}; \ fi .endfor # 20181013 r339348 bcopy reimplemented as .c .for f in bcopy memcpy memmove @if [ -e "${OBJTOP}/lib/libc/.depend.${f}.o" ] && \ egrep -qw 'bcopy\.[sS]' ${OBJTOP}/lib/libc/.depend.${f}.o; then \ echo "Removing stale dependencies for bcopy"; \ rm -f ${OBJTOP}/lib/libc/.depend.${f}.* \ ${LIBCOMPAT:D${LIBCOMPAT_OBJTOP}/lib/libc/.depend.${f}.*}; \ fi .endfor # 20181009 track migration from ntp's embedded libevent to updated one @if [ -e "${OBJTOP}/usr.sbin/ntp/libntpevent/.depend.bufferevent_openssl.o" ] && \ egrep -q 'contrib/ntp/sntp/libevent/bufferevent_openssl.c' \ ${OBJTOP}/usr.sbin/ntp/libntpevent/.depend.bufferevent_openssl.o ; then \ echo "Removing stale libevent dependencies"; \ rm -f ${OBJTOP}/usr.sbin/ntp/libntpevent/.depend.*; \ fi _worldtmp: .PHONY @echo @echo "--------------------------------------------------------------" @echo ">>> Rebuilding the temporary build tree" @echo "--------------------------------------------------------------" .if !defined(NO_CLEAN) rm -rf ${WORLDTMP} .else # Note: for delete-old we need to set $PATH to also include the host $PATH # since otherwise a partial build with missing symlinks in ${WORLDTMP}/legacy/ # will fail to run due to missing binaries. $WMAKE sets PATH to only ${TMPPATH} # so we remove that assingnment from $WMAKE and prepend the new $PATH ${_+_}@if [ -e "${WORLDTMP}" ]; then \ echo ">>> Deleting stale files in build tree..."; \ cd ${.CURDIR}; env PATH=${TMPPATH}:${PATH} ${WMAKE:NPATH=*} \ _NO_INCLUDE_COMPILERMK=t -DBATCH_DELETE_OLD_FILES delete-old \ delete-old-libs >/dev/null; \ fi rm -rf ${WORLDTMP}/legacy/usr/include .if ${USING_SYSTEM_COMPILER} == "yes" .for cc in cc c++ if [ -x ${WORLDTMP}/usr/bin/${cc} ]; then \ inum=$$(stat -f %i ${WORLDTMP}/usr/bin/${cc}); \ find ${WORLDTMP}/usr/bin -inum $${inum} -delete; \ fi .endfor .endif # ${USING_SYSTEM_COMPILER} == "yes" .if ${USING_SYSTEM_LINKER} == "yes" @rm -f ${WORLDTMP}/usr/bin/ld ${WORLDTMP}/usr/bin/ld.lld .endif # ${USING_SYSTEM_LINKER} == "yes" .endif # !defined(NO_CLEAN) @mkdir -p ${WORLDTMP} @touch ${WORLDTMP}/${.TARGET} # We can't use mtree to create the worldtmp directories since it may not be # available on the target system (this happens e.g. 
when building on non-FreeBSD) cd ${.CURDIR}/tools/build; \ ${MAKE} DIRPRFX=tools/build/ DESTDIR=${WORLDTMP}/legacy installdirs # In order to build without inheriting $PATH we need to add symlinks to the host # tools in $WORLDTMP for the tools that we don't build during bootstrap-tools cd ${.CURDIR}/tools/build; \ ${MAKE} DIRPRFX=tools/build/ DESTDIR=${WORLDTMP}/legacy host-symlinks _legacy: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 1.1: legacy release compatibility shims" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${BMAKE} legacy _bootstrap-tools: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 1.2: bootstrap tools" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${BMAKE} bootstrap-tools mkdir -p ${WORLDTMP}/usr ${WORLDTMP}/lib/casper ${WORLDTMP}/lib/geom ${WORLDTMP_MTREE} -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${WORLDTMP}/usr >/dev/null ${WORLDTMP_MTREE} -f ${.CURDIR}/etc/mtree/BSD.include.dist \ -p ${WORLDTMP}/usr/include >/dev/null ln -sf ${.CURDIR}/sys ${WORLDTMP} .if ${MK_DEBUG_FILES} != "no" ${WORLDTMP_MTREE} -f ${.CURDIR}/etc/mtree/BSD.debug.dist \ -p ${WORLDTMP}/usr/lib >/dev/null .endif .for _mtree in ${LOCAL_MTREE} ${WORLDTMP_MTREE} -f ${.CURDIR}/${_mtree} -p ${WORLDTMP} > /dev/null .endfor _cleanobj: .if !defined(NO_CLEAN) @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.1: cleaning up the object tree" @echo "--------------------------------------------------------------" # Avoid including bsd.compiler.mk in clean and obj with _NO_INCLUDE_COMPILERMK # since the restricted $PATH might not contain a valid cc binary ${_+_}cd ${.CURDIR}; ${WMAKE} _NO_INCLUDE_COMPILERMK=t ${CLEANDIR} .if defined(LIBCOMPAT) ${_+_}cd ${.CURDIR}; ${LIBCOMPATWMAKE} _NO_INCLUDE_COMPILERMK=t -f Makefile.inc1 ${CLEANDIR} .endif .else ${_+_}cd ${.CURDIR}; ${WMAKE} _NO_INCLUDE_COMPILERMK=t _cleanobj_fast_depend_hack .endif # !defined(NO_CLEAN) _obj: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.2: rebuilding the object tree" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${WMAKE} _NO_INCLUDE_COMPILERMK=t obj _build-tools: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.3: build tools" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${TMAKE} build-tools _cross-tools: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 3: cross tools" @echo "--------------------------------------------------------------" @rm -f ${OBJTOP}/toolchain-metadata.mk ${_+_}cd ${.CURDIR}; ${XMAKE} cross-tools ${_+_}cd ${.CURDIR}; ${XMAKE} kernel-tools _build-metadata: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 3.1: recording build metadata" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${WMAKE} toolchain-metadata.mk ${_+_}cd ${.CURDIR}; ${WMAKE} host-osreldate.h _includes: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 4.1: building includes" @echo "--------------------------------------------------------------" # Special handling for SUBDIR_OVERRIDE in buildworld as they most likely need # headers from default SUBDIR. 
Do SUBDIR_OVERRIDE includes last. ${_+_}cd ${.CURDIR}; ${WMAKE} SUBDIR_OVERRIDE= SHARED=symlinks \ MK_INCLUDES=yes includes .if !empty(SUBDIR_OVERRIDE) && make(buildworld) ${_+_}cd ${.CURDIR}; ${WMAKE} MK_INCLUDES=yes SHARED=symlinks includes .endif _libraries: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 4.2: building libraries" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; \ ${WMAKE} -DNO_FSCHG MK_HTML=no -DNO_LINT MK_MAN=no \ MK_PROFILE=no MK_TESTS=no MK_TESTS_SUPPORT=${MK_TESTS} libraries everything: .PHONY @echo @echo "--------------------------------------------------------------" @echo ">>> stage 4.3: building everything" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; _PARALLEL_SUBDIR_OK=1 ${WMAKE} all WMAKE_TGTS= .if !defined(WORLDFAST) WMAKE_TGTS+= _sanity_check _worldtmp _legacy .if empty(SUBDIR_OVERRIDE) WMAKE_TGTS+= _bootstrap-tools .endif WMAKE_TGTS+= _cleanobj .if !defined(NO_OBJWALK) WMAKE_TGTS+= _obj .endif WMAKE_TGTS+= _build-tools _cross-tools WMAKE_TGTS+= _build-metadata WMAKE_TGTS+= _includes .endif .if !defined(NO_LIBS) WMAKE_TGTS+= _libraries .endif WMAKE_TGTS+= everything .if defined(LIBCOMPAT) && empty(SUBDIR_OVERRIDE) WMAKE_TGTS+= build${libcompat} .endif buildworld: buildworld_prologue ${WMAKE_TGTS} buildworld_epilogue .PHONY .ORDER: buildworld_prologue ${WMAKE_TGTS} buildworld_epilogue buildworld_prologue: .PHONY @echo "--------------------------------------------------------------" @echo ">>> World build started on `LC_ALL=C date`" @echo "--------------------------------------------------------------" buildworld_epilogue: .PHONY @echo @echo "--------------------------------------------------------------" @echo ">>> World build completed on `LC_ALL=C date`" @echo "--------------------------------------------------------------" # # We need to have this as a target because the indirection between Makefile # and Makefile.inc1 causes the correct PATH to be used, rather than a # modification of the current environment's PATH. In addition, we need # to quote multiword values. # buildenvvars: .PHONY @echo ${WMAKEENV:Q} ${.MAKE.EXPORTED:@v@$v=\"${$v}\"@} .if ${.TARGETS:Mbuildenv} .if ${.MAKEFLAGS:M-j} .error The buildenv target is incompatible with -j .endif .endif BUILDENV_DIR?= ${.CURDIR} # # Note: make will report any errors the shell reports. This can # be odd if the last command in an interactive shell generates an # error or is terminated by SIGINT. These reported errors look bad, # but are harmless. Allowing them also allows BUIDLENV_SHELL to # be a complex command whose status will be returned to the caller. # Some scripts in tools rely on this behavior to report build errors. # buildenv: .PHONY @echo Entering world for ${TARGET_ARCH}:${TARGET} .if ${BUILDENV_SHELL:M*zsh*} @echo For ZSH you must run: export CPUTYPE=${TARGET_CPUTYPE} .endif @cd ${BUILDENV_DIR} && env ${WMAKEENV} BUILDENV=1 ${BUILDENV_SHELL} TOOLCHAIN_TGTS= ${WMAKE_TGTS:Neverything:Nbuild${libcompat}} toolchain: ${TOOLCHAIN_TGTS} .PHONY KERNEL_TOOLCHAIN_TGTS= ${TOOLCHAIN_TGTS:N_obj:N_cleanobj:N_includes:N_libraries} .if make(kernel-toolchain) .ORDER: ${KERNEL_TOOLCHAIN_TGTS} .endif kernel-toolchain: ${KERNEL_TOOLCHAIN_TGTS} .PHONY # # installcheck # # Checks to be sure system is ready for installworld/installkernel. 
# installcheck: _installcheck_world _installcheck_kernel .PHONY _installcheck_world: .PHONY @echo "--------------------------------------------------------------" @echo ">>> Install check world" @echo "--------------------------------------------------------------" _installcheck_kernel: .PHONY @echo "--------------------------------------------------------------" @echo ">>> Install check kernel" @echo "--------------------------------------------------------------" # # Require DESTDIR to be set if installing for a different architecture or # using the user/group database in the source tree. # .if ${TARGET_ARCH} != ${MACHINE_ARCH} || ${TARGET} != ${MACHINE} || \ defined(DB_FROM_SRC) .if !make(distributeworld) _installcheck_world: __installcheck_DESTDIR _installcheck_kernel: __installcheck_DESTDIR __installcheck_DESTDIR: .PHONY .if !defined(DESTDIR) || empty(DESTDIR) @echo "ERROR: Please set DESTDIR!"; \ false .endif .endif .endif .if !defined(DB_FROM_SRC) # # Check for missing UIDs/GIDs. # CHECK_UIDS= auditdistd CHECK_GIDS= audit CHECK_UIDS+= ntpd CHECK_GIDS+= ntpd CHECK_UIDS+= proxy CHECK_GIDS+= proxy authpf CHECK_UIDS+= smmsp CHECK_GIDS+= smmsp CHECK_UIDS+= unbound CHECK_GIDS+= unbound _installcheck_world: __installcheck_UGID __installcheck_UGID: .PHONY .for uid in ${CHECK_UIDS} @if ! `id -u ${uid} >/dev/null 2>&1`; then \ echo "ERROR: Required ${uid} user is missing, see /usr/src/UPDATING."; \ false; \ fi .endfor .for gid in ${CHECK_GIDS} @if ! `find / -prune -group ${gid} >/dev/null 2>&1`; then \ echo "ERROR: Required ${gid} group is missing, see /usr/src/UPDATING."; \ false; \ fi .endfor .endif # # If installing over the running system (DESTDIR is / or unset) and the install # includes rescue, try running rescue from the objdir as a sanity check. If # rescue is not functional (e.g., because it depends on a system call not # supported by the currently running kernel), abort the installation. # .if !make(distributeworld) && ${MK_RESCUE} != "no" && \ (empty(DESTDIR) || ${DESTDIR} == "/") && empty(BYPASS_INSTALLCHECK_SH) _installcheck_world: __installcheck_sh_check __installcheck_sh_check: .PHONY @if [ "`${OBJTOP}/rescue/rescue/rescue sh -c 'echo OK'`" != \ OK ]; then \ echo "rescue/sh check failed, installation aborted" >&2; \ false; \ fi .endif # # Required install tools to be saved in a scratch dir for safety. # .if ${MK_ZONEINFO} != "no" _zoneinfo= zic tzsetup .endif ITOOLS= [ awk cap_mkdb cat chflags chmod chown cmp cp \ date echo egrep find grep id install ${_install-info} \ ln make mkdir mtree mv pwd_mkdb \ rm sed services_mkdb sh sort strip sysctl test true uname wc ${_zoneinfo} \ ${LOCAL_ITOOLS} # Needed for share/man .if ${MK_MAN_UTILS} != "no" ITOOLS+=makewhatis .endif # # distributeworld # # Distributes everything compiled by a `buildworld'. # # installworld # # Installs everything compiled by a 'buildworld'. # # Non-base distributions produced by the base system -EXTRA_DISTRIBUTIONS= doc +EXTRA_DISTRIBUTIONS= .if defined(LIBCOMPAT) EXTRA_DISTRIBUTIONS+= lib${libcompat} .endif .if ${MK_TESTS} != "no" EXTRA_DISTRIBUTIONS+= tests .endif DEBUG_DISTRIBUTIONS= .if ${MK_DEBUG_FILES} != "no" -DEBUG_DISTRIBUTIONS+= base ${EXTRA_DISTRIBUTIONS:S,doc,,:S,tests,,} +DEBUG_DISTRIBUTIONS+= base ${EXTRA_DISTRIBUTIONS:S,tests,,} .endif MTREE_MAGIC?= mtree 2.0 distributeworld installworld stageworld: _installcheck_world .PHONY mkdir -p ${INSTALLTMP} progs=$$(for prog in ${ITOOLS}; do \ if progpath=`which $$prog`; then \ echo $$progpath; \ else \ echo "Required tool $$prog not found in PATH." 
>&2; \ exit 1; \ fi; \ done); \ libs=$$(ldd -f "%o %p\n" -f "%o %p\n" $$progs 2>/dev/null | sort -u | \ while read line; do \ set -- $$line; \ if [ "$$2 $$3" != "not found" ]; then \ echo $$2; \ else \ echo "Required library $$1 not found." >&2; \ exit 1; \ fi; \ done); \ cp $$libs $$progs ${INSTALLTMP} cp -R $${PATH_LOCALE:-"/usr/share/locale"} ${INSTALLTMP}/locale .if defined(NO_ROOT) -mkdir -p ${METALOG:H} echo "#${MTREE_MAGIC}" > ${METALOG} .endif .if make(distributeworld) .for dist in ${EXTRA_DISTRIBUTIONS} -mkdir ${DESTDIR}/${DISTDIR}/${dist} ${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.root.dist \ -p ${DESTDIR}/${DISTDIR}/${dist} >/dev/null ${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}/usr >/dev/null ${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.include.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}/usr/include >/dev/null .if ${MK_DEBUG_FILES} != "no" ${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.debug.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}/usr/lib >/dev/null .endif .if defined(LIBCOMPAT) ${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.lib${libcompat}.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}/usr >/dev/null .if ${MK_DEBUG_FILES} != "no" ${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.lib${libcompat}.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}/usr/lib/debug/usr >/dev/null .endif .endif .if ${MK_TESTS} != "no" && ${dist} == "tests" -mkdir -p ${DESTDIR}/${DISTDIR}/${dist}${TESTSBASE} ${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.tests.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}${TESTSBASE} >/dev/null .if ${MK_DEBUG_FILES} != "no" ${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.tests.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}/usr/lib/debug/${TESTSBASE} >/dev/null .endif .endif .if defined(NO_ROOT) ${IMAKEENV} ${MTREE} -C -f ${.CURDIR}/etc/mtree/BSD.root.dist | \ sed -e 's#^\./#./${dist}/#' >> ${METALOG} ${IMAKEENV} ${MTREE} -C -f ${.CURDIR}/etc/mtree/BSD.usr.dist | \ sed -e 's#^\./#./${dist}/usr/#' >> ${METALOG} ${IMAKEENV} ${MTREE} -C -f ${.CURDIR}/etc/mtree/BSD.include.dist | \ sed -e 's#^\./#./${dist}/usr/include/#' >> ${METALOG} .if defined(LIBCOMPAT) ${IMAKEENV} ${MTREE} -C -f ${.CURDIR}/etc/mtree/BSD.lib${libcompat}.dist | \ sed -e 's#^\./#./${dist}/usr/#' >> ${METALOG} .endif .endif .endfor -mkdir ${DESTDIR}/${DISTDIR}/base ${_+_}cd ${.CURDIR}/etc; ${CROSSENV} PATH=${TMPPATH} ${MAKE} \ METALOG=${METALOG} ${IMAKE_INSTALL} ${IMAKE_MTREE} \ DISTBASE=/base DESTDIR=${DESTDIR}/${DISTDIR}/base \ LOCAL_MTREE=${LOCAL_MTREE:Q} distrib-dirs ${INSTALL_SYMLINK} ${INSTALLFLAGS} usr/src/sys ${INSTALL_DDIR}/base/sys .endif ${_+_}cd ${.CURDIR}; ${IMAKE} re${.TARGET:S/world$//}; \ ${IMAKEENV} rm -rf ${INSTALLTMP} .if make(distributeworld) .for dist in ${EXTRA_DISTRIBUTIONS} find ${DESTDIR}/${DISTDIR}/${dist} -mindepth 1 -type d -empty -delete .endfor .if defined(NO_ROOT) .for dist in base ${EXTRA_DISTRIBUTIONS} @# For each file that exists in this dist, print the corresponding @# line from the METALOG. This relies on the fact that @# a line containing only the filename will sort immediately before @# the relevant mtree line. cd ${DESTDIR}/${DISTDIR}; \ find ./${dist} | sort -u ${METALOG} - | \ awk 'BEGIN { print "#${MTREE_MAGIC}" } !/ type=/ { file = $$1 } / type=/ { if ($$1 == file) { sub(/^\.\/${dist}\//, "./"); print } }' > \ ${DESTDIR}/${DISTDIR}/${dist}.meta .endfor .for dist in ${DEBUG_DISTRIBUTIONS} @# For each file that exists in this dist, print the corresponding @# line from the METALOG. 
This relies on the fact that @# a line containing only the filename will sort immediately before @# the relevant mtree line. cd ${DESTDIR}/${DISTDIR}; \ find ./${dist}/usr/lib/debug | sort -u ${METALOG} - | \ awk 'BEGIN { print "#${MTREE_MAGIC}" } !/ type=/ { file = $$1 } / type=/ { if ($$1 == file) { sub(/^\.\/${dist}\//, "./"); print } }' > \ ${DESTDIR}/${DISTDIR}/${dist}.debug.meta .endfor .endif .endif packageworld: .PHONY .for dist in base ${EXTRA_DISTRIBUTIONS} .if defined(NO_ROOT) ${_+_}cd ${DESTDIR}/${DISTDIR}/${dist}; \ tar cvf - --exclude usr/lib/debug \ @${DESTDIR}/${DISTDIR}/${dist}.meta | \ ${XZ_CMD} > ${PACKAGEDIR}/${dist}.txz .else ${_+_}cd ${DESTDIR}/${DISTDIR}/${dist}; \ tar cvf - --exclude usr/lib/debug . | \ ${XZ_CMD} > ${PACKAGEDIR}/${dist}.txz .endif .endfor .for dist in ${DEBUG_DISTRIBUTIONS} . if defined(NO_ROOT) ${_+_}cd ${DESTDIR}/${DISTDIR}/${dist}; \ tar cvf - @${DESTDIR}/${DISTDIR}/${dist}.debug.meta | \ ${XZ_CMD} > ${PACKAGEDIR}/${dist}-dbg.txz . else ${_+_}cd ${DESTDIR}/${DISTDIR}/${dist}; \ tar cvLf - usr/lib/debug | \ ${XZ_CMD} > ${PACKAGEDIR}/${dist}-dbg.txz . endif .endfor _sysent_dirs= sys/kern _sysent_dirs+= sys/compat/freebsd32 _sysent_dirs+= sys/i386/ibcs2 _sysent_dirs+= sys/amd64/linux \ sys/amd64/linux32 \ sys/arm64/linux \ sys/i386/linux sysent: .PHONY .for _dir in ${_sysent_dirs} ${_+_}${MAKE} -C ${.CURDIR}/${_dir} sysent .endfor # # reinstall # # If you have a build server, you can NFS mount the source and obj directories # and do a 'make reinstall' on the *client* to install new binaries from the # most recent server build. # restage reinstall: .MAKE .PHONY @echo "--------------------------------------------------------------" @echo ">>> Making hierarchy" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 \ LOCAL_MTREE=${LOCAL_MTREE:Q} hierarchy .if make(restage) @echo "--------------------------------------------------------------" @echo ">>> Making distribution" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 \ LOCAL_MTREE=${LOCAL_MTREE:Q} distribution .endif @echo @echo "--------------------------------------------------------------" @echo ">>> Installing everything started on `LC_ALL=C date`" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 install .if defined(LIBCOMPAT) ${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 install${libcompat} .endif @echo "--------------------------------------------------------------" @echo ">>> Installing everything completed on `LC_ALL=C date`" @echo "--------------------------------------------------------------" redistribute: .MAKE .PHONY @echo "--------------------------------------------------------------" @echo ">>> Distributing everything" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 distribute .if defined(LIBCOMPAT) ${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 distribute${libcompat} \ DISTRIBUTION=lib${libcompat} .endif distrib-dirs distribution: .MAKE .PHONY ${_+_}cd ${.CURDIR}/etc; ${CROSSENV} PATH=${TMPPATH} ${MAKE} \ ${IMAKE_INSTALL} ${IMAKE_MTREE} METALOG=${METALOG} ${.TARGET} .if make(distribution) ${_+_}cd ${.CURDIR}; ${CROSSENV} PATH=${TMPPATH} \ ${MAKE} -f Makefile.inc1 ${IMAKE_INSTALL} \ METALOG=${METALOG} MK_TESTS=no installconfig .endif # # buildkernel and installkernel # # Which kernels to build and/or install is specified 
by setting # KERNCONF. If not defined a GENERIC kernel is built/installed. # Only the existing (depending TARGET) config files are used # for building kernels and only the first of these is designated # as the one being installed. # # Note that we have to use TARGET instead of TARGET_ARCH when # we're in kernel-land. Since only TARGET_ARCH is (expected) to # be set to cross-build, we have to make sure TARGET is set # properly. .if defined(KERNFAST) NO_KERNELCLEAN= t NO_KERNELCONFIG= t NO_KERNELOBJ= t # Shortcut for KERNCONF=Blah -DKERNFAST is now KERNFAST=Blah .if !defined(KERNCONF) && ${KERNFAST} != "1" KERNCONF=${KERNFAST} .endif .endif .if ${TARGET_ARCH} == "powerpc64" KERNCONF?= GENERIC64 .else KERNCONF?= GENERIC .endif INSTKERNNAME?= kernel KERNSRCDIR?= ${.CURDIR}/sys KRNLCONFDIR= ${KERNSRCDIR}/${TARGET}/conf KRNLOBJDIR= ${OBJTOP}${KERNSRCDIR:C,^${.CURDIR},,} KERNCONFDIR?= ${KRNLCONFDIR} BUILDKERNELS= INSTALLKERNEL= .if defined(NO_INSTALLKERNEL) # All of the BUILDKERNELS loops start at index 1. BUILDKERNELS+= dummy .endif .for _kernel in ${KERNCONF} .if !defined(_MKSHOWCONFIG) && exists(${KERNCONFDIR}/${_kernel}) BUILDKERNELS+= ${_kernel} .if empty(INSTALLKERNEL) && !defined(NO_INSTALLKERNEL) INSTALLKERNEL= ${_kernel} .endif .else .if make(buildkernel) .error Missing KERNCONF ${KERNCONFDIR}/${_kernel} .endif .endif .endfor _cleankernobj_fast_depend_hack: .PHONY # 20180320 remove stale generated assym.s after renaming to .inc in r331254 @if [ -e "${OBJTOP}/sys/${KERNCONF}/assym.s" ]; then \ echo "Removing stale generated assym files"; \ rm -f ${OBJTOP}/sys/${KERNCONF}/assym.* \ ${OBJTOP}/sys/${KERNCONF}/.depend.assym.*; \ fi ${WMAKE_TGTS:N_worldtmp:Nbuild${libcompat}} ${.ALLTARGETS:M_*:N_worldtmp}: .MAKE .PHONY # # buildkernel # # Builds all kernels defined by BUILDKERNELS. 
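# For example (illustrative configuration names):
#
#   make buildkernel KERNCONF="GENERIC MYKERNEL"
#   make installkernel KERNCONF="GENERIC MYKERNEL"
#
# builds both configurations and installs the first one; KERNFAST=MYKERNEL
# is a shortcut for KERNCONF=MYKERNEL together with the NO_KERNEL* knobs
# set above.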
# buildkernel: .MAKE .PHONY .if empty(BUILDKERNELS:Ndummy) @echo "ERROR: Missing kernel configuration file(s) (${KERNCONF})."; \ false .endif @echo .for _kernel in ${BUILDKERNELS:Ndummy} @echo "--------------------------------------------------------------" @echo ">>> Kernel build for ${_kernel} started on `LC_ALL=C date`" @echo "--------------------------------------------------------------" @echo "===> ${_kernel}" mkdir -p ${KRNLOBJDIR} .if !defined(NO_KERNELCONFIG) @echo @echo "--------------------------------------------------------------" @echo ">>> stage 1: configuring the kernel" @echo "--------------------------------------------------------------" cd ${KRNLCONFDIR}; \ PATH=${TMPPATH} \ config ${CONFIGARGS} -d ${KRNLOBJDIR}/${_kernel} \ -I '${KERNCONFDIR}' '${KERNCONFDIR}/${_kernel}' .endif .if !defined(NO_CLEAN) && !defined(NO_KERNELCLEAN) @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.1: cleaning up the object tree" @echo "--------------------------------------------------------------" ${_+_}cd ${KRNLOBJDIR}/${_kernel}; ${KMAKE} ${CLEANDIR} .else ${_+_}cd ${.CURDIR}; ${WMAKE} _cleankernobj_fast_depend_hack .endif .if !defined(NO_KERNELOBJ) @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.2: rebuilding the object tree" @echo "--------------------------------------------------------------" ${_+_}cd ${KRNLOBJDIR}/${_kernel}; ${KMAKE} obj .endif @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.3: build tools" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${KTMAKE} kernel-tools @echo @echo "--------------------------------------------------------------" @echo ">>> stage 3.1: building everything" @echo "--------------------------------------------------------------" ${_+_}cd ${KRNLOBJDIR}/${_kernel}; ${KMAKE} all -DNO_MODULES_OBJ @echo "--------------------------------------------------------------" @echo ">>> Kernel build for ${_kernel} completed on `LC_ALL=C date`" @echo "--------------------------------------------------------------" .endfor NO_INSTALLEXTRAKERNELS?= yes # # installkernel, etc. 
# # Install the kernel defined by INSTALLKERNEL # installkernel installkernel.debug \ reinstallkernel reinstallkernel.debug: _installcheck_kernel .PHONY .if !defined(NO_INSTALLKERNEL) .if empty(INSTALLKERNEL) @echo "ERROR: No kernel \"${KERNCONF}\" to install."; \ false .endif @echo "--------------------------------------------------------------" @echo ">>> Installing kernel ${INSTALLKERNEL} on $$(LC_ALL=C date)" @echo "--------------------------------------------------------------" ${_+_}cd ${KRNLOBJDIR}/${INSTALLKERNEL}; \ ${CROSSENV} PATH=${TMPPATH} \ ${MAKE} ${IMAKE_INSTALL} KERNEL=${INSTKERNNAME} ${.TARGET:S/kernel//} @echo "--------------------------------------------------------------" @echo ">>> Installing kernel ${INSTALLKERNEL} completed on $$(LC_ALL=C date)" @echo "--------------------------------------------------------------" .endif .if ${BUILDKERNELS:[#]} > 1 && ${NO_INSTALLEXTRAKERNELS} != "yes" .for _kernel in ${BUILDKERNELS:[2..-1]} @echo "--------------------------------------------------------------" @echo ">>> Installing kernel ${_kernel} $$(LC_ALL=C date)" @echo "--------------------------------------------------------------" ${_+_}cd ${KRNLOBJDIR}/${_kernel}; \ ${CROSSENV} PATH=${TMPPATH} \ ${MAKE} ${IMAKE_INSTALL} KERNEL=${INSTKERNNAME}.${_kernel} ${.TARGET:S/kernel//} @echo "--------------------------------------------------------------" @echo ">>> Installing kernel ${_kernel} completed on $$(LC_ALL=C date)" @echo "--------------------------------------------------------------" .endfor .endif distributekernel distributekernel.debug: .PHONY .if !defined(NO_INSTALLKERNEL) .if empty(INSTALLKERNEL) @echo "ERROR: No kernel \"${KERNCONF}\" to install."; \ false .endif mkdir -p ${DESTDIR}/${DISTDIR} .if defined(NO_ROOT) @echo "#${MTREE_MAGIC}" > ${DESTDIR}/${DISTDIR}/kernel.premeta .endif ${_+_}cd ${KRNLOBJDIR}/${INSTALLKERNEL}; \ ${IMAKEENV} ${IMAKE_INSTALL:S/METALOG/kernel.premeta/} \ ${IMAKE_MTREE} PATH=${TMPPATH} ${MAKE} KERNEL=${INSTKERNNAME} \ DESTDIR=${INSTALL_DDIR}/kernel \ ${.TARGET:S/distributekernel/install/} .if defined(NO_ROOT) @sed -e 's|^./kernel|.|' ${DESTDIR}/${DISTDIR}/kernel.premeta > \ ${DESTDIR}/${DISTDIR}/kernel.meta .endif .endif .if ${BUILDKERNELS:[#]} > 1 && ${NO_INSTALLEXTRAKERNELS} != "yes" .for _kernel in ${BUILDKERNELS:[2..-1]} .if defined(NO_ROOT) @echo "#${MTREE_MAGIC}" > ${DESTDIR}/${DISTDIR}/kernel.${_kernel}.premeta .endif ${_+_}cd ${KRNLOBJDIR}/${_kernel}; \ ${IMAKEENV} ${IMAKE_INSTALL:S/METALOG/kernel.${_kernel}.premeta/} \ ${IMAKE_MTREE} PATH=${TMPPATH} ${MAKE} \ KERNEL=${INSTKERNNAME}.${_kernel} \ DESTDIR=${INSTALL_DDIR}/kernel.${_kernel} \ ${.TARGET:S/distributekernel/install/} .if defined(NO_ROOT) @sed -e "s|^./kernel.${_kernel}|.|" \ ${DESTDIR}/${DISTDIR}/kernel.${_kernel}.premeta > \ ${DESTDIR}/${DISTDIR}/kernel.${_kernel}.meta .endif .endfor .endif packagekernel: .PHONY .if defined(NO_ROOT) .if !defined(NO_INSTALLKERNEL) cd ${DESTDIR}/${DISTDIR}/kernel; \ tar cvf - --exclude '*.debug' \ @${DESTDIR}/${DISTDIR}/kernel.meta | \ ${XZ_CMD} > ${PACKAGEDIR}/kernel.txz .endif .if ${MK_DEBUG_FILES} != "no" cd ${DESTDIR}/${DISTDIR}/kernel; \ tar cvf - --include '*/*/*.debug' \ @${DESTDIR}/${DISTDIR}/kernel.meta | \ ${XZ_CMD} > ${DESTDIR}/${DISTDIR}/kernel-dbg.txz .endif .if ${BUILDKERNELS:[#]} > 1 && ${NO_INSTALLEXTRAKERNELS} != "yes" .for _kernel in ${BUILDKERNELS:[2..-1]} cd ${DESTDIR}/${DISTDIR}/kernel.${_kernel}; \ tar cvf - --exclude '*.debug' \ @${DESTDIR}/${DISTDIR}/kernel.${_kernel}.meta | \ ${XZ_CMD} > 
${PACKAGEDIR}/kernel.${_kernel}.txz .if ${MK_DEBUG_FILES} != "no" cd ${DESTDIR}/${DISTDIR}/kernel.${_kernel}; \ tar cvf - --include '*/*/*.debug' \ @${DESTDIR}/${DISTDIR}/kernel.${_kernel}.meta | \ ${XZ_CMD} > ${DESTDIR}/${DISTDIR}/kernel.${_kernel}-dbg.txz .endif .endfor .endif .else .if !defined(NO_INSTALLKERNEL) cd ${DESTDIR}/${DISTDIR}/kernel; \ tar cvf - --exclude '*.debug' . | \ ${XZ_CMD} > ${PACKAGEDIR}/kernel.txz .endif .if ${MK_DEBUG_FILES} != "no" cd ${DESTDIR}/${DISTDIR}/kernel; \ tar cvf - --include '*/*/*.debug' $$(eval find .) | \ ${XZ_CMD} > ${DESTDIR}/${DISTDIR}/kernel-dbg.txz .endif .if ${BUILDKERNELS:[#]} > 1 && ${NO_INSTALLEXTRAKERNELS} != "yes" .for _kernel in ${BUILDKERNELS:[2..-1]} cd ${DESTDIR}/${DISTDIR}/kernel.${_kernel}; \ tar cvf - --exclude '*.debug' . | \ ${XZ_CMD} > ${PACKAGEDIR}/kernel.${_kernel}.txz .if ${MK_DEBUG_FILES} != "no" cd ${DESTDIR}/${DISTDIR}/kernel.${_kernel}; \ tar cvf - --include '*/*/*.debug' $$(eval find .) | \ ${XZ_CMD} > ${DESTDIR}/${DISTDIR}/kernel.${_kernel}-dbg.txz .endif .endfor .endif .endif stagekernel: .PHONY ${_+_}${MAKE} -C ${.CURDIR} ${.MAKEFLAGS} distributekernel PORTSDIR?= /usr/ports WSTAGEDIR?= ${OBJTOP}/worldstage KSTAGEDIR?= ${OBJTOP}/kernelstage REPODIR?= ${OBJROOT}repo PKGSIGNKEY?= # empty .ORDER: stage-packages create-packages .ORDER: create-packages create-world-packages .ORDER: create-packages create-kernel-packages .ORDER: create-packages sign-packages _pkgbootstrap: .PHONY .if make(*package*) && !exists(${LOCALBASE}/sbin/pkg) @env ASSUME_ALWAYS_YES=YES pkg bootstrap .endif packages: .PHONY ${_+_}${MAKE} -C ${.CURDIR} PKG_VERSION=${PKG_VERSION} real-packages package-pkg: .PHONY rm -rf /tmp/ports.${TARGET} || : env ${WMAKEENV:Q} SRCDIR=${.CURDIR} PORTSDIR=${PORTSDIR} REVISION=${_REVISION} \ PKG_CMD=${PKG_CMD} PKG_VERSION=${PKG_VERSION} REPODIR=${REPODIR} \ WSTAGEDIR=${WSTAGEDIR} \ sh ${.CURDIR}/release/scripts/make-pkg-package.sh real-packages: stage-packages create-packages sign-packages .PHONY stage-packages-world: .PHONY @mkdir -p ${WSTAGEDIR} ${_+_}@cd ${.CURDIR}; \ ${MAKE} DESTDIR=${WSTAGEDIR} -DNO_ROOT stageworld stage-packages-kernel: .PHONY @mkdir -p ${KSTAGEDIR} ${_+_}@cd ${.CURDIR}; \ ${MAKE} DESTDIR=${KSTAGEDIR} -DNO_ROOT stagekernel stage-packages: .PHONY stage-packages-world stage-packages-kernel _repodir: .PHONY @mkdir -p ${REPODIR} create-packages-world: _pkgbootstrap _repodir .PHONY ${_+_}@cd ${.CURDIR}; \ ${MAKE} -f Makefile.inc1 \ DESTDIR=${WSTAGEDIR} \ PKG_VERSION=${PKG_VERSION} create-world-packages create-packages-kernel: _pkgbootstrap _repodir .PHONY ${_+_}@cd ${.CURDIR}; \ ${MAKE} -f Makefile.inc1 \ DESTDIR=${KSTAGEDIR} \ PKG_VERSION=${PKG_VERSION} DISTDIR=kernel \ create-kernel-packages create-packages: .PHONY create-packages-world create-packages-kernel create-world-packages: _pkgbootstrap .PHONY @rm -f ${WSTAGEDIR}/*.plist 2>/dev/null || : @cd ${WSTAGEDIR} ; \ env -i LC_COLLATE=C sort ${WSTAGEDIR}/${DISTDIR}/METALOG | \ awk -f ${SRCDIR}/release/scripts/mtree-to-plist.awk @for plist in ${WSTAGEDIR}/*.plist; do \ plist=$${plist##*/} ; \ pkgname=$${plist%.plist} ; \ echo "_PKGS+= $${pkgname}" ; \ done > ${WSTAGEDIR}/packages.mk ${_+_}@cd ${.CURDIR}; \ ${MAKE} -f Makefile.inc1 create-world-packages-jobs \ .MAKE.JOB.PREFIX= .if make(create-world-packages-jobs) .include "${WSTAGEDIR}/packages.mk" .endif create-world-packages-jobs: .PHONY .for pkgname in ${_PKGS} create-world-packages-jobs: create-world-package-${pkgname} create-world-package-${pkgname}: .PHONY @sh 
${SRCDIR}/release/packages/generate-ucl.sh -o ${pkgname} \ -s ${SRCDIR} -u ${WSTAGEDIR}/${pkgname}.ucl @awk -F\" ' \ /^name/ { printf("===> Creating %s-", $$2); next } \ /^version/ { print $$2; next } \ ' ${WSTAGEDIR}/${pkgname}.ucl @if [ "${pkgname}" == "runtime" ]; then \ sed -i '' -e "s/%VCS_REVISION%/${VCS_REVISION}/" ${WSTAGEDIR}/${pkgname}.ucl ; \ fi ${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh -o ALLOW_BASE_SHLIBS=yes \ create -M ${WSTAGEDIR}/${pkgname}.ucl \ -p ${WSTAGEDIR}/${pkgname}.plist \ -r ${WSTAGEDIR} \ -o ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI)/${PKG_VERSION} .endfor _default_flavor= -default .if make(*package*) && exists(${KSTAGEDIR}/kernel.meta) . if ${MK_DEBUG_FILES} != "no" _debug=-debug . endif create-kernel-packages: .PHONY . for flavor in "" ${_debug} create-kernel-packages: create-kernel-packages-flavor${flavor:C,^""$,${_default_flavor},} create-kernel-packages-flavor${flavor:C,^""$,${_default_flavor},}: _pkgbootstrap .PHONY @cd ${KSTAGEDIR}/${DISTDIR} ; \ env -i LC_COLLATE=C sort ${KSTAGEDIR}/kernel.meta | \ awk -f ${SRCDIR}/release/scripts/mtree-to-plist.awk \ -v kernel=yes -v _kernconf=${INSTALLKERNEL} ; \ sed -e "s/%VERSION%/${PKG_VERSION}/" \ -e "s/%PKGNAME%/kernel-${INSTALLKERNEL:tl}${flavor}/" \ -e "s/%KERNELDIR%/kernel/" \ -e "s/%COMMENT%/FreeBSD ${INSTALLKERNEL} kernel ${flavor}/" \ -e "s/%DESC%/FreeBSD ${INSTALLKERNEL} kernel ${flavor}/" \ -e "s/ %VCS_REVISION%/${VCS_REVISION}/" \ ${SRCDIR}/release/packages/kernel.ucl \ > ${KSTAGEDIR}/${DISTDIR}/kernel.${INSTALLKERNEL}${flavor}.ucl ; \ awk -F\" ' \ /name/ { printf("===> Creating %s-", $$2); next } \ /version/ {print $$2; next } ' \ ${KSTAGEDIR}/${DISTDIR}/kernel.${INSTALLKERNEL}${flavor}.ucl ; \ ${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh -o ALLOW_BASE_SHLIBS=yes \ create -M ${KSTAGEDIR}/${DISTDIR}/kernel.${INSTALLKERNEL}${flavor}.ucl \ -p ${KSTAGEDIR}/${DISTDIR}/kernel.${INSTALLKERNEL}${flavor}.plist \ -r ${KSTAGEDIR}/${DISTDIR} \ -o ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI)/${PKG_VERSION} . endfor .endif .if ${BUILDKERNELS:[#]} > 1 && ${NO_INSTALLEXTRAKERNELS} != "yes" . for _kernel in ${BUILDKERNELS:[2..-1]} . if exists(${KSTAGEDIR}/kernel.${_kernel}.meta) . if ${MK_DEBUG_FILES} != "no" _debug=-debug . endif . 
for flavor in "" ${_debug} create-kernel-packages: create-kernel-packages-extra-flavor${flavor:C,^""$,${_default_flavor},}-${_kernel} create-kernel-packages-extra-flavor${flavor:C,^""$,${_default_flavor},}-${_kernel}: _pkgbootstrap .PHONY @cd ${KSTAGEDIR}/kernel.${_kernel} ; \ env -i LC_COLLATE=C sort ${KSTAGEDIR}/kernel.${_kernel}.meta | \ awk -f ${SRCDIR}/release/scripts/mtree-to-plist.awk \ -v kernel=yes -v _kernconf=${_kernel} ; \ sed -e "s/%VERSION%/${PKG_VERSION}/" \ -e "s/%PKGNAME%/kernel-${_kernel:tl}${flavor}/" \ -e "s/%KERNELDIR%/kernel.${_kernel}/" \ -e "s/%COMMENT%/FreeBSD ${_kernel} kernel ${flavor}/" \ -e "s/%DESC%/FreeBSD ${_kernel} kernel ${flavor}/" \ -e "s/ %VCS_REVISION%/${VCS_REVISION}/" \ ${SRCDIR}/release/packages/kernel.ucl \ > ${KSTAGEDIR}/kernel.${_kernel}/kernel.${_kernel}${flavor}.ucl ; \ awk -F\" ' \ /name/ { printf("===> Creating %s-", $$2); next } \ /version/ {print $$2; next } ' \ ${KSTAGEDIR}/kernel.${_kernel}/kernel.${_kernel}${flavor}.ucl ; \ ${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh -o ALLOW_BASE_SHLIBS=yes \ create -M ${KSTAGEDIR}/kernel.${_kernel}/kernel.${_kernel}${flavor}.ucl \ -p ${KSTAGEDIR}/kernel.${_kernel}/kernel.${_kernel}${flavor}.plist \ -r ${KSTAGEDIR}/kernel.${_kernel} \ -o ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI)/${PKG_VERSION} . endfor . endif . endfor .endif sign-packages: _pkgbootstrap .PHONY @[ -L "${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI)/latest" ] && \ unlink ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI)/latest ; \ ${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh repo \ -o ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI)/${PKG_VERSION} \ ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI)/${PKG_VERSION} \ ${PKGSIGNKEY} ; \ cd ${REPODIR}/$$(${PKG_CMD} -o ABI_FILE=${WSTAGEDIR}/bin/sh config ABI); \ ln -s ${PKG_VERSION} latest # # # checkworld # # Run test suite on installed world. # checkworld: .PHONY @if [ ! -x "${LOCALBASE}/bin/kyua" ]; then \ echo "You need kyua (devel/kyua) to run the test suite." | /usr/bin/fmt; \ exit 1; \ fi ${_+_}PATH="$$PATH:${LOCALBASE}/bin" kyua test -k ${TESTSBASE}/Kyuafile # # # doxygen # # Build the API documentation with doxygen # doxygen: .PHONY @if [ ! -x "${LOCALBASE}/bin/doxygen" ]; then \ echo "You need doxygen (devel/doxygen) to generate the API documentation of the kernel." | /usr/bin/fmt; \ exit 1; \ fi ${_+_}cd ${.CURDIR}/tools/kerneldoc/subsys; ${MAKE} obj all # # update # # Update the source tree(s), by running svn/svnup to update to the # latest copy. # update: .PHONY .if defined(SVN_UPDATE) @echo "--------------------------------------------------------------" @echo ">>> Updating ${.CURDIR} using Subversion" @echo "--------------------------------------------------------------" @(cd ${.CURDIR}; ${SVN_CMD} update ${SVNFLAGS}) .endif # # ------------------------------------------------------------------------ # # From here onwards are utility targets used by the 'make world' and # related targets. If your 'world' breaks, you may like to try to fix # the problem and manually run the following targets to attempt to # complete the build. Beware, this is *not* guaranteed to work, you # need to have a pretty good grip on the current state of the system # to attempt to manually finish it. If in doubt, 'make world' again. # # # legacy: Build compatibility shims for the next three targets. 
This is a # minimal set of tools and shims necessary to compensate for older systems # which don't have the APIs required by the targets built in bootstrap-tools, # build-tools or cross-tools. # # libnv and libl are both requirements for config(8), which is an unconditional # bootstrap-tool. _config_deps= lib/libnv usr.bin/lex/lib legacy: .PHONY .if ${BOOTSTRAPPING} < ${MINIMUM_SUPPORTED_OSREL} && ${BOOTSTRAPPING} != 0 @echo "ERROR: Source upgrades from versions prior to ${MINIMUM_SUPPORTED_REL} are not supported."; \ false .endif .for _tool in tools/build ${_config_deps} ${_+_}@${ECHODIR} "===> ${_tool} (obj,includes,all,install)"; \ cd ${.CURDIR}/${_tool}; \ if [ -z "${NO_OBJWALK}" ]; then ${MAKE} DIRPRFX=${_tool}/ obj; fi; \ ${MAKE} DIRPRFX=${_tool}/ DESTDIR=${WORLDTMP}/legacy includes; \ ${MAKE} DIRPRFX=${_tool}/ MK_INCLUDES=no all; \ ${MAKE} DIRPRFX=${_tool}/ MK_INCLUDES=no \ DESTDIR=${WORLDTMP}/legacy install .endfor # # bootstrap-tools: Build tools needed for compatibility. These are binaries that # are built to build other binaries in the system. However, the focus of these # binaries is usually quite narrow. Bootstrap tools use the host's compiler and # libraries, augmented by -legacy, in addition to the libraries built during # bootstrap-tools. # _bt= _bootstrap-tools # We want to run the build with only ${WORLDTMP} in $PATH to ensure we don't # accidentally run tools that are incompatible but happen to be in $PATH. # This is especially important when building on Linux/MacOS where many of the # programs used during the build accept different flags or generate different # output. On those platforms we only symlink the tools known to be compatible # (e.g. basic utilities such as mkdir) into ${WORLDTMP} and build all others # from the FreeBSD sources during the bootstrap-tools stage. # We want to build without the user's $PATH starting in the bootstrap-tools # phase so the tools used in that phase (ln, cp, etc) must have already been # linked to $WORLDTMP. The tools are listed in the _host_tools_to_symlink # variable in tools/build/Makefile and are linked during the legacy phase. # Since they could be Linux or MacOS binaries, too we must only use flags that # are portable across operating systems. # If BOOTSTRAP_ALL_TOOLS is set we will build all the required tools from the # current source tree. Otherwise we create a symlink to the version found in # $PATH during the bootstrap-tools stage. .if defined(BOOTSTRAP_ALL_TOOLS) # BOOTSTRAPPING will be set on the command line so we can't override it here. # Instead set BOOTSTRAPPING_OSRELDATE so that the value 0 is set ${BSARGS} BOOTSTRAPPING_OSRELDATE:= 0 .endif .if ${MK_GAMES} != "no" _strfile= usr.bin/fortune/strfile .endif .if ${MK_GCC} != "no" && ${MK_CXX} != "no" _gperf= gnu/usr.bin/gperf .endif .if ${MK_VT} != "no" _vtfontcvt= usr.bin/vtfontcvt .endif # If we are not building the bootstrap because BOOTSTRAPPING is sufficient # we symlink the host version to $WORLDTMP instead. By doing this we can also # detect when a bootstrap tool is being used without the required MK_FOO. # If you add a new bootstrap tool where we could also use the host version, # please ensure that you also add a .else case where you add the tool to the # _bootstrap_tools_links variable. .if ${BOOTSTRAPPING} < 1000033 _m4= usr.bin/m4 _lex= usr.bin/lex # Note: lex needs m4 to build but m4 also depends on lex. However, lex can be # bootstrapped so we build lex first. 
${_bt}-usr.bin/m4: ${_bt}-lib/libopenbsd ${_bt}-usr.bin/yacc ${_bt}-${_lex} _bt_m4_depend=${_bt}-${_m4} _bt_lex_depend=${_bt}-${_lex} ${_bt_m4_depend} .else _bootstrap_tools_links+=m4 lex .endif # ELF Tool Chain libraries are needed for ELF tools and dtrace tools. # r296685 fix cross-endian objcopy # r310724 fixed PR 215350, a crash in libdwarf with objects built by GCC 6.2. # r334881 added libdwarf constants used by ctfconvert. # r338478 fixed a crash in objcopy for mips64el objects # r339083 libelf: correct mips64el test to use ELF header .if ${BOOTSTRAPPING} < 1200085 _elftoolchain_libs= lib/libelf lib/libdwarf ${_bt}-lib/libelf: ${_bt_m4_depend} ${_bt}-lib/libdwarf: ${_bt_m4_depend} .endif # r245440 mtree -N support added # r313404 requires sha384.h for libnetbsd, added to libmd in r292782 .if ${BOOTSTRAPPING} < 1100093 _nmtree= lib/libmd \ lib/libnetbsd \ usr.sbin/nmtree ${_bt}-lib/libnetbsd: ${_bt}-lib/libmd ${_bt}-usr.sbin/nmtree: ${_bt}-lib/libnetbsd .else _bootstrap_tools_links+=mtree .endif # r246097: log addition login.conf.db, passwd, pwd.db, and spwd.db with cat -l .if ${BOOTSTRAPPING} < 1000027 _cat= bin/cat .else _bootstrap_tools_links+=cat .endif # r277259 crunchide: Correct 64-bit section header offset # r281674 crunchide: always include both 32- and 64-bit ELF support .if ${BOOTSTRAPPING} < 1100078 _crunchide= usr.sbin/crunch/crunchide .else _bootstrap_tools_links+=crunchide .endif # r285986 crunchen: use STRIPBIN rather than STRIP # 1100113: Support MK_AUTO_OBJ # 1200006: META_MODE fixes .if ${BOOTSTRAPPING} < 1100078 || \ (${MK_AUTO_OBJ} == "yes" && ${BOOTSTRAPPING} < 1100114) || \ (${MK_META_MODE} == "yes" && ${BOOTSTRAPPING} < 1200006) _crunchgen= usr.sbin/crunch/crunchgen .else _bootstrap_tools_links+=crunchgen .endif # r296926 -P keymap search path, MFC to stable/10 in r298297 .if ${BOOTSTRAPPING} < 1003501 || \ (${BOOTSTRAPPING} >= 1100000 && ${BOOTSTRAPPING} < 1100103) _kbdcontrol= usr.sbin/kbdcontrol .else _bootstrap_tools_links+=kbdcontrol .endif _yacc= lib/liby \ usr.bin/yacc ${_bt}-usr.bin/yacc: ${_bt}-lib/liby .if ${MK_BSNMP} != "no" _gensnmptree= usr.sbin/bsnmpd/gensnmptree .endif .if ${MK_LOCALES} != "no" _localedef= usr.bin/localedef .endif # We need to build tblgen when we're building clang or lld, either as # bootstrap tools, or as the part of the normal build. .if ${MK_CLANG_BOOTSTRAP} != "no" || ${MK_CLANG} != "no" || \ ${MK_LLD_BOOTSTRAP} != "no" || ${MK_LLD} != "no" _clang_tblgen= \ lib/clang/libllvmminimal \ usr.bin/clang/llvm-tblgen \ usr.bin/clang/clang-tblgen ${_bt}-usr.bin/clang/clang-tblgen: ${_bt}-lib/clang/libllvmminimal ${_bt}-usr.bin/clang/llvm-tblgen: ${_bt}-lib/clang/libllvmminimal .endif # Default to building the GPL DTC, but build the BSDL one if users explicitly # request it. _dtc= usr.bin/dtc .if ${MK_GPL_DTC} != "no" _dtc= gnu/usr.bin/dtc .endif .if ${MK_LOCALES} != "no" _localedef= usr.bin/localedef .endif .if ${MK_KERBEROS} != "no" _kerberos5_bootstrap_tools= \ kerberos5/tools/make-roken \ kerberos5/lib/libroken \ kerberos5/lib/libvers \ kerberos5/tools/asn1_compile \ kerberos5/tools/slc \ usr.bin/compile_et .ORDER: ${_kerberos5_bootstrap_tools:C/^/${_bt}-/g} .for _tool in ${_kerberos5_bootstrap_tools} ${_bt}-${_tool}: ${_bt}-usr.bin/yacc ${_bt_lex_depend} .endfor .endif ${_bt}-usr.bin/mandoc: ${_bt}-lib/libopenbsd # The tools listed in _basic_bootstrap_tools will generally not be # bootstrapped unless BOOTSTRAP_ALL_TOOL is set. 
However, when building on a # Linux or MacOS host the host versions are incompatible so we need to build # them from the source tree. Usually the link name will be the same as the subdir, # but some directories such as grep or test install multiple binaries. In that # case we use the _basic_bootstrap_tools_multilink variable, which pairs a # subdirectory with a comma-separated list of files. _basic_bootstrap_tools_multilink=usr.bin/grep grep,egrep,fgrep _basic_bootstrap_tools_multilink+=bin/test test,[ # bootstrap tools needed by buildworld: _basic_bootstrap_tools=usr.bin/awk usr.bin/cut bin/expr usr.bin/gencat \ usr.bin/join usr.bin/mktemp bin/rmdir usr.bin/sed usr.bin/sort \ usr.bin/truncate usr.bin/tsort # elf2aout is required for sparc64 build _basic_bootstrap_tools+=usr.bin/elf2aout # file2c is required for building usr.sbin/config: _basic_bootstrap_tools+=usr.bin/file2c # uuencode/uudecode required for share/tabset _basic_bootstrap_tools+=usr.bin/uuencode usr.bin/uudecode # xargs is required by mkioctls _basic_bootstrap_tools+=usr.bin/xargs # cap_mkdb is required for share/termcap: _basic_bootstrap_tools+=usr.bin/cap_mkdb # ldd is required for installcheck (TODO: just always use /usr/bin/ldd instead?) _basic_bootstrap_tools+=usr.bin/ldd # services_mkdb/pwd_mkdb are required for installworld: _basic_bootstrap_tools+=usr.sbin/services_mkdb usr.sbin/pwd_mkdb # sysctl/chflags are required for installkernel: _basic_bootstrap_tools+=sbin/sysctl bin/chflags .if ${MK_AMD} != "no" # unifdef is only used by usr.sbin/amd/libamu/Makefile _basic_bootstrap_tools+=usr.bin/unifdef .endif .if ${MK_BOOT} != "no" _basic_bootstrap_tools+=bin/dd # xz/unxz is used by EFI _basic_bootstrap_tools_multilink+=usr.bin/xz xz,unxz # md5 is used by boot/beri (and possibly others) _basic_bootstrap_tools+=sbin/md5 .if defined(BOOTSTRAP_ALL_TOOLS) ${_bt}-sbin/md5: ${_bt}-lib/libmd .endif .endif .if ${MK_ZONEINFO} != "no" _basic_bootstrap_tools+=usr.sbin/zic usr.sbin/tzsetup .endif .if defined(BOOTSTRAP_ALL_TOOLS) _other_bootstrap_tools+=${_basic_bootstrap_tools} .for _subdir _links in ${_basic_bootstrap_tools_multilink} _other_bootstrap_tools+=${_subdir} .endfor ${_bt}-usr.bin/awk: ${_bt_lex_depend} ${_bt}-usr.bin/yacc ${_bt}-bin/expr: ${_bt_lex_depend} ${_bt}-usr.bin/yacc # If we are bootstrapping file2c, we have to build it before config: ${_bt}-usr.sbin/config: ${_bt}-usr.bin/file2c ${_bt_lex_depend} # Note: no symlink to make/bmake in the !BOOTSTRAP_ALL_TOOLS case here since # the links to make/bmake will already have been created in the # `make legacy` step. Not adding a link to make is important on non-FreeBSD # since "make" will usually point to GNU make there. _other_bootstrap_tools+=usr.bin/bmake .else # All tools in _basic_bootstrap_tools have the same name as the subdirectory # so we can use :T to get the name of the symlinks that we need to create. _bootstrap_tools_links+=${_basic_bootstrap_tools:T} .for _subdir _links in ${_basic_bootstrap_tools_multilink} _bootstrap_tools_links+=${_links:S/,/ /g} .endfor .endif # defined(BOOTSTRAP_ALL_TOOLS) # Link the tools that we need for building but don't need to bootstrap because # the host version is known to be compatible into ${WORLDTMP}/legacy. # We do this before building any of the bootstrap tools in case they depend on # the presence of any of the links (e.g. m4/lex/awk) ${_bt}-links: .PHONY .for _tool in ${_bootstrap_tools_links} ${_bt}-link-${_tool}: .PHONY .MAKE @if [ !
-e "${WORLDTMP}/legacy/bin/${_tool}" ]; then \ source_path=`which ${_tool}`; \ if [ ! -e "$${source_path}" ] ; then \ echo "Cannot find host tool '${_tool}'"; false; \ fi; \ ln -sfnv "$${source_path}" "${WORLDTMP}/legacy/bin/${_tool}"; \ fi ${_bt}-links: ${_bt}-link-${_tool} .endfor bootstrap-tools: ${_bt}-links .PHONY # Please document (add comment) why something is in 'bootstrap-tools'. # Try to bound the building of the bootstrap-tool to just the # FreeBSD versions that need the tool built at this stage of the build. .for _tool in \ ${_clang_tblgen} \ ${_kerberos5_bootstrap_tools} \ ${_strfile} \ ${_gperf} \ ${_dtc} \ ${_cat} \ ${_kbdcontrol} \ ${_elftoolchain_libs} \ usr.bin/lorder \ lib/libopenbsd \ usr.bin/mandoc \ usr.bin/rpcgen \ ${_yacc} \ ${_m4} \ ${_lex} \ ${_other_bootstrap_tools} \ usr.bin/xinstall \ ${_gensnmptree} \ usr.sbin/config \ ${_crunchide} \ ${_crunchgen} \ ${_nmtree} \ ${_vtfontcvt} \ ${_localedef} ${_bt}-${_tool}: ${_bt}-links .PHONY .MAKE ${_+_}@${ECHODIR} "===> ${_tool} (obj,all,install)"; \ cd ${.CURDIR}/${_tool}; \ if [ -z "${NO_OBJWALK}" ]; then ${MAKE} DIRPRFX=${_tool}/ obj; fi; \ if [ "${_tool}" = "usr.bin/lex" ]; then \ ${MAKE} DIRPRFX=${_tool}/ bootstrap; \ fi; \ ${MAKE} DIRPRFX=${_tool}/ all; \ ${MAKE} DIRPRFX=${_tool}/ DESTDIR=${WORLDTMP}/legacy install bootstrap-tools: ${_bt}-${_tool} .endfor # # build-tools: Build special purpose build tools # .if !defined(NO_SHARE) && ${MK_SYSCONS} != "no" _share= share/syscons/scrnmaps .endif .if ${MK_GCC} != "no" _gcc_tools= gnu/usr.bin/cc/cc_tools .endif .if ${MK_RESCUE} != "no" # rescue includes programs that have build-tools targets _rescue=rescue/rescue .endif .if ${MK_TCSH} != "no" _tcsh=bin/csh .endif .if ${MK_FILE} != "no" _libmagic=lib/libmagic .endif .if ${MK_PMC} != "no" && \ (${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386") _jevents=lib/libpmc/pmu-events .endif # kernel-toolchain skips _cleanobj, so handle cleaning up previous # build-tools directories if needed. .if !defined(NO_CLEAN) && make(kernel-toolchain) _bt_clean= ${CLEANDIR} .endif .for _tool in \ ${_tcsh} \ bin/sh \ ${LOCAL_TOOL_DIRS} \ ${_jevents} \ lib/ncurses/ncurses \ lib/ncurses/ncursesw \ ${_rescue} \ ${_share} \ usr.bin/awk \ ${_libmagic} \ usr.bin/mkesdb_static \ usr.bin/mkcsmapper_static \ usr.bin/vi/catalog \ ${_gcc_tools} build-tools_${_tool}: .PHONY ${_+_}@${ECHODIR} "===> ${_tool} (${_bt_clean:D${_bt_clean},}obj,build-tools)"; \ cd ${.CURDIR}/${_tool}; \ if [ -n "${_bt_clean}" ]; then ${MAKE} DIRPRFX=${_tool}/ ${_bt_clean}; fi; \ if [ -z "${NO_OBJWALK}" ]; then ${MAKE} DIRPRFX=${_tool}/ obj; fi; \ ${MAKE} DIRPRFX=${_tool}/ build-tools build-tools: build-tools_${_tool} .endfor # # kernel-tools: Build kernel-building tools # kernel-tools: .PHONY mkdir -p ${WORLDTMP}/usr ${WORLDTMP_MTREE} -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${WORLDTMP}/usr >/dev/null # # cross-tools: All the tools needed to build the rest of the system after # we get done with the earlier stages. It is the last set of tools needed # to begin building the target binaries. # .if ${TARGET_ARCH} != ${MACHINE_ARCH} || ${BUILD_WITH_STRICT_TMPPATH} != 0 .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386" _btxld= usr.sbin/btxld .endif .endif # Rebuild ctfconvert and ctfmerge to avoid difficult-to-diagnose failures # resulting from missing bug fixes or ELF Toolchain updates. .if ${MK_CDDL} != "no" _dtrace_tools= cddl/lib/libctf cddl/usr.bin/ctfconvert \ cddl/usr.bin/ctfmerge .endif # If we're given an XAS, don't build binutils. 
.if ${XAS:M/*} == "" .if ${MK_BINUTILS_BOOTSTRAP} != "no" _binutils= gnu/usr.bin/binutils .endif .if ${MK_ELFTOOLCHAIN_BOOTSTRAP} != "no" _elftctools= lib/libelftc \ lib/libpe \ usr.bin/objcopy \ usr.bin/nm \ usr.bin/size \ usr.bin/strings # These are not required by the build, but can be useful for developers who # cross-build on a FreeBSD 10 host: _elftctools+= usr.bin/addr2line .endif .elif ${TARGET_ARCH} != ${MACHINE_ARCH} && ${MK_ELFTOOLCHAIN_BOOTSTRAP} != "no" # If cross-building with an external binutils we still need to build strip for # the target (for at least crunchide). _elftctools= lib/libelftc \ lib/libpe \ usr.bin/objcopy .endif .if ${MK_CLANG_BOOTSTRAP} != "no" _clang= usr.bin/clang .endif .if ${MK_LLD_BOOTSTRAP} != "no" _lld= usr.bin/clang/lld .endif .if ${MK_CLANG_BOOTSTRAP} != "no" || ${MK_LLD_BOOTSTRAP} != "no" _clang_libs= lib/clang .endif .if ${MK_GCC_BOOTSTRAP} != "no" _gcc= gnu/usr.bin/cc .endif .if ${MK_USB} != "no" _usb_tools= stand/usb/tools .endif .if ${BUILD_WITH_STRICT_TMPPATH} != 0 || defined(BOOTSTRAP_ALL_TOOLS) _ar=usr.bin/ar .endif cross-tools: .MAKE .PHONY .for _tool in \ ${LOCAL_XTOOL_DIRS} \ ${_ar} \ ${_clang_libs} \ ${_clang} \ ${_lld} \ ${_binutils} \ ${_elftctools} \ ${_dtrace_tools} \ ${_gcc} \ ${_btxld} \ ${_usb_tools} ${_+_}@${ECHODIR} "===> ${_tool} (obj,all,install)"; \ cd ${.CURDIR}/${_tool}; \ if [ -z "${NO_OBJWALK}" ]; then ${MAKE} DIRPRFX=${_tool}/ obj; fi; \ ${MAKE} DIRPRFX=${_tool}/ all; \ ${MAKE} DIRPRFX=${_tool}/ DESTDIR=${WORLDTMP} install .endfor # # native-xtools is the current target for qemu-user cross builds of ports # via poudriere and the imgact_binmisc kernel module. # This target merely builds a toolchan/sysroot, then builds the tools it wants # with the options it wants in a special MAKEOBJDIRPREFIX, using the toolchain # already built. It then installs the static tools to NXBDESTDIR for Poudriere # to pickup. # NXBOBJROOT= ${OBJROOT}${MACHINE}.${MACHINE_ARCH}/nxb/ NXBOBJTOP= ${NXBOBJROOT}${NXB_TARGET}.${NXB_TARGET_ARCH} NXTP?= /nxb-bin .if ${NXTP:N/*} .error NXTP variable should be an absolute path .endif NXBDESTDIR?= ${DESTDIR}${NXTP} # This is the list of tools to be built/installed as static and where # appropriate to build for the given TARGET.TARGET_ARCH. NXBDIRS+= \ bin/cat \ bin/chmod \ bin/cp \ ${_tcsh} \ bin/echo \ bin/expr \ bin/hostname \ bin/ln \ bin/ls \ bin/mkdir \ bin/mv \ bin/ps \ bin/realpath \ bin/rm \ bin/rmdir \ bin/sh \ bin/sleep \ sbin/md5 \ sbin/sysctl \ usr.bin/addr2line \ usr.bin/ar \ usr.bin/awk \ usr.bin/basename \ usr.bin/bmake \ usr.bin/bzip2 \ usr.bin/cmp \ usr.bin/diff \ usr.bin/dirname \ usr.bin/objcopy \ usr.bin/env \ usr.bin/fetch \ usr.bin/find \ usr.bin/grep \ usr.bin/gzip \ usr.bin/id \ usr.bin/lex \ usr.bin/limits \ usr.bin/lorder \ usr.bin/mandoc \ usr.bin/mktemp \ usr.bin/mt \ usr.bin/nm \ usr.bin/patch \ usr.bin/readelf \ usr.bin/sed \ usr.bin/size \ usr.bin/sort \ usr.bin/strings \ usr.bin/tar \ usr.bin/touch \ usr.bin/tr \ usr.bin/true \ usr.bin/uniq \ usr.bin/unzip \ usr.bin/xargs \ usr.bin/xinstall \ usr.bin/xz \ usr.bin/yacc \ usr.sbin/chown SUBDIR_DEPEND_usr.bin/clang= lib/clang .if ${MK_CLANG} != "no" NXBDIRS+= lib/clang NXBDIRS+= usr.bin/clang .endif .if ${MK_GCC} != "no" NXBDIRS+= gnu/usr.bin/cc .endif .if ${MK_BINUTILS} != "no" NXBDIRS+= gnu/usr.bin/binutils .endif # XXX: native-xtools passes along ${NXBDIRS} in SUBDIR_OVERRIDE that needs # to be evaluated after NXBDIRS is set. 
.if make(install) && !empty(SUBDIR_OVERRIDE) SUBDIR= ${SUBDIR_OVERRIDE} .endif NXBMAKEARGS+= \ OBJTOP=${NXBOBJTOP:Q} \ OBJROOT=${NXBOBJROOT:Q} \ MAKEOBJDIRPREFIX= \ -DNO_SHARED \ -DNO_CPU_CFLAGS \ -DNO_PIC \ SSP_CFLAGS= \ MK_CLANG_EXTRAS=no \ MK_CLANG_FULL=no \ MK_CTF=no \ MK_DEBUG_FILES=no \ MK_GDB=no \ MK_HTML=no \ MK_LLDB=no \ MK_MAN=no \ MK_MAN_UTILS=yes \ MK_OFED=no \ MK_OPENSSH=no \ MK_PROFILE=no \ MK_RETPOLINE=no \ MK_SENDMAIL=no \ MK_SVNLITE=no \ MK_TESTS=no \ MK_WARNS=no \ MK_ZFS=no .if make(native-xtools*) && \ (!defined(NXB_TARGET) || !defined(NXB_TARGET_ARCH)) .error Missing NXB_TARGET / NXB_TARGET_ARCH .endif # For 'toolchain' we want to produce native binaries that themselves generate # native binaries. NXBTMAKE= ${NXBMAKEENV} ${MAKE} ${NXBMAKEARGS:N-DNO_PIC:N-DNO_SHARED} \ TARGET=${MACHINE} TARGET_ARCH=${MACHINE_ARCH} # For 'everything' we want to produce native binaries (hence -target to # be MACHINE) that themselves generate TARGET.TARGET_ARCH binaries. # TARGET/TARGET_ARCH are still passed along from user. # # Use the toolchain we create as an external toolchain. .if ${USING_SYSTEM_COMPILER} == "yes" || ${XCC:N${CCACHE_BIN}:M/*} NXBMAKE+= XCC="${XCC}" \ XCXX="${XCXX}" \ XCPP="${XCPP}" .else NXBMAKE+= XCC="${NXBOBJTOP}/tmp/usr/bin/cc" \ XCXX="${NXBOBJTOP}/tmp/usr/bin/c++" \ XCPP="${NXBOBJTOP}/tmp/usr/bin/cpp" .endif NXBMAKE+= ${NXBMAKEENV} ${MAKE} -f Makefile.inc1 ${NXBMAKEARGS} \ TARGET=${NXB_TARGET} TARGET_ARCH=${NXB_TARGET_ARCH} \ TARGET_TRIPLE=${MACHINE_TRIPLE:Q} # NXBDIRS is improperly based on MACHINE rather than NXB_TARGET. Need to # invoke a sub-make to reevaluate MK_GCC, etc, for NXBDIRS. NXBMAKE+= SUBDIR_OVERRIDE='$${NXBDIRS:M*}' # Need to avoid the -isystem logic when using clang as an external toolchain # even if the TARGET being built for wants GCC. NXBMAKE+= WANT_COMPILER_TYPE='$${X_COMPILER_TYPE}' native-xtools: .PHONY ${_+_}cd ${.CURDIR}; ${NXBTMAKE} _cleanobj MK_GCC=yes # Build the bootstrap/host/cross tools that produce native binaries # Pass along MK_GCC=yes to ensure GCC-needed build tools are built. # We don't quite know what the NXB_TARGET wants so just build it. ${_+_}cd ${.CURDIR}; ${NXBTMAKE} kernel-toolchain MK_GCC=yes # Populate includes/libraries sysroot that produce native binaries. # This is split out from 'toolchain' above mostly so that target LLVM # libraries have a proper LLVM_DEFAULT_TARGET_TRIPLE without # polluting the cross-compiler build. The LLVM/GCC libs are skipped # here to avoid the problem but are kept in 'toolchain' so that # needed build tools are built. ${_+_}cd ${.CURDIR}; ${NXBTMAKE} _includes MK_CLANG=no MK_GCC=no ${_+_}cd ${.CURDIR}; ${NXBTMAKE} _libraries MK_CLANG=no MK_GCC=no # Clean out improper TARGET=MACHINE files ${_+_}cd ${.CURDIR}/gnu/usr.bin/cc/cc_tools; ${NXBTMAKE} cleandir .if !defined(NO_OBJWALK) ${_+_}cd ${.CURDIR}; ${NXBMAKE} _obj .endif ${_+_}cd ${.CURDIR}; ${NXBMAKE} everything @echo ">> native-xtools done. 
Use 'make native-xtools-install' to install to a given DESTDIR" native-xtools-install: .PHONY mkdir -p ${NXBDESTDIR}/bin ${NXBDESTDIR}/sbin ${NXBDESTDIR}/usr ${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${NXBDESTDIR}/usr >/dev/null ${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.include.dist \ -p ${NXBDESTDIR}/usr/include >/dev/null ${_+_}cd ${.CURDIR}; ${NXBMAKE} \ DESTDIR=${NXBDESTDIR} \ -DNO_ROOT \ install # # hierarchy - ensure that all the needed directories are present # hierarchy hier: .MAKE .PHONY ${_+_}cd ${.CURDIR}/etc; ${HMAKE} distrib-dirs # # libraries - build all libraries, and install them under ${DESTDIR}. # # The list of libraries with dependents (${_prebuild_libs}) and their # interdependencies (__L) are built automatically by the # ${.CURDIR}/tools/make_libdeps.sh script. # libraries: .MAKE .PHONY ${_+_}cd ${.CURDIR}; \ ${MAKE} -f Makefile.inc1 _prereq_libs; \ ${MAKE} -f Makefile.inc1 _startup_libs; \ ${MAKE} -f Makefile.inc1 _prebuild_libs; \ ${MAKE} -f Makefile.inc1 _generic_libs # # static libgcc.a prerequisite for shared libc # _prereq_libs= lib/libcompiler_rt .if ${MK_SSP} != "no" _prereq_libs+= gnu/lib/libssp/libssp_nonshared .endif # These dependencies are not automatically generated: # # gnu/lib/csu, gnu/lib/libgcc, lib/csu and lib/libc must be built before # all shared libraries for ELF. # _startup_libs= gnu/lib/csu _startup_libs+= lib/csu _startup_libs+= lib/libcompiler_rt _startup_libs+= lib/libc _startup_libs+= lib/libc_nonshared .if ${MK_LIBCPLUSPLUS} != "no" _startup_libs+= lib/libcxxrt .endif .if ${MK_LLVM_LIBUNWIND} != "no" _prereq_libs+= lib/libgcc_eh lib/libgcc_s _startup_libs+= lib/libgcc_eh lib/libgcc_s lib/libgcc_s__L: lib/libc__L lib/libgcc_s__L: lib/libc_nonshared__L .if ${MK_LIBCPLUSPLUS} != "no" lib/libcxxrt__L: lib/libgcc_s__L .endif .else # MK_LLVM_LIBUNWIND == no _prereq_libs+= gnu/lib/libgcc _startup_libs+= gnu/lib/libgcc gnu/lib/libgcc__L: lib/libc__L gnu/lib/libgcc__L: lib/libc_nonshared__L .if ${MK_LIBCPLUSPLUS} != "no" lib/libcxxrt__L: gnu/lib/libgcc__L .endif .endif _prebuild_libs= ${_kerberos5_lib_libasn1} \ ${_kerberos5_lib_libhdb} \ ${_kerberos5_lib_libheimbase} \ ${_kerberos5_lib_libheimntlm} \ ${_libsqlite3} \ ${_kerberos5_lib_libheimipcc} \ ${_kerberos5_lib_libhx509} ${_kerberos5_lib_libkrb5} \ ${_kerberos5_lib_libroken} \ ${_kerberos5_lib_libwind} \ lib/libbz2 ${_libcom_err} lib/libcrypt \ lib/libelf lib/libexpat \ lib/libfigpar \ ${_lib_libgssapi} \ lib/libkiconv lib/libkvm lib/liblzma lib/libmd lib/libnv \ ${_lib_casper} \ lib/ncurses/ncurses lib/ncurses/ncursesw \ lib/libopie lib/libpam/libpam ${_lib_libthr} \ ${_lib_libradius} lib/libsbuf lib/libtacplus \ lib/libgeom \ ${_cddl_lib_libumem} ${_cddl_lib_libnvpair} \ ${_cddl_lib_libuutil} \ ${_cddl_lib_libavl} \ ${_cddl_lib_libzfs_core} ${_cddl_lib_libzfs} \ ${_cddl_lib_libctf} \ lib/libufs \ lib/libutil lib/libpjdlog ${_lib_libypclnt} lib/libz lib/msun \ ${_secure_lib_libcrypto} ${_secure_lib_libssl} \ ${_lib_libldns} ${_secure_lib_libssh} .if ${MK_GNUCXX} != "no" _prebuild_libs+= gnu/lib/libstdc++ gnu/lib/libsupc++ gnu/lib/libstdc++__L: lib/msun__L gnu/lib/libsupc++__L: gnu/lib/libstdc++__L .endif .if ${MK_DIALOG} != "no" _prebuild_libs+= gnu/lib/libdialog gnu/lib/libdialog__L: lib/msun__L lib/ncurses/ncursesw__L .endif .if ${MK_LIBCPLUSPLUS} != "no" _prebuild_libs+= lib/libc++ .endif lib/libgeom__L: lib/libexpat__L lib/libkvm__L: lib/libelf__L .if ${MK_LIBTHR} != "no" _lib_libthr= lib/libthr .endif .if ${MK_RADIUS_SUPPORT} != "no" _lib_libradius= lib/libradius 
.endif .if ${MK_OFED} != "no" _prebuild_libs+= \ lib/ofed/libibverbs \ lib/ofed/libibmad \ lib/ofed/libibumad \ lib/ofed/complib \ lib/ofed/libmlx5 lib/ofed/libibmad__L: lib/ofed/libibumad__L lib/ofed/complib__L: lib/libthr__L lib/ofed/libmlx5__L: lib/ofed/libibverbs__L lib/libthr__L .endif .if ${MK_CASPER} != "no" _lib_casper= lib/libcasper .endif lib/libpjdlog__L: lib/libutil__L lib/libcasper__L: lib/libnv__L lib/liblzma__L: lib/libthr__L _generic_libs= ${_cddl_lib} gnu/lib ${_kerberos5_lib} lib ${_secure_lib} usr.bin/lex/lib .if ${MK_IPFILTER} != "no" _generic_libs+= sbin/ipf/libipf .endif .for _DIR in ${LOCAL_LIB_DIRS} .if ${_DIR} == ".WAIT" || (empty(_generic_libs:M${_DIR}) && exists(${.CURDIR}/${_DIR}/Makefile)) _generic_libs+= ${_DIR} .endif .endfor lib/libopie__L lib/libtacplus__L: lib/libmd__L .if ${MK_CDDL} != "no" _cddl_lib_libumem= cddl/lib/libumem _cddl_lib_libnvpair= cddl/lib/libnvpair _cddl_lib_libavl= cddl/lib/libavl _cddl_lib_libuutil= cddl/lib/libuutil .if ${MK_ZFS} != "no" _cddl_lib_libzfs_core= cddl/lib/libzfs_core _cddl_lib_libzfs= cddl/lib/libzfs cddl/lib/libzfs_core__L: cddl/lib/libnvpair__L cddl/lib/libzfs__L: cddl/lib/libzfs_core__L lib/msun__L lib/libutil__L cddl/lib/libzfs__L: lib/libthr__L lib/libmd__L lib/libz__L cddl/lib/libumem__L cddl/lib/libzfs__L: cddl/lib/libuutil__L cddl/lib/libavl__L lib/libgeom__L lib/libbe__L: cddl/lib/libzfs__L .endif _cddl_lib_libctf= cddl/lib/libctf _cddl_lib= cddl/lib cddl/lib/libctf__L: lib/libz__L .endif # cddl/lib/libdtrace requires lib/libproc and lib/librtld_db; it's only built # on select architectures though (see cddl/lib/Makefile) .if ${MACHINE_CPUARCH} != "sparc64" _prebuild_libs+= lib/libprocstat lib/libproc lib/librtld_db lib/libprocstat__L: lib/libelf__L lib/libkvm__L lib/libutil__L lib/libproc__L: lib/libprocstat__L lib/librtld_db__L: lib/libprocstat__L .endif .if ${MK_CRYPT} != "no" .if ${MK_OPENSSL} != "no" _secure_lib_libcrypto= secure/lib/libcrypto _secure_lib_libssl= secure/lib/libssl lib/libradius__L secure/lib/libssl__L: secure/lib/libcrypto__L secure/lib/libcrypto__L: lib/libthr__L .if ${MK_LDNS} != "no" _lib_libldns= lib/libldns lib/libldns__L: secure/lib/libssl__L .endif .if ${MK_OPENSSH} != "no" _secure_lib_libssh= secure/lib/libssh secure/lib/libssh__L: lib/libz__L secure/lib/libcrypto__L lib/libcrypt__L .if ${MK_LDNS} != "no" secure/lib/libssh__L: lib/libldns__L .endif .if ${MK_GSSAPI} != "no" && ${MK_KERBEROS_SUPPORT} != "no" secure/lib/libssh__L: lib/libgssapi__L kerberos5/lib/libkrb5__L \ kerberos5/lib/libhx509__L kerberos5/lib/libasn1__L lib/libcom_err__L \ lib/libmd__L kerberos5/lib/libroken__L .endif .endif .endif _secure_lib= secure/lib .endif .if ${MK_KERBEROS} != "no" kerberos5/lib/libasn1__L: lib/libcom_err__L kerberos5/lib/libroken__L kerberos5/lib/libhdb__L: kerberos5/lib/libasn1__L lib/libcom_err__L \ kerberos5/lib/libkrb5__L kerberos5/lib/libroken__L \ kerberos5/lib/libwind__L lib/libsqlite3__L kerberos5/lib/libheimntlm__L: secure/lib/libcrypto__L kerberos5/lib/libkrb5__L \ kerberos5/lib/libroken__L lib/libcom_err__L kerberos5/lib/libhx509__L: kerberos5/lib/libasn1__L lib/libcom_err__L \ secure/lib/libcrypto__L kerberos5/lib/libroken__L kerberos5/lib/libwind__L kerberos5/lib/libkrb5__L: kerberos5/lib/libasn1__L lib/libcom_err__L \ lib/libcrypt__L secure/lib/libcrypto__L kerberos5/lib/libhx509__L \ kerberos5/lib/libroken__L kerberos5/lib/libwind__L \ kerberos5/lib/libheimbase__L kerberos5/lib/libheimipcc__L kerberos5/lib/libroken__L: lib/libcrypt__L kerberos5/lib/libwind__L: 
kerberos5/lib/libroken__L lib/libcom_err__L kerberos5/lib/libheimbase__L: lib/libthr__L kerberos5/lib/libheimipcc__L: kerberos5/lib/libroken__L kerberos5/lib/libheimbase__L lib/libthr__L .endif lib/libsqlite3__L: lib/libthr__L .if ${MK_GSSAPI} != "no" _lib_libgssapi= lib/libgssapi .endif .if ${MK_KERBEROS} != "no" _kerberos5_lib= kerberos5/lib _kerberos5_lib_libasn1= kerberos5/lib/libasn1 _kerberos5_lib_libhdb= kerberos5/lib/libhdb _kerberos5_lib_libheimbase= kerberos5/lib/libheimbase _kerberos5_lib_libkrb5= kerberos5/lib/libkrb5 _kerberos5_lib_libhx509= kerberos5/lib/libhx509 _kerberos5_lib_libroken= kerberos5/lib/libroken _kerberos5_lib_libheimntlm= kerberos5/lib/libheimntlm _libsqlite3= lib/libsqlite3 _kerberos5_lib_libheimipcc= kerberos5/lib/libheimipcc _kerberos5_lib_libwind= kerberos5/lib/libwind _libcom_err= lib/libcom_err .endif .if ${MK_NIS} != "no" _lib_libypclnt= lib/libypclnt .endif .if ${MK_OPENSSL} == "no" lib/libradius__L: lib/libmd__L .endif lib/libproc__L: \ ${_cddl_lib_libctf:D${_cddl_lib_libctf}__L} lib/libelf__L lib/librtld_db__L lib/libutil__L .if ${MK_CXX} != "no" .if ${MK_LIBCPLUSPLUS} != "no" lib/libproc__L: lib/libcxxrt__L .else # This implies MK_GNUCXX != "no"; see lib/libproc lib/libproc__L: gnu/lib/libsupc++__L .endif .endif .for _lib in ${_prereq_libs} ${_lib}__PL: .PHONY .MAKE .if !defined(_MKSHOWCONFIG) && exists(${.CURDIR}/${_lib}) ${_+_}@${ECHODIR} "===> ${_lib} (obj,all,install)"; \ cd ${.CURDIR}/${_lib}; \ if [ -z "${NO_OBJWALK}" ]; then ${MAKE} MK_TESTS=no DIRPRFX=${_lib}/ obj; fi; \ ${MAKE} MK_TESTS=no MK_PROFILE=no -DNO_PIC \ DIRPRFX=${_lib}/ all; \ ${MAKE} MK_TESTS=no MK_PROFILE=no -DNO_PIC \ DIRPRFX=${_lib}/ install .endif .endfor .for _lib in ${_startup_libs} ${_prebuild_libs} ${_generic_libs} ${_lib}__L: .PHONY .MAKE .if !defined(_MKSHOWCONFIG) && exists(${.CURDIR}/${_lib}) ${_+_}@${ECHODIR} "===> ${_lib} (obj,all,install)"; \ cd ${.CURDIR}/${_lib}; \ if [ -z "${NO_OBJWALK}" ]; then ${MAKE} MK_TESTS=no DIRPRFX=${_lib}/ obj; fi; \ ${MAKE} MK_TESTS=no DIRPRFX=${_lib}/ all; \ ${MAKE} MK_TESTS=no DIRPRFX=${_lib}/ install .endif .endfor _prereq_libs: ${_prereq_libs:S/$/__PL/} _startup_libs: ${_startup_libs:S/$/__L/} _prebuild_libs: ${_prebuild_libs:S/$/__L/} _generic_libs: ${_generic_libs:S/$/__L/} # Enable SUBDIR_PARALLEL when not calling 'make all', unless called from # 'everything' with _PARALLEL_SUBDIR_OK set. This is because it is unlikely # that running 'make all' from the top-level, especially with a SUBDIR_OVERRIDE # or LOCAL_DIRS set, will have a reliable build if SUBDIRs are built in # parallel. This is safe for the world stage of buildworld though since it has # already built libraries in a proper order and installed includes into # WORLDTMP. Special handling is done for SUBDIR ordering for 'install*' to # avoid trashing a system if it crashes mid-install. .if !make(all) || defined(_PARALLEL_SUBDIR_OK) SUBDIR_PARALLEL= .endif .include .if make(check-old) || make(check-old-dirs) || \ make(check-old-files) || make(check-old-libs) || \ make(delete-old) || make(delete-old-dirs) || \ make(delete-old-files) || make(delete-old-libs) # # check for / delete old files section # .include "ObsoleteFiles.inc" OLD_LIBS_MESSAGE="Please be sure no application still uses those libraries, \ else you can not start such an application. Consult UPDATING for more \ information regarding how to cope with the removal/revision bump of a \ specific library." 
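# Illustrative usage sketch for the check/delete targets defined below:
#	make check-old
#	make -DBATCH_DELETE_OLD_FILES delete-old delete-old-libs
# Defining BATCH_DELETE_OLD_FILES switches rm from interactive (-i) to
# verbose (-v) mode, so no per-file confirmation is requested.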
.if !defined(BATCH_DELETE_OLD_FILES) RM_I=-i .else RM_I=-v .endif delete-old-files: .PHONY @echo ">>> Removing old files (only deletes safe to delete libs)" # Ask for every old file if the user really wants to remove it. # It's annoying, but better safe than sorry. # NB: We cannot pass the list of OLD_FILES as a parameter because the # argument list will get too long. Using .for/.endfor make "loops" will make # the Makefile parser segfault. @exec 3<&0; \ cd ${.CURDIR}; \ ${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_FILES -V "OLD_FILES:Musr/share/*.gz:R" | xargs -n1 | sort | \ while read file; do \ if [ -f "${DESTDIR}/$${file}" -o -L "${DESTDIR}/$${file}" ]; then \ chflags noschg "${DESTDIR}/$${file}" 2>/dev/null || true; \ rm ${RM_I} "${DESTDIR}/$${file}" <&3; \ fi; \ for ext in debug symbols; do \ if ! [ -e "${DESTDIR}/$${file}" ] && [ -f \ "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" ]; then \ rm ${RM_I} "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" \ <&3; \ fi; \ done; \ done # Remove catpages without corresponding manpages. @exec 3<&0; \ find ${DESTDIR}/usr/share/man/cat* ! -type d 2>/dev/null | sort | \ sed -ep -e's:${DESTDIR}/usr/share/man/cat:${DESTDIR}/usr/share/man/man:' | \ while read catpage; do \ read manpage; \ if [ ! -e "$${manpage}" ]; then \ rm ${RM_I} $${catpage} <&3; \ fi; \ done @echo ">>> Old files removed" check-old-files: .PHONY @echo ">>> Checking for old files" @cd ${.CURDIR}; \ ${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_FILES -V "OLD_FILES:Musr/share/*.gz:R" | xargs -n1 | \ while read file; do \ if [ -f "${DESTDIR}/$${file}" -o -L "${DESTDIR}/$${file}" ]; then \ echo "${DESTDIR}/$${file}"; \ fi; \ for ext in debug symbols; do \ if [ -f "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" ]; then \ echo "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}"; \ fi; \ done; \ done | sort # Check for catpages without corresponding manpages. @find ${DESTDIR}/usr/share/man/cat* ! -type d 2>/dev/null | \ sed -ep -e's:${DESTDIR}/usr/share/man/cat:${DESTDIR}/usr/share/man/man:' | \ while read catpage; do \ read manpage; \ if [ ! -e "$${manpage}" ]; then \ echo $${catpage}; \ fi; \ done | sort delete-old-libs: .PHONY @echo ">>> Removing old libraries" @echo "${OLD_LIBS_MESSAGE}" | fmt @exec 3<&0; \ cd ${.CURDIR}; \ ${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_LIBS | xargs -n1 | sort | \ while read file; do \ if [ -f "${DESTDIR}/$${file}" -o -L "${DESTDIR}/$${file}" ]; then \ chflags noschg "${DESTDIR}/$${file}" 2>/dev/null || true; \ rm ${RM_I} "${DESTDIR}/$${file}" <&3; \ fi; \ for ext in debug symbols; do \ if ! 
[ -e "${DESTDIR}/$${file}" ] && [ -f \ "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" ]; then \ rm ${RM_I} "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" \ <&3; \ fi; \ done; \ done @echo ">>> Old libraries removed" check-old-libs: .PHONY @echo ">>> Checking for old libraries" @cd ${.CURDIR}; \ ${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_LIBS | xargs -n1 | \ while read file; do \ if [ -f "${DESTDIR}/$${file}" -o -L "${DESTDIR}/$${file}" ]; then \ echo "${DESTDIR}/$${file}"; \ fi; \ for ext in debug symbols; do \ if [ -f "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}" ]; then \ echo "${DESTDIR}${DEBUGDIR}/$${file}.$${ext}"; \ fi; \ done; \ done | sort delete-old-dirs: .PHONY @echo ">>> Removing old directories" @cd ${.CURDIR}; \ ${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_DIRS | xargs -n1 | sort -r | \ while read dir; do \ if [ -d "${DESTDIR}/$${dir}" ]; then \ rmdir -v "${DESTDIR}/$${dir}" || true; \ elif [ -L "${DESTDIR}/$${dir}" ]; then \ echo "${DESTDIR}/$${dir} is a link, please remove everything manually."; \ fi; \ if [ -d "${DESTDIR}${DEBUGDIR}/$${dir}" ]; then \ rmdir -v "${DESTDIR}${DEBUGDIR}/$${dir}" || true; \ elif [ -L "${DESTDIR}${DEBUGDIR}/$${dir}" ]; then \ echo "${DESTDIR}${DEBUGDIR}/$${dir} is a link, please remove everything manually."; \ fi; \ done @echo ">>> Old directories removed" check-old-dirs: .PHONY @echo ">>> Checking for old directories" @cd ${.CURDIR}; \ ${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_DIRS | xargs -n1 | sort -r | \ while read dir; do \ if [ -d "${DESTDIR}/$${dir}" ]; then \ echo "${DESTDIR}/$${dir}"; \ elif [ -L "${DESTDIR}/$${dir}" ]; then \ echo "${DESTDIR}/$${dir} is a link, please remove everything manually."; \ fi; \ if [ -d "${DESTDIR}${DEBUGDIR}/$${dir}" ]; then \ echo "${DESTDIR}${DEBUGDIR}/$${dir}"; \ elif [ -L "${DESTDIR}${DEBUGDIR}/$${dir}" ]; then \ echo "${DESTDIR}${DEBUGDIR}/$${dir} is a link, please remove everything manually."; \ fi; \ done delete-old: delete-old-files delete-old-dirs .PHONY @echo "To remove old libraries run '${MAKE_CMD} delete-old-libs'." check-old: check-old-files check-old-libs check-old-dirs .PHONY @echo "To remove old files and directories run '${MAKE_CMD} delete-old'." @echo "To remove old libraries run '${MAKE_CMD} delete-old-libs'." .endif # # showconfig - show build configuration. # showconfig: .PHONY @(${MAKE} -n -f ${.CURDIR}/sys/conf/kern.opts.mk -V dummy -dg1 UPDATE_DEPENDFILE=no NO_OBJ=yes; \ ${MAKE} -n -f ${.CURDIR}/share/mk/src.opts.mk -V dummy -dg1 UPDATE_DEPENDFILE=no NO_OBJ=yes) 2>&1 | grep ^MK_ | sort -u .if !empty(KRNLOBJDIR) && !empty(KERNCONF) DTBOUTPUTPATH= ${KRNLOBJDIR}/${KERNCONF}/ .if !defined(FDT_DTS_FILE) || empty(FDT_DTS_FILE) .if !defined(_MKSHOWCONFIG) && exists(${KERNCONFDIR}/${KERNCONF}) FDT_DTS_FILE!= awk 'BEGIN {FS="="} /^makeoptions[[:space:]]+FDT_DTS_FILE/ {print $$2}' \ '${KERNCONFDIR}/${KERNCONF}' ; echo .endif .endif .endif .if !defined(DTBOUTPUTPATH) || !exists(${DTBOUTPUTPATH}) DTBOUTPUTPATH= ${.CURDIR} .endif # # Build 'standalone' Device Tree Blob # builddtb: .PHONY @PATH=${TMPPATH} MACHINE=${TARGET} \ ${.CURDIR}/sys/tools/fdt/make_dtb.sh ${.CURDIR}/sys \ "${FDT_DTS_FILE}" ${DTBOUTPUTPATH} ############### # cleanworld # In the following, the first 'rm' in a series will usually remove all # files and directories. If it does not, then there are probably some # files with file flags set, so this unsets them and tries the 'rm' a # second time. 
There are situations where this target will be cleaning # some directories via more than one method, but that duplication is # needed to correctly handle all the possible situations. Removing all # files without file flags set in the first 'rm' instance saves time, # because 'chflags' will need to operate on fewer files afterwards. # # It is expected that BW_CANONICALOBJDIR == the CANONICALOBJDIR as would be # created by bsd.obj.mk, except that we don't want to .include that file # in this makefile. We don't do a cleandir walk if MK_AUTO_OBJ is yes # since it is not possible for files to land in the wrong place. # .if make(cleanworld) BW_CANONICALOBJDIR:=${OBJTOP}/ .elif make(cleanuniverse) BW_CANONICALOBJDIR:=${OBJROOT} .if ${MK_UNIFIED_OBJDIR} == "no" .error ${.TARGETS} only supported with WITH_UNIFIED_OBJDIR enabled. .endif .endif cleanworld cleanuniverse: .PHONY .if !empty(BW_CANONICALOBJDIR) && exists(${BW_CANONICALOBJDIR}) && \ ${.CURDIR:tA} != ${BW_CANONICALOBJDIR:tA} -rm -rf ${BW_CANONICALOBJDIR}* -chflags -R 0 ${BW_CANONICALOBJDIR} rm -rf ${BW_CANONICALOBJDIR}* .endif .if make(cleanworld) && ${MK_AUTO_OBJ} == "no" && \ (empty(BW_CANONICALOBJDIR) || ${.CURDIR:tA} == ${BW_CANONICALOBJDIR:tA}) .if ${.CURDIR} == ${.OBJDIR} || ${.CURDIR}/obj == ${.OBJDIR} # To be safe in this case, fall back to a 'make cleandir' ${_+_}@cd ${.CURDIR}; ${MAKE} cleandir .endif .endif .if ${TARGET} == ${MACHINE} && ${TARGET_ARCH} == ${MACHINE_ARCH} XDEV_CPUTYPE?=${CPUTYPE} .else XDEV_CPUTYPE?=${TARGET_CPUTYPE} .endif NOFUN=-DNO_FSCHG MK_HTML=no -DNO_LINT \ MK_MAN=no MK_NLS=no MK_PROFILE=no \ MK_KERBEROS=no MK_RESCUE=no MK_TESTS=no MK_WARNS=no \ TARGET=${TARGET} TARGET_ARCH=${TARGET_ARCH} \ CPUTYPE=${XDEV_CPUTYPE} XDDIR=${TARGET_ARCH}-freebsd XDTP?=/usr/${XDDIR} .if ${XDTP:N/*} .error XDTP variable should be an absolute path .endif CDBOBJROOT= ${OBJROOT}${MACHINE}.${MACHINE_ARCH}/xdev/ CDBOBJTOP= ${CDBOBJROOT}${XDDIR} CDBENV= \ INSTALL="sh ${.CURDIR}/tools/install.sh" CDENV= ${CDBENV} \ TOOLS_PREFIX=${XDTP} CDMAKEARGS= \ OBJTOP=${CDBOBJTOP:Q} \ OBJROOT=${CDBOBJROOT:Q} CD2MAKEARGS= ${CDMAKEARGS} .if ${WANT_COMPILER_TYPE} == gcc || \ (defined(X_COMPILER_TYPE) && ${X_COMPILER_TYPE} == gcc) # GCC requires -isystem and -L when using a cross-compiler. --sysroot # won't set header path and -L is used to ensure the base library path # is added before the port PREFIX library path. CD2CFLAGS+= -isystem ${XDDESTDIR}/usr/include -L${XDDESTDIR}/usr/lib # GCC requires -B to find /usr/lib/crti.o when using a cross-compiler # combined with --sysroot. CD2CFLAGS+= -B${XDDESTDIR}/usr/lib # Force using libc++ for external GCC. .if defined(X_COMPILER_TYPE) && \ ${X_COMPILER_TYPE} == gcc && ${X_COMPILER_VERSION} >= 40800 CD2CXXFLAGS+= -isystem ${XDDESTDIR}/usr/include/c++/v1 -std=c++11 \ -nostdinc++ .endif .endif CD2CFLAGS+= --sysroot=${XDDESTDIR}/ CD2ENV=${CDENV} CC="${CC} ${CD2CFLAGS}" CXX="${CXX} ${CD2CXXFLAGS} ${CD2CFLAGS}" \ CPP="${CPP} ${CD2CFLAGS}" \ MACHINE=${TARGET} MACHINE_ARCH=${TARGET_ARCH} CDTMP= ${OBJTOP}/${XDDIR}/tmp CDMAKE=${CDENV} PATH=${CDTMP}/usr/bin:${PATH} ${MAKE} ${CDMAKEARGS} ${NOFUN} CD2MAKE=${CD2ENV} PATH=${CDTMP}/usr/bin:${XDDESTDIR}/usr/bin:${PATH} \ ${MAKE} ${CD2MAKEARGS} ${NOFUN} .if ${MK_META_MODE} != "no" # Don't rebuild build-tools targets during normal build. 
CD2MAKE+= BUILD_TOOLS_META=.NOMETA .endif XDDESTDIR=${DESTDIR}${XDTP} .ORDER: xdev-build xdev-install xdev-links xdev: xdev-build xdev-install .PHONY .ORDER: _xb-worldtmp _xb-bootstrap-tools _xb-build-tools _xb-cross-tools xdev-build: _xb-worldtmp _xb-bootstrap-tools _xb-build-tools _xb-cross-tools .PHONY _xb-worldtmp: .PHONY mkdir -p ${CDTMP}/usr ${WORLDTMP_MTREE} -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${CDTMP}/usr >/dev/null _xb-bootstrap-tools: .PHONY .for _tool in \ ${_clang_tblgen} \ ${_gperf} \ ${_yacc} ${_+_}@${ECHODIR} "===> ${_tool} (obj,all,install)"; \ cd ${.CURDIR}/${_tool}; \ if [ -z "${NO_OBJWALK}" ]; then ${CDMAKE} DIRPRFX=${_tool}/ obj; fi; \ ${CDMAKE} DIRPRFX=${_tool}/ all; \ ${CDMAKE} DIRPRFX=${_tool}/ DESTDIR=${CDTMP} install .endfor _xb-build-tools: .PHONY ${_+_}@cd ${.CURDIR}; \ ${CDBENV} ${MAKE} ${CDMAKEARGS} -f Makefile.inc1 ${NOFUN} build-tools XDEVDIRS= \ ${_clang_libs} \ ${_lld} \ ${_binutils} \ ${_elftctools} \ usr.bin/ar \ ${_clang} \ ${_gcc} _xb-cross-tools: .PHONY .for _tool in ${XDEVDIRS} ${_+_}@${ECHODIR} "===> xdev ${_tool} (obj,all)"; \ cd ${.CURDIR}/${_tool}; \ if [ -z "${NO_OBJWALK}" ]; then ${CDMAKE} DIRPRFX=${_tool}/ obj; fi; \ ${CDMAKE} DIRPRFX=${_tool}/ all .endfor _xi-mtree: .PHONY ${_+_}@${ECHODIR} "mtree populating ${XDDESTDIR}" mkdir -p ${XDDESTDIR} ${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.root.dist \ -p ${XDDESTDIR} >/dev/null ${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${XDDESTDIR}/usr >/dev/null ${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.include.dist \ -p ${XDDESTDIR}/usr/include >/dev/null .if defined(LIBCOMPAT) ${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.lib${libcompat}.dist \ -p ${XDDESTDIR}/usr >/dev/null .endif .if ${MK_TESTS} != "no" mkdir -p ${XDDESTDIR}${TESTSBASE} ${DESTDIR_MTREE} -f ${.CURDIR}/etc/mtree/BSD.tests.dist \ -p ${XDDESTDIR}${TESTSBASE} >/dev/null .endif .ORDER: xdev-build _xi-mtree _xi-cross-tools _xi-includes _xi-libraries xdev-install: xdev-build _xi-mtree _xi-cross-tools _xi-includes _xi-libraries .PHONY _xi-cross-tools: .PHONY @echo "_xi-cross-tools" .for _tool in ${XDEVDIRS} ${_+_}@${ECHODIR} "===> xdev ${_tool} (install)"; \ cd ${.CURDIR}/${_tool}; \ ${CDMAKE} DIRPRFX=${_tool}/ install DESTDIR=${XDDESTDIR} .endfor _xi-includes: .PHONY .if !defined(NO_OBJWALK) ${_+_}cd ${.CURDIR}; ${CD2MAKE} -f Makefile.inc1 _obj \ DESTDIR=${XDDESTDIR} .endif ${_+_}cd ${.CURDIR}; ${CD2MAKE} -f Makefile.inc1 includes \ DESTDIR=${XDDESTDIR} _xi-libraries: .PHONY ${_+_}cd ${.CURDIR}; ${CD2MAKE} -f Makefile.inc1 libraries \ DESTDIR=${XDDESTDIR} xdev-links: .PHONY ${_+_}cd ${XDDESTDIR}/usr/bin; \ mkdir -p ../../../../usr/bin; \ for i in *; do \ ln -sf ../../${XDTP}/usr/bin/$$i \ ../../../../usr/bin/${XDDIR}-$$i; \ ln -sf ../../${XDTP}/usr/bin/$$i \ ../../../../usr/bin/${XDDIR}${_REVISION}-$$i; \ done Index: projects/clang700-import/Makefile.libcompat =================================================================== --- projects/clang700-import/Makefile.libcompat (revision 340426) +++ projects/clang700-import/Makefile.libcompat (revision 340427) @@ -1,234 +1,234 @@ # $FreeBSD$ .if !targets(__<${_this:T}>__) __<${_this:T}>__: # Makefile for the compatibility libraries. # - 32-bit compat libraries on MIPS, PowerPC, and AMD64. 
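# Illustrative note (an assumption about how this file is driven): LIBCOMPAT
# is expected to be set by the makefile that includes this one, e.g.
# LIBCOMPAT=32 or LIBCOMPAT=SOFT.  The lower-cased value selects the
# per-flavor LIB32*/LIBSOFT* variables below and names the resulting
# build32/install32 or buildsoft/installsoft targets.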
# ------------------------------------------------------------------- # 32 bit world .if ${TARGET_ARCH} == "amd64" .if empty(TARGET_CPUTYPE) LIB32CPUFLAGS= -march=i686 -mmmx -msse -msse2 .else LIB32CPUFLAGS= -march=${TARGET_CPUTYPE} .endif .if ${WANT_COMPILER_TYPE} == gcc || \ (defined(X_COMPILER_TYPE) && ${X_COMPILER_TYPE} == gcc) .else -LIB32CPUFLAGS+= -target x86_64-unknown-freebsd12.0 +LIB32CPUFLAGS+= -target x86_64-unknown-freebsd13.0 .endif LIB32CPUFLAGS+= -m32 LIB32WMAKEENV= MACHINE=i386 MACHINE_ARCH=i386 \ MACHINE_CPU="i686 mmx sse sse2" LIB32WMAKEFLAGS= \ AS="${XAS} --32" \ LD="${XLD} -m elf_i386_fbsd -L${LIBCOMPATTMP}/usr/lib32" \ OBJCOPY="${XOBJCOPY}" .elif ${TARGET_ARCH} == "powerpc64" .if empty(TARGET_CPUTYPE) LIB32CPUFLAGS= -mcpu=powerpc .else LIB32CPUFLAGS= -mcpu=${TARGET_CPUTYPE} .endif LIB32CPUFLAGS+= -m32 LIB32WMAKEENV= MACHINE=powerpc MACHINE_ARCH=powerpc LIB32WMAKEFLAGS= \ LD="${XLD} -m elf32ppc_fbsd" \ OBJCOPY="${XOBJCOPY}" .elif ${TARGET_ARCH:Mmips64*} != "" .if ${WANT_COMPILER_TYPE} == gcc || \ (defined(X_COMPILER_TYPE) && ${X_COMPILER_TYPE} == gcc) .if empty(TARGET_CPUTYPE) LIB32CPUFLAGS= -march=mips3 .else LIB32CPUFLAGS= -march=${TARGET_CPUTYPE} .endif .else .if ${TARGET_ARCH:Mmips64el*} != "" -LIB32CPUFLAGS= -target mipsel-unknown-freebsd12.0 +LIB32CPUFLAGS= -target mipsel-unknown-freebsd13.0 .else -LIB32CPUFLAGS= -target mips-unknown-freebsd12.0 +LIB32CPUFLAGS= -target mips-unknown-freebsd13.0 .endif .endif LIB32CPUFLAGS+= -mabi=32 LIB32WMAKEENV= MACHINE=mips MACHINE_ARCH=mips .if ${TARGET_ARCH:Mmips64el*} != "" LIB32WMAKEFLAGS= LD="${XLD} -m elf32ltsmip_fbsd" .else LIB32WMAKEFLAGS= LD="${XLD} -m elf32btsmip_fbsd" .endif LIB32WMAKEFLAGS+= OBJCOPY="${XOBJCOPY}" .endif LIB32WMAKEFLAGS+= NM="${XNM}" LIB32CFLAGS= -DCOMPAT_32BIT LIB32DTRACE= ${DTRACE} -32 LIB32WMAKEFLAGS+= -DCOMPAT_32BIT # ------------------------------------------------------------------- # soft-fp world .if ${TARGET_ARCH:Marmv[67]*} != "" LIBSOFTCFLAGS= -DCOMPAT_SOFTFP LIBSOFTCPUFLAGS= -mfloat-abi=softfp LIBSOFTWMAKEENV= CPUTYPE=soft MACHINE=arm MACHINE_ARCH=${TARGET_ARCH} LIBSOFTWMAKEFLAGS= -DCOMPAT_SOFTFP .endif # ------------------------------------------------------------------- # Generic code for each type. # Set defaults based on type. libcompat= ${LIBCOMPAT:tl} _LIBCOMPAT_MAKEVARS= _OBJTOP TMP CPUFLAGS CFLAGS CXXFLAGS WMAKEENV \ WMAKEFLAGS WMAKE .for _var in ${_LIBCOMPAT_MAKEVARS} .if !empty(LIB${LIBCOMPAT}${_var}) LIBCOMPAT${_var}?= ${LIB${LIBCOMPAT}${_var}} .endif .endfor # Shared flags LIBCOMPAT_OBJTOP?= ${OBJTOP}/obj-lib${libcompat} LIBCOMPATTMP?= ${LIBCOMPAT_OBJTOP}/tmp LIBCOMPATCFLAGS+= ${LIBCOMPATCPUFLAGS} \ -L${LIBCOMPATTMP}/usr/lib${libcompat} \ --sysroot=${LIBCOMPATTMP} \ ${BFLAGS} # -B is needed to find /usr/lib32/crti.o for GCC and /usr/libsoft/crti.o for # Clang/GCC. LIBCOMPATCFLAGS+= -B${LIBCOMPATTMP}/usr/lib${libcompat} # Yes, the flags are redundant. LIBCOMPATWMAKEENV+= \ INSTALL="sh ${.CURDIR}/tools/install.sh" \ PATH=${TMPPATH} \ SYSROOT=${LIBCOMPATTMP} \ LIBDIR=/usr/lib${libcompat} \ SHLIBDIR=/usr/lib${libcompat} \ DTRACE="${LIB$COMPATDTRACE:U${DTRACE}}" .if ${MK_META_MODE} != "no" # Don't rebuild build-tools targets during normal build. 
LIBCOMPATWMAKEENV+= BUILD_TOOLS_META=.NOMETA .endif LIBCOMPATWMAKEFLAGS+= CC="${XCC} ${LIBCOMPATCFLAGS}" \ CXX="${XCXX} ${LIBCOMPATCXXFLAGS} ${LIBCOMPATCFLAGS}" \ CPP="${XCPP} ${LIBCOMPATCFLAGS}" \ DESTDIR=${LIBCOMPATTMP} \ -DNO_CPU_CFLAGS \ MK_CTF=no \ -DNO_LINT \ MK_TESTS=no LIBCOMPATWMAKE+= ${LIBCOMPATWMAKEENV} ${MAKE} ${LIBCOMPATWMAKEFLAGS} \ OBJTOP=${LIBCOMPAT_OBJTOP} \ OBJROOT='$${OBJTOP}/' \ MAKEOBJDIRPREFIX= \ MK_MAN=no MK_HTML=no LIBCOMPATIMAKE+= ${LIBCOMPATWMAKE:NINSTALL=*:NDESTDIR=*} \ ${IMAKE_INSTALL} \ -DLIBRARIES_ONLY _LC_LIBDIRS.yes= lib _LC_LIBDIRS.yes+= gnu/lib _LC_LIBDIRS.${MK_CDDL:tl}+= cddl/lib _LC_LIBDIRS.${MK_CRYPT:tl}+= secure/lib _LC_LIBDIRS.${MK_KERBEROS:tl}+= kerberos5/lib _LC_INCDIRS= \ include \ lib/ncurses/ncursesw \ ${_LC_LIBDIRS.yes} .if ${MK_FILE} != "no" _libmagic= lib/libmagic .endif .if ${MK_PMC} != "no" && ${TARGET_ARCH} == "amd64" _jevents= lib/libpmc/pmu-events .endif # Shared logic build${libcompat}: .PHONY @echo @echo "--------------------------------------------------------------" @echo ">>> stage 5.1: building lib${libcompat} shim libraries" @echo "--------------------------------------------------------------" .if !defined(NO_CLEAN) rm -rf ${LIBCOMPATTMP} .else ${_+_}@if [ -e "${LIBCOMPATTMP}" ]; then \ echo ">>> Deleting stale files in build${libcompat} tree..."; \ cd ${.CURDIR}; ${WMAKE} -DBATCH_DELETE_OLD_FILES \ DESTDIR=${LIBCOMPATTMP} \ delete-old delete-old-libs >/dev/null; \ fi .endif # !defined(NO_CLEAN) mkdir -p ${LIBCOMPATTMP}/usr/include ${WORLDTMP_MTREE} -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${LIBCOMPATTMP}/usr >/dev/null ${WORLDTMP_MTREE} -f ${.CURDIR}/etc/mtree/BSD.include.dist \ -p ${LIBCOMPATTMP}/usr/include >/dev/null ${WORLDTMP_MTREE} -f ${.CURDIR}/etc/mtree/BSD.lib${libcompat}.dist \ -p ${LIBCOMPATTMP}/usr >/dev/null .if ${MK_DEBUG_FILES} != "no" ${WORLDTMP_MTREE} -f ${.CURDIR}/etc/mtree/BSD.debug.dist \ -p ${LIBCOMPATTMP}/usr/lib >/dev/null ${WORLDTMP_MTREE} -f ${.CURDIR}/etc/mtree/BSD.lib${libcompat}.dist \ -p ${LIBCOMPATTMP}/usr/lib/debug/usr >/dev/null .endif mkdir -p ${WORLDTMP} ln -sf ${.CURDIR}/sys ${WORLDTMP} .for _t in ${_obj} includes .for _dir in ${_LC_INCDIRS} ${_+_}cd ${.CURDIR}/${_dir}; ${LIBCOMPATWMAKE} MK_INCLUDES=yes \ DIRPRFX=${_dir}/ ${_t} .endfor .endfor .for _dir in lib/ncurses/ncurses lib/ncurses/ncursesw ${_libmagic} ${_jevents} .for _t in ${_obj} build-tools ${_+_}cd ${.CURDIR}/${_dir}; \ WORLDTMP=${WORLDTMP} \ MAKEFLAGS="-m ${.CURDIR}/tools/build/mk ${.MAKEFLAGS}" \ ${MAKE} SSP_CFLAGS= DESTDIR= \ OBJTOP=${LIBCOMPAT_OBJTOP} \ OBJROOT='$${OBJTOP}/' \ MAKEOBJDIRPREFIX= \ DIRPRFX=${_dir}/ -DNO_LINT -DNO_CPU_CFLAGS \ MK_CTF=no MK_RETPOLINE=no MK_WARNS=no \ ${_t} .endfor .endfor ${_+_}cd ${.CURDIR}; \ ${LIBCOMPATWMAKE} -f Makefile.inc1 -DNO_FSCHG libraries .if ${libcompat} == "32" .for _t in ${_obj} all .if !defined(NO_RTLD) ${_+_}cd ${.CURDIR}/libexec/rtld-elf; PROG=ld-elf32.so.1 ${LIBCOMPATWMAKE} \ -DNO_FSCHG DIRPRFX=libexec/rtld-elf/ ${_t} .endif ${_+_}cd ${.CURDIR}/usr.bin/ldd; PROG=ldd32 ${LIBCOMPATWMAKE} \ DIRPRFX=usr.bin/ldd ${_t} .endfor .endif distribute${libcompat} install${libcompat}: .PHONY .for _dir in ${_LC_LIBDIRS.yes} ${_+_}cd ${.CURDIR}/${_dir}; ${LIBCOMPATIMAKE} ${.TARGET:S/${libcompat}$//} .endfor .if ${libcompat} == "32" .if !defined(NO_RTLD) ${_+_}cd ${.CURDIR}/libexec/rtld-elf; \ PROG=ld-elf32.so.1 ${LIBCOMPATIMAKE} ${.TARGET:S/32$//} .endif ${_+_}cd ${.CURDIR}/usr.bin/ldd; PROG=ldd32 ${LIBCOMPATIMAKE} \ ${.TARGET:S/32$//} .endif .endif Index: projects/clang700-import/lib/Makefile 
=================================================================== --- projects/clang700-import/lib/Makefile (revision 340426) +++ projects/clang700-import/lib/Makefile (revision 340427) @@ -1,216 +1,212 @@ # @(#)Makefile 8.1 (Berkeley) 6/4/93 # $FreeBSD$ .include # The SUBDIR_BOOTSTRAP list is a small set of libraries which are used by many # of the other libraries. These are built first with a .WAIT between them # and the main list to avoid needing a SUBDIR_DEPEND line on every library # naming just these few items. SUBDIR_BOOTSTRAP= \ csu \ .WAIT \ libc \ libc_nonshared \ libcompiler_rt \ ${_libclang_rt} \ ${_libcplusplus} \ ${_libcxxrt} \ libelf \ msun # The main list; please keep these sorted alphabetically. SUBDIR= ${SUBDIR_BOOTSTRAP} \ .WAIT \ geom \ libalias \ libarchive \ libauditd \ libbegemot \ libblocksruntime \ libbsdstat \ libbsm \ libbz2 \ libcalendar \ libcam \ libcapsicum \ libcasper \ libcompat \ libcrypt \ libdevctl \ libdevinfo \ libdevstat \ - ${_libdl} \ + libdl \ libdwarf \ libedit \ libelftc \ libevent \ libexecinfo \ libexpat \ libfetch \ libfigpar \ libgeom \ libifconfig \ libipsec \ libjail \ libkiconv \ libkvm \ liblzma \ libmemstat \ libmd \ libmt \ lib80211 \ libnetbsd \ libnv \ libopenbsd \ libopie \ libpam \ libpathconv \ libpcap \ libpjdlog \ ${_libproc} \ libprocstat \ libregex \ librpcsvc \ librss \ librt \ ${_librtld_db} \ libsbuf \ libsmb \ libsqlite3 \ libstdbuf \ libstdthreads \ libsysdecode \ libtacplus \ libthread_db \ libucl \ libufs \ libugidfw \ libulog \ libutil \ ${_libvgl} \ libwrap \ libxo \ liby \ libz \ libzstd \ ncurses # Inter-library dependencies. When the makefile for a library contains LDADD # libraries, those libraries should be listed as build order dependencies here. SUBDIR_DEPEND_geom= libufs SUBDIR_DEPEND_libarchive= libz libbz2 libexpat liblzma libmd SUBDIR_DEPEND_libauditdm= libbsm SUBDIR_DEPEND_libbsnmp= ${_libnetgraph} SUBDIR_DEPEND_libc++:= libcxxrt SUBDIR_DEPEND_libc= libcompiler_rt SUBDIR_DEPEND_libcam= libsbuf SUBDIR_DEPEND_libcasper= libnv SUBDIR_DEPEND_libdevstat= libkvm SUBDIR_DEPEND_libdpv= libfigpar ncurses libutil SUBDIR_DEPEND_libedit= ncurses SUBDIR_DEPEND_libgeom= libexpat libsbuf SUBDIR_DEPEND_librpcsec_gss= libgssapi SUBDIR_DEPEND_libmagic= libz SUBDIR_DEPEND_libmemstat= libkvm SUBDIR_DEPEND_libopie= libmd SUBDIR_DEPEND_libpam= libcrypt libopie ${_libradius} librpcsvc libtacplus libutil ${_libypclnt} ${_libcom_err} SUBDIR_DEPEND_libpjdlog= libutil SUBDIR_DEPEND_libprocstat= libkvm libutil SUBDIR_DEPEND_libradius= libmd SUBDIR_DEPEND_libsmb= libkiconv SUBDIR_DEPEND_libtacplus= libmd SUBDIR_DEPEND_libulog= libmd SUBDIR_DEPEND_libunbound= ${_libldns} SUBDIR_DEPEND_liblzma= ${_libthr} .if ${MK_OFED} != "no" SUBDIR_DEPEND_libpcap= ofed .endif # NB: keep these sorted by MK_* knobs SUBDIR.${MK_ATM}+= libngatm SUBDIR.${MK_BLACKLIST}+=libblacklist SUBDIR.${MK_BLUETOOTH}+=libbluetooth libsdp SUBDIR.${MK_BSNMP}+= libbsnmp .if !defined(COMPAT_32BIT) && !defined(COMPAT_SOFTFP) SUBDIR.${MK_CLANG}+= clang .endif SUBDIR.${MK_CUSE}+= libcuse SUBDIR.${MK_CXX}+= libdevdctl SUBDIR.${MK_TOOLCHAIN}+=libpe SUBDIR.${MK_DIALOG}+= libdpv SUBDIR.${MK_FILE}+= libmagic SUBDIR.${MK_GPIO}+= libgpio SUBDIR.${MK_GSSAPI}+= libgssapi librpcsec_gss SUBDIR.${MK_ICONV}+= libiconv_modules SUBDIR.${MK_KERBEROS_SUPPORT}+= libcom_err SUBDIR.${MK_LDNS}+= libldns # The libraries under libclang_rt can only be built by clang, and only make # sense to build when clang is enabled at all. Furthermore, they can only be # built for certain architectures. 
.if ${MK_CLANG} != "no" && ${COMPILER_TYPE} == "clang" && \ (${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" || \ ${MACHINE_CPUARCH} == "arm" || ${MACHINE_CPUARCH} == "i386") _libclang_rt= libclang_rt .endif .if ${MK_LIBCPLUSPLUS} != "no" _libcxxrt= libcxxrt _libcplusplus= libc++ _libcplusplus+= libc++experimental .endif SUBDIR.${MK_EFI}+= libefivar SUBDIR.${MK_LIBTHR}+= libthr SUBDIR.${MK_LLVM_LIBUNWIND}+= libgcc_eh SUBDIR.${MK_LLVM_LIBUNWIND}+= libgcc_s SUBDIR.${MK_NAND}+= libnandfs SUBDIR.${MK_NETGRAPH}+= libnetgraph SUBDIR.${MK_NIS}+= libypclnt .if ${MACHINE_CPUARCH} == "i386" || ${MACHINE_CPUARCH} == "amd64" _libvgl= libvgl .endif .if ${MACHINE_CPUARCH} == "aarch64" SUBDIR.${MK_PMC}+= libopencsd .endif .if ${MACHINE_CPUARCH} == "amd64" SUBDIR.${MK_PMC}+= libipt SUBDIR.${MK_BHYVE}+= libvmmapi .endif .if ${MACHINE_CPUARCH} != "sparc64" _libproc= libproc _librtld_db= librtld_db -.endif - -.if defined(LINKER_FEATURES) && ${LINKER_FEATURES:Mfilter} -_libdl= libdl .endif SUBDIR.${MK_OPENSSL}+= libmp SUBDIR.${MK_PMC}+= libpmc libpmcstat SUBDIR.${MK_RADIUS_SUPPORT}+= libradius SUBDIR.${MK_SENDMAIL}+= libmilter libsm libsmdb libsmutil SUBDIR.${MK_TELNET}+= libtelnet SUBDIR.${MK_TESTS_SUPPORT}+= atf SUBDIR.${MK_TESTS}+= tests SUBDIR.${MK_UNBOUND}+= libunbound SUBDIR.${MK_USB}+= libusbhid libusb SUBDIR.${MK_OFED}+= ofed SUBDIR.${MK_ZFS}+= libbe .if !make(install) SUBDIR_PARALLEL= .endif .include Index: projects/clang700-import/lib/csu/common/crtbegin.c =================================================================== --- projects/clang700-import/lib/csu/common/crtbegin.c (revision 340426) +++ projects/clang700-import/lib/csu/common/crtbegin.c (revision 340427) @@ -1,107 +1,128 @@ /*- * SPDX-License-Identifier: BSD-1-Clause * * Copyright 2018 Andrew Turner * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include "crt.h" typedef void (*crt_func)(void); extern void *__dso_handle __hidden; -#ifdef SHARED -void *__dso_handle = &__dso_handle; -#else +#ifndef SHARED void *__dso_handle = 0; +#else +void *__dso_handle = &__dso_handle; +void __cxa_finalize(void *) __weak_symbol; + +/* + * Call __cxa_finalize with the dso handle in shared objects. + * When we have ctors/dtors call from the dtor handler before calling + * any dtors, otherwise use a destructor. 
+ */ +#ifndef HAVE_CTORS +__attribute__((destructor)) #endif +static void +run_cxa_finalize(void) +{ + if (__cxa_finalize != NULL) + __cxa_finalize(__dso_handle); +} +#endif + /* * On some architectures and toolchains we may need to call the .dtors. * These are called in the order they are in the ELF file. */ #ifdef HAVE_CTORS static void __do_global_dtors_aux(void) __used; static crt_func __CTOR_LIST__[] __section(".ctors") __used = { (crt_func)-1 }; static crt_func __DTOR_LIST__[] __section(".dtors") __used = { (crt_func)-1 }; static void __do_global_dtors_aux(void) { crt_func fn; int n; + +#ifdef SHARED + run_cxa_finalize(); +#endif for (n = 1;; n++) { fn = __DTOR_LIST__[n]; if (fn == (crt_func)0 || fn == (crt_func)-1) break; fn(); } } asm ( ".pushsection .fini \n" "\t" INIT_CALL_SEQ(__do_global_dtors_aux) "\n" ".popsection \n" ); #endif /* * Handler for gcj. These provide a _Jv_RegisterClasses function and fill * out the .jcr section. We just need to call this function with a pointer * to the appropriate section. */ extern void _Jv_RegisterClasses(void *) __weak_symbol; static void register_classes(void) __used; static crt_func __JCR_LIST__[] __section(".jcr") __used = { }; #ifndef CTORS_CONSTRUCTORS __attribute__((constructor)) #endif static void register_classes(void) { if (_Jv_RegisterClasses != NULL && __JCR_LIST__[0] != 0) _Jv_RegisterClasses(__JCR_LIST__); } /* * We can't use constructors when they use the .ctors section as they may be * placed before __CTOR_LIST__. */ #ifdef CTORS_CONSTRUCTORS asm ( ".pushsection .init \n" "\t" INIT_CALL_SEQ(register_classes) "\n" ".popsection \n" ); #endif Index: projects/clang700-import/lib/libc/Versions.def =================================================================== --- projects/clang700-import/lib/libc/Versions.def (revision 340426) +++ projects/clang700-import/lib/libc/Versions.def (revision 340427) @@ -1,42 +1,45 @@ # $FreeBSD$ # # Note: Whenever bumping the FBSD version, always make # FBSDprivate_1.0 depend on the new FBSD version. # This will keep it at the end of the dependency chain. # # This is our first version; it depends on no other. # This version was first added to 7.0-current. FBSD_1.0 { }; # This version was first added to 8.0-current. FBSD_1.1 { } FBSD_1.0; # This version was first added to 9.0-current. FBSD_1.2 { } FBSD_1.1; # This version was first added to 10.0-current. FBSD_1.3 { } FBSD_1.2; # This version was first added to 11.0-current. FBSD_1.4 { } FBSD_1.3; # This version was first added to 12.0-current. FBSD_1.5 { } FBSD_1.4; +# This version was first added to 13.0-current. +FBSD_1.6 { +} FBSD_1.5; # This is our private namespace. Any global interfaces that are # strictly for use only by other FreeBSD applications and libraries # are listed here. We use a separate namespace so we can write # simple ABI-checking tools. # # Please do NOT increment the version of this namespace. 
FBSDprivate_1.0 { -} FBSD_1.5; +} FBSD_1.6; Index: projects/clang700-import/lib/libcasper/services/Makefile =================================================================== --- projects/clang700-import/lib/libcasper/services/Makefile (revision 340426) +++ projects/clang700-import/lib/libcasper/services/Makefile (revision 340427) @@ -1,16 +1,17 @@ # $FreeBSD$ .include SUBDIR= cap_dns +SUBDIR+= cap_fileargs SUBDIR+= cap_grp SUBDIR+= cap_pwd SUBDIR+= cap_random SUBDIR+= cap_sysctl SUBDIR+= cap_syslog SUBDIR.${MK_TESTS}+= tests SUBDIR_PARALLEL= .include Index: projects/clang700-import/lib/libcasper/services/cap_fileargs/cap_fileargs.c =================================================================== --- projects/clang700-import/lib/libcasper/services/cap_fileargs/cap_fileargs.c (nonexistent) +++ projects/clang700-import/lib/libcasper/services/cap_fileargs/cap_fileargs.c (revision 340427) @@ -0,0 +1,505 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 2018 Mariusz Zaborski + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +#include "cap_fileargs.h" + +#define CACHE_SIZE 128 + +#define FILEARGS_MAGIC 0xFA00FA00 + +struct fileargs { + uint32_t fa_magic; + nvlist_t *fa_cache; + cap_channel_t *fa_chann; + int fa_fdflags; +}; + +static int +fileargs_get_cache(fileargs_t *fa, const char *name) +{ + int fd; + const nvlist_t *nvl; + nvlist_t *tnvl; + + assert(fa != NULL); + assert(fa->fa_magic == FILEARGS_MAGIC); + assert(name != NULL); + + if (fa->fa_cache == NULL) + return (-1); + + if ((fa->fa_fdflags & O_CREAT) != 0) + return (-1); + + nvl = dnvlist_get_nvlist(fa->fa_cache, name, NULL); + if (nvl == NULL) + return (-1); + + tnvl = nvlist_take_nvlist(fa->fa_cache, name); + fd = nvlist_take_descriptor(tnvl, "fd"); + nvlist_destroy(tnvl); + + if ((fa->fa_fdflags & O_CLOEXEC) != O_CLOEXEC) { + if (fcntl(fd, F_SETFD, fa->fa_fdflags) == -1) { + close(fd); + return (-1); + } + } + + return (fd); +} + +static void +fileargs_set_cache(fileargs_t *fa, nvlist_t *nvl) +{ + + nvlist_destroy(fa->fa_cache); + fa->fa_cache = nvl; +} + +static nvlist_t* +fileargs_fetch(fileargs_t *fa, const char *name) +{ + nvlist_t *nvl; + int serrno; + + assert(fa != NULL); + assert(name != NULL); + + nvl = nvlist_create(NV_FLAG_NO_UNIQUE); + nvlist_add_string(nvl, "cmd", "open"); + nvlist_add_string(nvl, "name", name); + + nvl = cap_xfer_nvlist(fa->fa_chann, nvl); + if (nvl == NULL) + return (NULL); + + if (nvlist_get_number(nvl, "error") != 0) { + serrno = (int)nvlist_get_number(nvl, "error"); + nvlist_destroy(nvl); + errno = serrno; + return (NULL); + } + + return (nvl); +} + +static nvlist_t * +fileargs_create_limit(int argc, const char * const *argv, int flags, + mode_t mode, cap_rights_t *rightsp) +{ + nvlist_t *limits; + int i; + + limits = nvlist_create(NV_FLAG_NO_UNIQUE); + if (limits == NULL) + return (NULL); + + nvlist_add_number(limits, "flags", flags); + if (rightsp != NULL) { + nvlist_add_binary(limits, "cap_rights", rightsp, + sizeof(*rightsp)); + } + if ((flags & O_CREAT) != 0) + nvlist_add_number(limits, "mode", (uint64_t)mode); + + for (i = 0; i < argc; i++) { + nvlist_add_null(limits, argv[i]); + } + + return (limits); +} + +static fileargs_t * +fileargs_create(cap_channel_t *chan, int fdflags) +{ + fileargs_t *fa; + + fa = malloc(sizeof(*fa)); + if (fa != NULL) { + fa->fa_cache = NULL; + fa->fa_chann = chan; + fa->fa_fdflags = fdflags; + fa->fa_magic = FILEARGS_MAGIC; + } + + return (fa); +} + +fileargs_t * +fileargs_init(int argc, char *argv[], int flags, mode_t mode, + cap_rights_t *rightsp) +{ + nvlist_t *limits; + + if (argc <= 0 || argv == NULL) { + return (fileargs_create(NULL, 0)); + } + + limits = fileargs_create_limit(argc, (const char * const *)argv, flags, + mode, rightsp); + if (limits == NULL) + return (NULL); + + return (fileargs_initnv(limits)); +} + +fileargs_t * +fileargs_cinit(cap_channel_t *cas, int argc, char *argv[], int flags, + mode_t mode, cap_rights_t *rightsp) +{ + nvlist_t *limits; + + if (argc <= 0 || argv == NULL) { + return (fileargs_create(NULL, 0)); + } + + limits = fileargs_create_limit(argc, (const char * const *)argv, flags, + mode, rightsp); + if (limits == NULL) + return (NULL); + + return (fileargs_cinitnv(cas, limits)); +} + +fileargs_t * +fileargs_initnv(nvlist_t *limits) +{ + cap_channel_t *cas; + fileargs_t *fa; + + if (limits == NULL) { + return (fileargs_create(NULL, 0)); + } + + cas = cap_init(); + if 
(cas == NULL) { + nvlist_destroy(limits); + return (NULL); + } + + fa = fileargs_cinitnv(cas, limits); + cap_close(cas); + + return (fa); +} + +fileargs_t * +fileargs_cinitnv(cap_channel_t *cas, nvlist_t *limits) +{ + cap_channel_t *chann; + fileargs_t *fa; + int serrno, ret; + int flags; + + assert(cas != NULL); + + if (limits == NULL) { + return (fileargs_create(NULL, 0)); + } + + chann = NULL; + fa = NULL; + + chann = cap_service_open(cas, "system.fileargs"); + if (chann == NULL) { + nvlist_destroy(limits); + return (NULL); + } + + flags = nvlist_get_number(limits, "flags"); + + /* Limits are consumed no need to free them. */ + ret = cap_limit_set(chann, limits); + if (ret < 0) + goto out; + + fa = fileargs_create(chann, flags); + if (fa == NULL) + goto out; + + return (fa); +out: + serrno = errno; + if (chann != NULL) + cap_close(chann); + errno = serrno; + return (NULL); +} + +int +fileargs_open(fileargs_t *fa, const char *name) +{ + int fd; + nvlist_t *nvl; + char *cmd; + + assert(fa != NULL); + assert(fa->fa_magic == FILEARGS_MAGIC); + + if (name == NULL) { + errno = EINVAL; + return (-1); + } + + if (fa->fa_chann == NULL) { + errno = ENOTCAPABLE; + return (-1); + } + + fd = fileargs_get_cache(fa, name); + if (fd != -1) + return (fd); + + nvl = fileargs_fetch(fa, name); + if (nvl == NULL) + return (-1); + + fd = nvlist_take_descriptor(nvl, "fd"); + cmd = nvlist_take_string(nvl, "cmd"); + if (strcmp(cmd, "cache") == 0) + fileargs_set_cache(fa, nvl); + else + nvlist_destroy(nvl); + free(cmd); + + return (fd); +} + +FILE * +fileargs_fopen(fileargs_t *fa, const char *name, const char *mode) +{ + int fd; + + if ((fd = fileargs_open(fa, name)) < 0) { + return (NULL); + } + + return (fdopen(fd, mode)); +} + +void +fileargs_free(fileargs_t *fa) +{ + + if (fa == NULL) + return; + + assert(fa->fa_magic == FILEARGS_MAGIC); + + nvlist_destroy(fa->fa_cache); + if (fa->fa_chann != NULL) { + cap_close(fa->fa_chann); + } + explicit_bzero(&fa->fa_magic, sizeof(fa->fa_magic)); + free(fa); +} + +/* + * Service functions. + */ + +static const char *lastname; +static void *cacheposition; +static bool allcached; +static const cap_rights_t *caprightsp; +static int capflags; +static mode_t capmode; + +static int +open_file(const char *name) +{ + int fd, serrno; + + if ((capflags & O_CREAT) == 0) + fd = open(name, capflags); + else + fd = open(name, capflags, capmode); + if (fd < 0) + return (-1); + + if (caprightsp != NULL) { + if (cap_rights_limit(fd, caprightsp) < 0) { + serrno = errno; + close(fd); + errno = serrno; + return (-1); + } + } + + return (fd); +} + +static void +fileargs_add_cache(nvlist_t *nvlout, const nvlist_t *limits, + const char *curent_name) +{ + int type, i, fd; + void *cookie; + nvlist_t *new; + const char *fname; + + if ((capflags & O_CREAT) != 0) { + allcached = true; + return; + } + + cookie = cacheposition; + for (i = 0; i < CACHE_SIZE + 1; i++) { + fname = nvlist_next(limits, &type, &cookie); + if (fname == NULL) { + cacheposition = NULL; + lastname = NULL; + allcached = true; + return; + } + /* We doing that to catch next element name. 
*/ + if (i == CACHE_SIZE) { + break; + } + + if (type != NV_TYPE_NULL || + (curent_name != NULL && strcmp(fname, curent_name) == 0)) { + curent_name = NULL; + i--; + continue; + } + + fd = open_file(fname); + if (fd < 0) { + i--; + continue; + } + + new = nvlist_create(NV_FLAG_NO_UNIQUE); + nvlist_move_descriptor(new, "fd", fd); + nvlist_add_nvlist(nvlout, fname, new); + } + cacheposition = cookie; + lastname = fname; +} + +static bool +fileargs_allowed(const nvlist_t *limits, const nvlist_t *request) +{ + const char *name; + + name = dnvlist_get_string(request, "name", NULL); + if (name == NULL) + return (false); + + /* Fast path. */ + if (lastname != NULL && strcmp(name, lastname) == 0) + return (true); + + if (!nvlist_exists_null(limits, name)) + return (false); + + return (true); +} + +static int +fileargs_limit(const nvlist_t *oldlimits, const nvlist_t *newlimits) +{ + + if (oldlimits != NULL) + return (ENOTCAPABLE); + + capflags = (int)dnvlist_get_number(newlimits, "flags", 0); + if ((capflags & O_CREAT) != 0) + capmode = (mode_t)nvlist_get_number(newlimits, "mode"); + else + capmode = 0; + + caprightsp = dnvlist_get_binary(newlimits, "cap_rights", NULL, NULL, 0); + + return (0); +} + +static int +fileargs_command_open(const nvlist_t *limits, nvlist_t *nvlin, + nvlist_t *nvlout) +{ + int fd; + const char *name; + + if (limits == NULL) + return (ENOTCAPABLE); + + if (!fileargs_allowed(limits, nvlin)) + return (ENOTCAPABLE); + + name = nvlist_get_string(nvlin, "name"); + + fd = open_file(name); + if (fd < 0) + return (errno); + + if (!allcached && (lastname == NULL || + strcmp(name, lastname) == 0)) { + nvlist_add_string(nvlout, "cmd", "cache"); + fileargs_add_cache(nvlout, limits, name); + } else { + nvlist_add_string(nvlout, "cmd", "open"); + } + nvlist_move_descriptor(nvlout, "fd", fd); + return (0); +} + +static int +fileargs_command(const char *cmd, const nvlist_t *limits, + nvlist_t *nvlin, nvlist_t *nvlout) +{ + + if (strcmp(cmd, "open") == 0) + return (fileargs_command_open(limits, nvlin, nvlout)); + + return (EINVAL); +} + +CREATE_SERVICE("system.fileargs", fileargs_limit, fileargs_command, + CASPER_SERVICE_FD | CASPER_SERVICE_STDIO | CASPER_SERVICE_NO_UNIQ_LIMITS); Property changes on: projects/clang700-import/lib/libcasper/services/cap_fileargs/cap_fileargs.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang700-import/lib/libcasper/services/cap_fileargs/Makefile =================================================================== --- projects/clang700-import/lib/libcasper/services/cap_fileargs/Makefile (nonexistent) +++ projects/clang700-import/lib/libcasper/services/cap_fileargs/Makefile (revision 340427) @@ -0,0 +1,35 @@ +# $FreeBSD$ + +SHLIBDIR?= /lib/casper + +.include + +PACKAGE=libcasper + +SHLIB_MAJOR= 1 +INCSDIR?= ${INCLUDEDIR}/casper + +.if ${MK_CASPER} != "no" +SHLIB= cap_fileargs + +SRCS= cap_fileargs.c +.endif + +INCS= cap_fileargs.h + +LIBADD= nv + +CFLAGS+=-I${.CURDIR} + +MAN+= cap_fileargs.3 + +MLINKS+=cap_fileargs.3 libcap_fileargs.3 +MLINKS+=cap_fileargs.3 fileargs_cinit.3 +MLINKS+=cap_fileargs.3 fileargs_cinitnv.3 +MLINKS+=cap_fileargs.3 fileargs_fopen.3 +MLINKS+=cap_fileargs.3 fileargs_free.3 +MLINKS+=cap_fileargs.3 fileargs_init.3 +MLINKS+=cap_fileargs.3 fileargs_initnv.3 
+MLINKS+=cap_fileargs.3 fileargs_open.3
+
+.include
Property changes on: projects/clang700-import/lib/libcasper/services/cap_fileargs/Makefile ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang700-import/lib/libcasper/services/cap_fileargs/cap_fileargs.3 =================================================================== --- projects/clang700-import/lib/libcasper/services/cap_fileargs/cap_fileargs.3 (nonexistent) +++ projects/clang700-import/lib/libcasper/services/cap_fileargs/cap_fileargs.3 (revision 340427) @@ -0,0 +1,241 @@
+.\" Copyright (c) 2018 Mariusz Zaborski
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd November 12, 2018
+.Dt CAP_FILEARGS 3
+.Os
+.Sh NAME
+.Nm fileargs_cinit ,
+.Nm fileargs_cinitnv ,
+.Nm fileargs_init ,
+.Nm fileargs_initnv ,
+.Nm fileargs_free ,
+.Nm fileargs_open ,
+.Nm fileargs_fopen
+.Nd "library for handling files in capability mode"
+.Sh LIBRARY
+.Lb libcap_fileargs
+.Sh SYNOPSIS
+.In sys/nv.h
+.In libcasper.h
+.In casper/cap_fileargs.h
+.Ft "fileargs_t *"
+.Fn fileargs_init "int argc" "char *argv[]" "int flags" "mode_t mode" "cap_rights_t *rightsp"
+.Ft "fileargs_t *"
+.Fn fileargs_cinit "cap_channel_t *cas" "int argc" "char *argv[]" "int flags" "mode_t mode" "cap_rights_t *rightsp"
+.Ft "fileargs_t *"
+.Fn fileargs_cinitnv "cap_channel_t *cas" "nvlist_t *limits"
+.Ft "fileargs_t *"
+.Fn fileargs_initnv "nvlist_t *limits"
+.Ft "void"
+.Fn fileargs_free "fileargs_t *fa"
+.Ft "int"
+.Fn fileargs_open "fileargs_t *fa" "const char *name"
+.Ft "FILE *"
+.Fn fileargs_fopen "fileargs_t *fa" "const char *name" "const char *mode"
+.Sh DESCRIPTION
+The library is used to simplify capsicumizing tools that use the file system.
+The idea behind the library is that the remaining
+.Fa argc
+and
+.Fa argv ,
+which contain the list of files that should be opened for this program,
+are passed to it.
+The library will create a service that will serve those files.
+.Pp
+The function
+.Fn fileargs_init
+creates the
+.Nm system.fileargs
+service.
+The
+.Fa argv
+argument contains a list of files that should be opened.
+The argument can be set to
+.Dv NULL ,
+in which case no service is created and opening any file will be prohibited.
+The
+.Fa argc
+argument contains the number of passed files.
+The
+.Fa flags
+argument limits opened files to either execution or reading and/or writing.
+The
+.Fa mode
+argument tells with what mode the file should be created if the
+.Dv O_CREAT
+flag is present.
+For more details of the
+.Fa flags
+and
+.Fa mode
+arguments see
+.Xr open 2 .
+The
+.Fa rightsp
+argument contains the capability rights to which the file should be limited.
+For more details of the capability rights see
+.Xr cap_rights_init 3 .
+.Pp
+The function
+.Fn fileargs_cinit
+is equivalent to
+.Fn fileargs_init
+except that the connection to Casper needs to be provided.
+.Pp
+The functions
+.Fn fileargs_initnv
+and
+.Fn fileargs_cinitnv
+are respectively equivalent to
+.Fn fileargs_init
+and
+.Fn fileargs_cinit
+except that all arguments are provided as an
+.Xr nvlist 9 .
+For details see
+.Sx LIMITS .
+.Pp
+The
+.Fn fileargs_free
+function closes the connection to the
+.Nm system.fileargs
+service and frees all allocated structures.
+The function handles a
+.Dv NULL
+argument.
+.Pp
+The functions
+.Fn fileargs_open
+and
+.Fn fileargs_fopen
+are respectively equivalent to
+.Xr open 2
+and
+.Xr fopen 3
+except that all remaining arguments are fetched from the
+.Vt fileargs_t
+structure.
+.Sh LIMITS
+This section describes which values and types should be used to pass arguments to the
+.Nm system.fileargs
+service through the
+.Fn fileargs_initnv
+and
+.Fn fileargs_cinitnv
+functions.
+The
+.Xr nvlist 9
+for these functions must contain the following values and types:
+.Bl -ohang -offset indent
+.It flags ( NV_TYPE_NUMBER )
+The
+.Va flags
+value limits opened files to either execution or reading and/or writing.
+.It mode ( NV_TYPE_NUMBER )
+If the
+.Dv O_CREAT
+flag is set in
+.Va flags ,
+the
+.Xr nvlist 9
+must also contain
+.Va mode .
+The
+.Va mode
+value tells with what mode the file should be created.
+.El
+.Pp
+The
+.Xr nvlist 9
+for these functions may contain the following values and types:
+.Bl -ohang -offset indent
+.It cap_rights ( NV_TYPE_BINARY )
+The
+.Va cap_rights
+value contains the capability rights to which the files should be limited.
+.It ( NV_TYPE_NULL )
+Any number of
+.Dv NV_TYPE_NULL
+elements, where the name of each element is the name of a file that can be opened.
+.El
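As an editorial illustration only (not part of this change set), the sketch below shows how a caller might build such a limits nvlist by hand and pass it to fileargs_initnv(); it mirrors fileargs_create_limit() from the cap_fileargs.c diff above, the helper name and the chosen flags/rights are hypothetical, and error handling is omitted.

/*
 * Illustrative sketch: constructing the limits nvlist described in
 * the LIMITS section and handing it to fileargs_initnv().
 */
#include <sys/nv.h>
#include <sys/capsicum.h>
#include <fcntl.h>
#include <libcasper.h>
#include <casper/cap_fileargs.h>

static fileargs_t *
limits_sketch(int argc, char *argv[])
{
	cap_rights_t rights;
	nvlist_t *limits;
	int i;

	limits = nvlist_create(NV_FLAG_NO_UNIQUE);
	/* Mandatory: open(2) flags applied to every served file. */
	nvlist_add_number(limits, "flags", O_RDONLY);
	/* Optional: capability rights the descriptors are limited to. */
	cap_rights_init(&rights, CAP_READ, CAP_FSTAT);
	nvlist_add_binary(limits, "cap_rights", &rights, sizeof(rights));
	/* One NV_TYPE_NULL entry per file name that may be opened. */
	for (i = 0; i < argc; i++)
		nvlist_add_null(limits, argv[i]);

	/* The nvlist is consumed by fileargs_initnv(). */
	return (fileargs_initnv(limits));
}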
+.Sh EXAMPLES
+The following example first parses some options and then creates the
+.Nm system.fileargs
+service with the remaining arguments.
+.Bd -literal
+int ch, fd, i;
+cap_rights_t rights;
+fileargs_t *fa;
+
+while ((ch = getopt(argc, argv, "h")) != -1) {
+	switch (ch) {
+	case 'h':
+	default:
+		usage();
+	}
+}
+
+argc -= optind;
+argv += optind;
+
+/* Create capability to the system.fileargs service. */
+fa = fileargs_init(argc, argv, O_RDONLY, 0,
+    cap_rights_init(&rights, CAP_READ));
+if (fa == NULL)
+	err(1, "unable to open system.fileargs service");
+
+/* Enter capability mode sandbox. */
+if (cap_enter() < 0 && errno != ENOSYS)
+	err(1, "unable to enter capability mode");
+
+/* Open files. */
+for (i = 0; i < argc; i++) {
+	fd = fileargs_open(fa, argv[i]);
+	if (fd < 0)
+		err(1, "unable to open file %s", argv[i]);
+	printf("File %s opened in capability mode\\n", argv[i]);
+	close(fd);
+}
+
+fileargs_free(fa);
+.Ed
+.Sh SEE ALSO
+.Xr cap_enter 2 ,
+.Xr open 2 ,
+.Xr cap_rights_init 3 ,
+.Xr err 3 ,
+.Xr fopen 3 ,
+.Xr getopt 3 ,
+.Xr capsicum 4 ,
+.Xr nv 9
+.Sh BUGS
+The
+.Lb cap_fileargs
+included in
+.Fx
+is considered experimental, and should not be deployed in production
+environments without careful consideration of the risks associated with
+the use of experimental operating system features.
+.Sh AUTHORS
+.An Mariusz Zaborski Aq Mt oshogbo@FreeBSD.org
Property changes on: projects/clang700-import/lib/libcasper/services/cap_fileargs/cap_fileargs.3 ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang700-import/lib/libcasper/services/cap_fileargs/cap_fileargs.h =================================================================== --- projects/clang700-import/lib/libcasper/services/cap_fileargs/cap_fileargs.h (nonexistent) +++ projects/clang700-import/lib/libcasper/services/cap_fileargs/cap_fileargs.h (revision 340427) @@ -0,0 +1,108 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2018 Mariusz Zaborski
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ * + * $FreeBSD$ + */ + +#ifndef _FILEARGS_H_ +#define _FILEARGS_H_ + +#include +#include + +#include + +#ifdef WITH_CASPER +struct fileargs; +typedef struct fileargs fileargs_t; + +fileargs_t *fileargs_init(int argc, char *argv[], int flags, mode_t mode, + cap_rights_t *rightsp); +fileargs_t *fileargs_cinit(cap_channel_t *cas, int argc, char *argv[], + int flags, mode_t mode, cap_rights_t *rightsp); +fileargs_t *fileargs_initnv(nvlist_t *limits); +fileargs_t *fileargs_cinitnv(cap_channel_t *cas, nvlist_t *limits); +int fileargs_open(fileargs_t *fa, const char *name); +void fileargs_free(fileargs_t *fa); +FILE *fileargs_fopen(fileargs_t *fa, const char *name, const char *mode); +#else +typedef struct fileargs { + int fa_flags; + mode_t fa_mode; +} fileargs_t; + +static inline fileargs_t * +fileargs_init(int argc __unused, char *argv[] __unused, int flags, mode_t mode, + cap_rights_t *rightsp __unused) { + fileargs_t *fa; + + fa = malloc(sizeof(*fa)); + if (fa != NULL) { + fa->fa_flags = flags; + fa->fa_mode = mode; + } + + return (fa); +} + +static inline fileargs_t * +fileargs_cinit(cap_channel_t *cas __unused, int argc, char *argv[], int flags, + mode_t mode, cap_rights_t *rightsp) +{ + + return (fileargs_init(argc, argv, flags, mode, rightsp)); +} + +static inline fileargs_t * +fileargs_initnv(nvlist_t *limits) +{ + fileargs_t *fa; + + fa = fileargs_init(0, NULL, + nvlist_get_number(limits, "flags"), + dnvlist_get_number(limits, "mode", 0), + NULL); + nvlist_destroy(limits); + + return (fa); +} + +static inline fileargs_t * +fileargs_cinitnv(cap_channel_t *cas __unused, nvlist_t *limits) +{ + + return (fileargs_initnv(limits)); +} + +#define fileargs_open(fa, name) \ + open(name, fa->fa_flags, fa->fa_mode) +#define fileargs_fopen(fa, name, mode) \ + fopen(name, mode) +#define fileargs_free(fa) (free(fa)) +#endif + +#endif /* !_FILEARGS_H_ */ Property changes on: projects/clang700-import/lib/libcasper/services/cap_fileargs/cap_fileargs.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang700-import/lib/libnv/common_impl.h =================================================================== --- projects/clang700-import/lib/libnv/common_impl.h (revision 340426) +++ projects/clang700-import/lib/libnv/common_impl.h (revision 340427) @@ -1,39 +1,48 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Pawel Jakub Dawidek under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _COMMON_IMPL_H_ #define _COMMON_IMPL_H_ -#define fd_is_valid(fd) (fcntl((fd), F_GETFL) != -1 || errno != EBADF) +#include +#include +#include + +static inline bool +fd_is_valid(int fd) +{ + + return (fcntl(fd, F_GETFD) != -1 || errno != EBADF); +} #endif /* !_COMMON_IMPL_H_ */ Index: projects/clang700-import/lib/libnv/msgio.c =================================================================== --- projects/clang700-import/lib/libnv/msgio.c (revision 340426) +++ projects/clang700-import/lib/libnv/msgio.c (revision 340427) @@ -1,470 +1,465 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013 The FreeBSD Foundation * Copyright (c) 2013 Mariusz Zaborski * All rights reserved. * * This software was developed by Pawel Jakub Dawidek under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #ifdef HAVE_PJDLOG #include #endif #include "common_impl.h" #include "msgio.h" #ifndef HAVE_PJDLOG #include #define PJDLOG_ASSERT(...) assert(__VA_ARGS__) #define PJDLOG_RASSERT(expr, ...) assert(expr) #define PJDLOG_ABORT(...) 
abort() #endif #define PKG_MAX_SIZE (MCLBYTES / CMSG_SPACE(sizeof(int)) - 1) static int msghdr_add_fd(struct cmsghdr *cmsg, int fd) { PJDLOG_ASSERT(fd >= 0); - if (!fd_is_valid(fd)) { - errno = EBADF; - return (-1); - } - cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_RIGHTS; cmsg->cmsg_len = CMSG_LEN(sizeof(fd)); bcopy(&fd, CMSG_DATA(cmsg), sizeof(fd)); return (0); } static int msghdr_get_fd(struct cmsghdr *cmsg) { int fd; if (cmsg == NULL || cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS || cmsg->cmsg_len != CMSG_LEN(sizeof(fd))) { errno = EINVAL; return (-1); } bcopy(CMSG_DATA(cmsg), &fd, sizeof(fd)); #ifndef MSG_CMSG_CLOEXEC /* * If the MSG_CMSG_CLOEXEC flag is not available we cannot set the * close-on-exec flag atomically, but we still want to set it for * consistency. */ (void) fcntl(fd, F_SETFD, FD_CLOEXEC); #endif return (fd); } static void fd_wait(int fd, bool doread) { fd_set fds; PJDLOG_ASSERT(fd >= 0); FD_ZERO(&fds); FD_SET(fd, &fds); (void)select(fd + 1, doread ? &fds : NULL, doread ? NULL : &fds, NULL, NULL); } static int msg_recv(int sock, struct msghdr *msg) { int flags; PJDLOG_ASSERT(sock >= 0); #ifdef MSG_CMSG_CLOEXEC flags = MSG_CMSG_CLOEXEC; #else flags = 0; #endif for (;;) { fd_wait(sock, true); if (recvmsg(sock, msg, flags) == -1) { if (errno == EINTR) continue; return (-1); } break; } return (0); } static int msg_send(int sock, const struct msghdr *msg) { PJDLOG_ASSERT(sock >= 0); for (;;) { fd_wait(sock, false); if (sendmsg(sock, msg, 0) == -1) { if (errno == EINTR) continue; return (-1); } break; } return (0); } int cred_send(int sock) { unsigned char credbuf[CMSG_SPACE(sizeof(struct cmsgcred))]; struct msghdr msg; struct cmsghdr *cmsg; struct iovec iov; uint8_t dummy; bzero(credbuf, sizeof(credbuf)); bzero(&msg, sizeof(msg)); bzero(&iov, sizeof(iov)); /* * XXX: We send one byte along with the control message, because * setting msg_iov to NULL only works if this is the first * packet send over the socket. Once we send some data we * won't be able to send credentials anymore. This is most * likely a kernel bug. */ dummy = 0; iov.iov_base = &dummy; iov.iov_len = sizeof(dummy); msg.msg_iov = &iov; msg.msg_iovlen = 1; msg.msg_control = credbuf; msg.msg_controllen = sizeof(credbuf); cmsg = CMSG_FIRSTHDR(&msg); cmsg->cmsg_len = CMSG_LEN(sizeof(struct cmsgcred)); cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_CREDS; if (msg_send(sock, &msg) == -1) return (-1); return (0); } int cred_recv(int sock, struct cmsgcred *cred) { unsigned char credbuf[CMSG_SPACE(sizeof(struct cmsgcred))]; struct msghdr msg; struct cmsghdr *cmsg; struct iovec iov; uint8_t dummy; bzero(credbuf, sizeof(credbuf)); bzero(&msg, sizeof(msg)); bzero(&iov, sizeof(iov)); iov.iov_base = &dummy; iov.iov_len = sizeof(dummy); msg.msg_iov = &iov; msg.msg_iovlen = 1; msg.msg_control = credbuf; msg.msg_controllen = sizeof(credbuf); if (msg_recv(sock, &msg) == -1) return (-1); cmsg = CMSG_FIRSTHDR(&msg); if (cmsg == NULL || cmsg->cmsg_len != CMSG_LEN(sizeof(struct cmsgcred)) || cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_CREDS) { errno = EINVAL; return (-1); } bcopy(CMSG_DATA(cmsg), cred, sizeof(*cred)); return (0); } static int fd_package_send(int sock, const int *fds, size_t nfds) { struct msghdr msg; struct cmsghdr *cmsg; struct iovec iov; unsigned int i; int serrno, ret; uint8_t dummy; PJDLOG_ASSERT(sock >= 0); PJDLOG_ASSERT(fds != NULL); PJDLOG_ASSERT(nfds > 0); bzero(&msg, sizeof(msg)); /* * XXX: Look into cred_send function for more details. 
*/ dummy = 0; iov.iov_base = &dummy; iov.iov_len = sizeof(dummy); msg.msg_iov = &iov; msg.msg_iovlen = 1; msg.msg_controllen = nfds * CMSG_SPACE(sizeof(int)); msg.msg_control = calloc(1, msg.msg_controllen); if (msg.msg_control == NULL) return (-1); ret = -1; for (i = 0, cmsg = CMSG_FIRSTHDR(&msg); i < nfds && cmsg != NULL; i++, cmsg = CMSG_NXTHDR(&msg, cmsg)) { if (msghdr_add_fd(cmsg, fds[i]) == -1) goto end; } if (msg_send(sock, &msg) == -1) goto end; ret = 0; end: serrno = errno; free(msg.msg_control); errno = serrno; return (ret); } static int fd_package_recv(int sock, int *fds, size_t nfds) { struct msghdr msg; struct cmsghdr *cmsg; unsigned int i; int serrno, ret; struct iovec iov; uint8_t dummy; PJDLOG_ASSERT(sock >= 0); PJDLOG_ASSERT(nfds > 0); PJDLOG_ASSERT(fds != NULL); bzero(&msg, sizeof(msg)); bzero(&iov, sizeof(iov)); /* * XXX: Look into cred_send function for more details. */ iov.iov_base = &dummy; iov.iov_len = sizeof(dummy); msg.msg_iov = &iov; msg.msg_iovlen = 1; msg.msg_controllen = nfds * CMSG_SPACE(sizeof(int)); msg.msg_control = calloc(1, msg.msg_controllen); if (msg.msg_control == NULL) return (-1); ret = -1; if (msg_recv(sock, &msg) == -1) goto end; for (i = 0, cmsg = CMSG_FIRSTHDR(&msg); i < nfds && cmsg != NULL; i++, cmsg = CMSG_NXTHDR(&msg, cmsg)) { fds[i] = msghdr_get_fd(cmsg); if (fds[i] < 0) break; } if (cmsg != NULL || i < nfds) { int fd; /* * We need to close all received descriptors, even if we have * different control message (eg. SCM_CREDS) in between. */ for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL; cmsg = CMSG_NXTHDR(&msg, cmsg)) { fd = msghdr_get_fd(cmsg); if (fd >= 0) close(fd); } errno = EINVAL; goto end; } ret = 0; end: serrno = errno; free(msg.msg_control); errno = serrno; return (ret); } int fd_recv(int sock, int *fds, size_t nfds) { unsigned int i, step, j; int ret, serrno; if (nfds == 0 || fds == NULL) { errno = EINVAL; return (-1); } ret = i = step = 0; while (i < nfds) { if (PKG_MAX_SIZE < nfds - i) step = PKG_MAX_SIZE; else step = nfds - i; ret = fd_package_recv(sock, fds + i, step); if (ret != 0) { /* Close all received descriptors. 
*/ serrno = errno; for (j = 0; j < i; j++) close(fds[j]); errno = serrno; break; } i += step; } return (ret); } int fd_send(int sock, const int *fds, size_t nfds) { unsigned int i, step; int ret; if (nfds == 0 || fds == NULL) { errno = EINVAL; return (-1); } ret = i = step = 0; while (i < nfds) { if (PKG_MAX_SIZE < nfds - i) step = PKG_MAX_SIZE; else step = nfds - i; ret = fd_package_send(sock, fds + i, step); if (ret != 0) break; i += step; } return (ret); } int buf_send(int sock, void *buf, size_t size) { ssize_t done; unsigned char *ptr; PJDLOG_ASSERT(sock >= 0); PJDLOG_ASSERT(size > 0); PJDLOG_ASSERT(buf != NULL); ptr = buf; do { fd_wait(sock, false); done = send(sock, ptr, size, 0); if (done == -1) { if (errno == EINTR) continue; return (-1); } else if (done == 0) { errno = ENOTCONN; return (-1); } size -= done; ptr += done; } while (size > 0); return (0); } int buf_recv(int sock, void *buf, size_t size) { ssize_t done; unsigned char *ptr; PJDLOG_ASSERT(sock >= 0); PJDLOG_ASSERT(buf != NULL); ptr = buf; while (size > 0) { fd_wait(sock, true); done = recv(sock, ptr, size, 0); if (done == -1) { if (errno == EINTR) continue; return (-1); } else if (done == 0) { errno = ENOTCONN; return (-1); } size -= done; ptr += done; } return (0); } Index: projects/clang700-import/lib/libnv/tests/nvlist_send_recv_test.c =================================================================== --- projects/clang700-import/lib/libnv/tests/nvlist_send_recv_test.c (revision 340426) +++ projects/clang700-import/lib/libnv/tests/nvlist_send_recv_test.c (revision 340427) @@ -1,341 +1,367 @@ /*- * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Pawel Jakub Dawidek under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include static int ntest = 1; #define CHECK(expr) do { \ if ((expr)) \ printf("ok # %d %s:%u\n", ntest, __FILE__, __LINE__); \ else \ printf("not ok # %d %s:%u\n", ntest, __FILE__, __LINE__);\ ntest++; \ } while (0) #define fd_is_valid(fd) (fcntl((fd), F_GETFL) != -1 || errno != EBADF) static void child(int sock) { nvlist_t *nvl; nvlist_t *empty; nvl = nvlist_create(0); empty = nvlist_create(0); nvlist_add_bool(nvl, "nvlist/bool/true", true); nvlist_add_bool(nvl, "nvlist/bool/false", false); nvlist_add_number(nvl, "nvlist/number/0", 0); nvlist_add_number(nvl, "nvlist/number/1", 1); nvlist_add_number(nvl, "nvlist/number/-1", -1); nvlist_add_number(nvl, "nvlist/number/UINT64_MAX", UINT64_MAX); nvlist_add_number(nvl, "nvlist/number/INT64_MIN", INT64_MIN); nvlist_add_number(nvl, "nvlist/number/INT64_MAX", INT64_MAX); nvlist_add_string(nvl, "nvlist/string/", ""); nvlist_add_string(nvl, "nvlist/string/x", "x"); nvlist_add_string(nvl, "nvlist/string/abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxyz"); nvlist_add_descriptor(nvl, "nvlist/descriptor/STDERR_FILENO", STDERR_FILENO); nvlist_add_binary(nvl, "nvlist/binary/x", "x", 1); nvlist_add_binary(nvl, "nvlist/binary/abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxyz", sizeof("abcdefghijklmnopqrstuvwxyz")); nvlist_move_nvlist(nvl, "nvlist/nvlist/empty", empty); nvlist_add_nvlist(nvl, "nvlist/nvlist", nvl); nvlist_send(sock, nvl); nvlist_destroy(nvl); } static void parent(int sock) { nvlist_t *nvl; const nvlist_t *cnvl, *empty; const char *name, *cname; void *cookie, *ccookie; int type, ctype; size_t size; nvl = nvlist_recv(sock, 0); CHECK(nvlist_error(nvl) == 0); if (nvlist_error(nvl) != 0) err(1, "nvlist_recv() failed"); cookie = NULL; name = nvlist_next(nvl, &type, &cookie); CHECK(name != NULL); CHECK(type == NV_TYPE_BOOL); CHECK(strcmp(name, "nvlist/bool/true") == 0); CHECK(nvlist_get_bool(nvl, name) == true); name = nvlist_next(nvl, &type, &cookie); CHECK(name != NULL); CHECK(type == NV_TYPE_BOOL); CHECK(strcmp(name, "nvlist/bool/false") == 0); CHECK(nvlist_get_bool(nvl, name) == false); name = nvlist_next(nvl, &type, &cookie); CHECK(name != NULL); CHECK(type == NV_TYPE_NUMBER); CHECK(strcmp(name, "nvlist/number/0") == 0); CHECK(nvlist_get_number(nvl, name) == 0); name = nvlist_next(nvl, &type, &cookie); CHECK(name != NULL); CHECK(type == NV_TYPE_NUMBER); CHECK(strcmp(name, "nvlist/number/1") == 0); CHECK(nvlist_get_number(nvl, name) == 1); name = nvlist_next(nvl, &type, &cookie); CHECK(name != NULL); CHECK(type == NV_TYPE_NUMBER); CHECK(strcmp(name, "nvlist/number/-1") == 0); CHECK((int)nvlist_get_number(nvl, name) == -1); name = nvlist_next(nvl, &type, &cookie); CHECK(name != NULL); CHECK(type == NV_TYPE_NUMBER); CHECK(strcmp(name, "nvlist/number/UINT64_MAX") == 0); CHECK(nvlist_get_number(nvl, name) == UINT64_MAX); name = nvlist_next(nvl, &type, &cookie); CHECK(name != NULL); CHECK(type == NV_TYPE_NUMBER); CHECK(strcmp(name, "nvlist/number/INT64_MIN") == 0); CHECK((int64_t)nvlist_get_number(nvl, name) == INT64_MIN); name = nvlist_next(nvl, &type, &cookie); CHECK(name != NULL); CHECK(type == NV_TYPE_NUMBER); CHECK(strcmp(name, "nvlist/number/INT64_MAX") == 0); CHECK((int64_t)nvlist_get_number(nvl, name) == INT64_MAX); name = nvlist_next(nvl, &type, &cookie); CHECK(name != NULL); CHECK(type == NV_TYPE_STRING); CHECK(strcmp(name, "nvlist/string/") == 0); CHECK(strcmp(nvlist_get_string(nvl, name), "") == 0); name = nvlist_next(nvl, 
&type, &cookie); CHECK(name != NULL); CHECK(type == NV_TYPE_STRING); CHECK(strcmp(name, "nvlist/string/x") == 0); CHECK(strcmp(nvlist_get_string(nvl, name), "x") == 0); name = nvlist_next(nvl, &type, &cookie); CHECK(name != NULL); CHECK(type == NV_TYPE_STRING); CHECK(strcmp(name, "nvlist/string/abcdefghijklmnopqrstuvwxyz") == 0); CHECK(strcmp(nvlist_get_string(nvl, name), "abcdefghijklmnopqrstuvwxyz") == 0); name = nvlist_next(nvl, &type, &cookie); CHECK(name != NULL); CHECK(type == NV_TYPE_DESCRIPTOR); CHECK(strcmp(name, "nvlist/descriptor/STDERR_FILENO") == 0); CHECK(fd_is_valid(nvlist_get_descriptor(nvl, name))); name = nvlist_next(nvl, &type, &cookie); CHECK(name != NULL); CHECK(type == NV_TYPE_BINARY); CHECK(strcmp(name, "nvlist/binary/x") == 0); CHECK(memcmp(nvlist_get_binary(nvl, name, NULL), "x", 1) == 0); CHECK(memcmp(nvlist_get_binary(nvl, name, &size), "x", 1) == 0); CHECK(size == 1); name = nvlist_next(nvl, &type, &cookie); CHECK(name != NULL); CHECK(type == NV_TYPE_BINARY); CHECK(strcmp(name, "nvlist/binary/abcdefghijklmnopqrstuvwxyz") == 0); CHECK(memcmp(nvlist_get_binary(nvl, name, NULL), "abcdefghijklmnopqrstuvwxyz", sizeof("abcdefghijklmnopqrstuvwxyz")) == 0); CHECK(memcmp(nvlist_get_binary(nvl, name, &size), "abcdefghijklmnopqrstuvwxyz", sizeof("abcdefghijklmnopqrstuvwxyz")) == 0); CHECK(size == sizeof("abcdefghijklmnopqrstuvwxyz")); name = nvlist_next(nvl, &type, &cookie); CHECK(name != NULL); CHECK(type == NV_TYPE_NVLIST); CHECK(strcmp(name, "nvlist/nvlist/empty") == 0); cnvl = nvlist_get_nvlist(nvl, name); CHECK(nvlist_empty(cnvl)); name = nvlist_next(nvl, &type, &cookie); CHECK(name != NULL); CHECK(type == NV_TYPE_NVLIST); CHECK(strcmp(name, "nvlist/nvlist") == 0); cnvl = nvlist_get_nvlist(nvl, name); ccookie = NULL; cname = nvlist_next(cnvl, &ctype, &ccookie); CHECK(cname != NULL); CHECK(ctype == NV_TYPE_BOOL); CHECK(strcmp(cname, "nvlist/bool/true") == 0); CHECK(nvlist_get_bool(cnvl, cname) == true); cname = nvlist_next(cnvl, &ctype, &ccookie); CHECK(cname != NULL); CHECK(ctype == NV_TYPE_BOOL); CHECK(strcmp(cname, "nvlist/bool/false") == 0); CHECK(nvlist_get_bool(cnvl, cname) == false); cname = nvlist_next(cnvl, &ctype, &ccookie); CHECK(cname != NULL); CHECK(ctype == NV_TYPE_NUMBER); CHECK(strcmp(cname, "nvlist/number/0") == 0); CHECK(nvlist_get_number(cnvl, cname) == 0); cname = nvlist_next(cnvl, &ctype, &ccookie); CHECK(cname != NULL); CHECK(ctype == NV_TYPE_NUMBER); CHECK(strcmp(cname, "nvlist/number/1") == 0); CHECK(nvlist_get_number(cnvl, cname) == 1); cname = nvlist_next(cnvl, &ctype, &ccookie); CHECK(cname != NULL); CHECK(ctype == NV_TYPE_NUMBER); CHECK(strcmp(cname, "nvlist/number/-1") == 0); CHECK((int)nvlist_get_number(cnvl, cname) == -1); cname = nvlist_next(cnvl, &ctype, &ccookie); CHECK(cname != NULL); CHECK(ctype == NV_TYPE_NUMBER); CHECK(strcmp(cname, "nvlist/number/UINT64_MAX") == 0); CHECK(nvlist_get_number(cnvl, cname) == UINT64_MAX); cname = nvlist_next(cnvl, &ctype, &ccookie); CHECK(cname != NULL); CHECK(ctype == NV_TYPE_NUMBER); CHECK(strcmp(cname, "nvlist/number/INT64_MIN") == 0); CHECK((int64_t)nvlist_get_number(cnvl, cname) == INT64_MIN); cname = nvlist_next(cnvl, &ctype, &ccookie); CHECK(cname != NULL); CHECK(ctype == NV_TYPE_NUMBER); CHECK(strcmp(cname, "nvlist/number/INT64_MAX") == 0); CHECK((int64_t)nvlist_get_number(cnvl, cname) == INT64_MAX); cname = nvlist_next(cnvl, &ctype, &ccookie); CHECK(cname != NULL); CHECK(ctype == NV_TYPE_STRING); CHECK(strcmp(cname, "nvlist/string/") == 0); CHECK(strcmp(nvlist_get_string(cnvl, cname), "") == 
0); cname = nvlist_next(cnvl, &ctype, &ccookie); CHECK(cname != NULL); CHECK(ctype == NV_TYPE_STRING); CHECK(strcmp(cname, "nvlist/string/x") == 0); CHECK(strcmp(nvlist_get_string(cnvl, cname), "x") == 0); cname = nvlist_next(cnvl, &ctype, &ccookie); CHECK(cname != NULL); CHECK(ctype == NV_TYPE_STRING); CHECK(strcmp(cname, "nvlist/string/abcdefghijklmnopqrstuvwxyz") == 0); CHECK(strcmp(nvlist_get_string(cnvl, cname), "abcdefghijklmnopqrstuvwxyz") == 0); cname = nvlist_next(cnvl, &ctype, &ccookie); CHECK(cname != NULL); CHECK(ctype == NV_TYPE_DESCRIPTOR); CHECK(strcmp(cname, "nvlist/descriptor/STDERR_FILENO") == 0); CHECK(fd_is_valid(nvlist_get_descriptor(cnvl, cname))); cname = nvlist_next(cnvl, &ctype, &ccookie); CHECK(cname != NULL); CHECK(ctype == NV_TYPE_BINARY); CHECK(strcmp(cname, "nvlist/binary/x") == 0); CHECK(memcmp(nvlist_get_binary(cnvl, cname, NULL), "x", 1) == 0); CHECK(memcmp(nvlist_get_binary(cnvl, cname, &size), "x", 1) == 0); CHECK(size == 1); cname = nvlist_next(cnvl, &ctype, &ccookie); CHECK(cname != NULL); CHECK(ctype == NV_TYPE_BINARY); CHECK(strcmp(cname, "nvlist/binary/abcdefghijklmnopqrstuvwxyz") == 0); CHECK(memcmp(nvlist_get_binary(cnvl, cname, NULL), "abcdefghijklmnopqrstuvwxyz", sizeof("abcdefghijklmnopqrstuvwxyz")) == 0); CHECK(memcmp(nvlist_get_binary(cnvl, cname, &size), "abcdefghijklmnopqrstuvwxyz", sizeof("abcdefghijklmnopqrstuvwxyz")) == 0); CHECK(size == sizeof("abcdefghijklmnopqrstuvwxyz")); cname = nvlist_next(cnvl, &ctype, &ccookie); CHECK(cname != NULL); CHECK(ctype == NV_TYPE_NVLIST); CHECK(strcmp(cname, "nvlist/nvlist/empty") == 0); empty = nvlist_get_nvlist(cnvl, cname); CHECK(nvlist_empty(empty)); cname = nvlist_next(cnvl, &ctype, &ccookie); CHECK(cname == NULL); name = nvlist_next(nvl, &type, &cookie); CHECK(name == NULL); } -int -main(void) +static void +send_nvlist(void) { int status, socks[2]; pid_t pid; - printf("1..134\n"); - fflush(stdout); - if (socketpair(PF_UNIX, SOCK_STREAM, 0, socks) < 0) err(1, "socketpair() failed"); pid = fork(); switch (pid) { case -1: /* Failure. */ err(1, "unable to fork"); case 0: /* Child. */ close(socks[0]); child(socks[1]); - return (0); + _exit(0); default: /* Parent. 
*/ close(socks[1]); parent(socks[0]); break; } if (waitpid(pid, &status, 0) < 0) err(1, "waitpid() failed"); +} + +static void +send_closed_fd(void) +{ + nvlist_t *nvl; + int error, socks[2]; + + if (socketpair(PF_UNIX, SOCK_STREAM, 0, socks) < 0) + err(1, "socketpair() failed"); + + nvl = nvlist_create(0); + nvlist_add_descriptor(nvl, "fd", 12345); + error = nvlist_error(nvl); + CHECK(error == EBADF); + + error = nvlist_send(socks[1], nvl); + CHECK(error != 0 && errno == EBADF); +} + +int +main(void) +{ + + printf("1..136\n"); + fflush(stdout); + + send_nvlist(); + send_closed_fd(); return (0); } Index: projects/clang700-import/lib/libufs/Makefile =================================================================== --- projects/clang700-import/lib/libufs/Makefile (revision 340426) +++ projects/clang700-import/lib/libufs/Makefile (revision 340427) @@ -1,36 +1,37 @@ # $FreeBSD$ PACKAGE=lib${LIB} LIB= ufs SHLIBDIR?= /lib -SHLIB_MAJOR= 6 +SHLIB_MAJOR= 7 SRCS= block.c cgroup.c crc32.c inode.c sblock.c type.c ffs_subr.c ffs_tables.c INCS= libufs.h -MAN= bread.3 cgread.3 libufs.3 sbread.3 ufs_disk_close.3 +MAN= bread.3 cgread.3 getinode.3 libufs.3 sbread.3 ufs_disk_close.3 MLINKS+= bread.3 bwrite.3 MLINKS+= bread.3 berase.3 MLINKS+= cgread.3 cgread1.3 MLINKS+= cgread.3 cgget.3 MLINKS+= cgread.3 cgwrite.3 MLINKS+= cgread.3 cgwrite1.3 MLINKS+= cgread.3 cgput.3 +MLINKS+= getinode.3 putinode.3 MLINKS+= sbread.3 sbwrite.3 MLINKS+= sbread.3 sbget.3 MLINKS+= sbread.3 sbput.3 MLINKS+= ufs_disk_close.3 ufs_disk_fillout.3 MLINKS+= ufs_disk_close.3 ufs_disk_fillout_blank.3 MLINKS+= ufs_disk_close.3 ufs_disk_write.3 .PATH: ${SRCTOP}/sys/libkern ${SRCTOP}/sys/ufs/ffs WARNS?= 2 CFLAGS+= -D_LIBUFS .if defined(LIBUFS_DEBUG) CFLAGS+= -D_LIBUFS_DEBUGGING .endif CFLAGS+= -I${.CURDIR} .include Index: projects/clang700-import/lib/libufs/getinode.3 =================================================================== --- projects/clang700-import/lib/libufs/getinode.3 (nonexistent) +++ projects/clang700-import/lib/libufs/getinode.3 (revision 340427) @@ -0,0 +1,131 @@ +.\" Author: Marshall Kirk McKusick +.\" Date: January 19, 2018 +.\" Description: +.\" Manual page for libufs functions: +.\" getinode(3) +.\" putinode(3) +.\" +.\" This file is in the public domain. +.\" +.\" $FreeBSD$ +.\" +.Dd November 10, 2018 +.Dt GETINODE 3 +.Os +.Sh NAME +.Nm getinode , putinode +.Nd fetch and store inodes on a UFS file system +.Sh LIBRARY +.Lb libufs +.Sh SYNOPSIS +.In ufs/ufs/dinode.h +.In ufs/ffs/fs.h +.In libufs.h +.Ft int +.Fn getinode "struct uufsd *disk" "union dinodep *dp" "ino_t inumber" +.Ft int +.Fn putinode "struct uufsd *disk" +.Sh DESCRIPTION +The +.Fn getinode +and +.Fn putinode +functions provide an inode fetch and store API for +.Xr libufs 3 +consumers. +They operate on a userland UFS disk structure. +The +.Fn getinode +function fetches the specified inode from the filesystem. +The +.Fn putinode +function stores the most recently fetched inode to the filesystem. +.Pp +The +.Va dinodep +union is defined as: +.Bd -literal -offset indent +union dinodep { + struct ufs1_dinode *dp1; + struct ufs2_dinode *dp2; +}; +.Ed +.Pp +Sample code to clear write permissions for inode number +.Fa inumber +stored on the filesystem described by +.Fa diskp . 
+.Bd -literal -offset indent +#include +#include + +#include +#include +#include + +void +clearwrite(struct uufsd *diskp, ino_t inumber) +{ + union dinodep dp; + + if (getinode(diskp, &dp, inumber) == -1) + err(1, "getinode: %s", diskp->d_error); + switch (diskp->d_ufs) { + case 1: /* UFS 1 filesystem */ + dp.dp1->di_mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH); + break; + case 2: /* UFS 2 filesystem */ + dp.dp2->di_mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH); + break; + default: + errx(1, "unknown filesystem type"); + } + if (putinode(diskp) == -1) + err(1, "putinode: %s", diskp->d_error); +} +.Ed +.Sh RETURN VALUES +The +.Fn getinode +and +.Fn putinode +functions return 0 on success, or \-1 in case of any error. +A string describing the error is stored in +.Fa diskp->d_error . +The global +.Fa errno +often provides additional information. +.Sh ERRORS +The function +.Fn getinode +may fail and set +.Va errno +for any of the errors specified for the library function +.Xr pread 2 . +It can also fail if the inode number is out of the range of inodes +in the filesystem. +.Pp +The function +.Fn putinode +may fail and set +.Va errno +for any of the errors specified for the library functions +.Xr ufs_disk_write 3 +or +.Xr pwrite 2 . +.Pp +Additionally both functions may follow the +.Xr libufs 3 +error methodologies in case of a device error. +.Sh SEE ALSO +.Xr pread 2 , +.Xr pwrite 2 , +.Xr libufs 3 , +.Xr ufs_disk_write 3 +.Sh HISTORY +These functions first appeared as part of +.Xr libufs 3 +in +.Fx 13.0 . +.Sh AUTHORS +.An Marshall Kirk McKusick Aq Mt mckusick@freebsd.org Property changes on: projects/clang700-import/lib/libufs/getinode.3 ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang700-import/lib/libufs/inode.c =================================================================== --- projects/clang700-import/lib/libufs/inode.c (revision 340426) +++ projects/clang700-import/lib/libufs/inode.c (revision 340427) @@ -1,121 +1,115 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002 Juli Mallett. All rights reserved. * * This software was written by Juli Mallett for the * FreeBSD project. Redistribution and use in source and binary forms, with * or without modification, are permitted provided that the following * conditions are met: * * 1. Redistribution of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistribution in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include int -getino(struct uufsd *disk, void **dino, ino_t inode, int *mode) +getinode(struct uufsd *disk, union dinodep *dp, ino_t inum) { ino_t min, max; caddr_t inoblock; - struct ufs1_dinode *dp1; - struct ufs2_dinode *dp2; struct fs *fs; ERROR(disk, NULL); fs = &disk->d_fs; - if (inode >= (ino_t)fs->fs_ipg * fs->fs_ncg) { + if (inum >= (ino_t)fs->fs_ipg * fs->fs_ncg) { ERROR(disk, "inode number out of range"); return (-1); } inoblock = disk->d_inoblock; min = disk->d_inomin; max = disk->d_inomax; if (inoblock == NULL) { inoblock = malloc(fs->fs_bsize); if (inoblock == NULL) { ERROR(disk, "unable to allocate inode block"); return (-1); } disk->d_inoblock = inoblock; } - if (inode >= min && inode < max) + if (inum >= min && inum < max) goto gotit; - bread(disk, fsbtodb(fs, ino_to_fsba(fs, inode)), inoblock, + bread(disk, fsbtodb(fs, ino_to_fsba(fs, inum)), inoblock, fs->fs_bsize); - disk->d_inomin = min = inode - (inode % INOPB(fs)); + disk->d_inomin = min = inum - (inum % INOPB(fs)); disk->d_inomax = max = min + INOPB(fs); gotit: switch (disk->d_ufs) { case 1: - dp1 = &((struct ufs1_dinode *)inoblock)[inode - min]; - if (mode != NULL) - *mode = dp1->di_mode & IFMT; - if (dino != NULL) - *dino = dp1; + disk->d_dp.dp1 = &((struct ufs1_dinode *)inoblock)[inum - min]; + if (dp != NULL) + *dp = disk->d_dp; return (0); case 2: - dp2 = &((struct ufs2_dinode *)inoblock)[inode - min]; - if (mode != NULL) - *mode = dp2->di_mode & IFMT; - if (dino != NULL) - *dino = dp2; + disk->d_dp.dp2 = &((struct ufs2_dinode *)inoblock)[inum - min]; + if (dp != NULL) + *dp = disk->d_dp; return (0); default: break; } ERROR(disk, "unknown UFS filesystem type"); return (-1); } int -putino(struct uufsd *disk) +putinode(struct uufsd *disk) { struct fs *fs; fs = &disk->d_fs; if (disk->d_inoblock == NULL) { ERROR(disk, "No inode block allocated"); return (-1); } if (bwrite(disk, fsbtodb(fs, ino_to_fsba(&disk->d_fs, disk->d_inomin)), disk->d_inoblock, disk->d_fs.fs_bsize) <= 0) return (-1); return (0); } Index: projects/clang700-import/lib/libufs/libufs.h =================================================================== --- projects/clang700-import/lib/libufs/libufs.h (revision 340426) +++ projects/clang700-import/lib/libufs/libufs.h (revision 340427) @@ -1,165 +1,170 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002 Juli Mallett. All rights reserved. * * This software was written by Juli Mallett for the * FreeBSD project. Redistribution and use in source and binary forms, with * or without modification, are permitted provided that the following * conditions are met: * * 1. Redistribution of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. 
Redistribution in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef __LIBUFS_H__ #define __LIBUFS_H__ /* * libufs structures. */ +union dinodep { + struct ufs1_dinode *dp1; + struct ufs2_dinode *dp2; +}; /* * userland ufs disk. */ struct uufsd { const char *d_name; /* disk name */ int d_ufs; /* decimal UFS version */ int d_fd; /* raw device file descriptor */ long d_bsize; /* device bsize */ ufs2_daddr_t d_sblock; /* superblock location */ struct csum *d_sbcsum; /* Superblock summary info */ caddr_t d_inoblock; /* inode block */ uint32_t d_inomin; /* low inode (not ino_t for ABI compat) */ uint32_t d_inomax; /* high inode (not ino_t for ABI compat) */ + union dinodep d_dp; /* pointer to currently active inode */ union { struct fs d_fs; /* filesystem information */ char d_sb[MAXBSIZE]; /* superblock as buffer */ } d_sbunion; union { struct cg d_cg; /* cylinder group */ char d_buf[MAXBSIZE]; /* cylinder group storage */ } d_cgunion; int d_ccg; /* current cylinder group */ int d_lcg; /* last cylinder group (in d_cg) */ const char *d_error; /* human readable disk error */ int d_mine; /* internal flags */ #define d_fs d_sbunion.d_fs #define d_sb d_sbunion.d_sb #define d_cg d_cgunion.d_cg }; /* * libufs macros (internal, non-exported). */ #ifdef _LIBUFS /* * Trace steps through libufs, to be used at entry and erroneous return. */ static inline void ERROR(struct uufsd *u, const char *str) { #ifdef _LIBUFS_DEBUGGING if (str != NULL) { fprintf(stderr, "libufs: %s", str); if (errno != 0) fprintf(stderr, ": %s", strerror(errno)); fprintf(stderr, "\n"); } #endif if (u != NULL) u->d_error = str; } #endif /* _LIBUFS */ __BEGIN_DECLS /* * libufs prototypes. 
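/*
 * A minimal consumer sketch (illustrative, not part of this change) of the
 * getinode()/putinode() interface built on the union dinodep declared above:
 * the caller picks the UFS1 or UFS2 view of the inode by switching on d_ufs
 * instead of receiving a mode out-parameter as with the old getino().  The
 * header list follows the getinode(3) SYNOPSIS, and the error handling
 * mirrors the manual page example; the function name is only illustrative.
 */
#include <sys/param.h>
#include <sys/mount.h>
#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>
#include <err.h>
#include <stdint.h>
#include <stdio.h>
#include <libufs.h>

static void
printinode(const char *special, ino_t inumber)
{
    struct uufsd disk;
    union dinodep dp;

    if (ufs_disk_fillout(&disk, special) == -1)
        errx(1, "ufs_disk_fillout: %s", disk.d_error);
    if (getinode(&disk, &dp, inumber) == -1)
        errx(1, "getinode: %s", disk.d_error);
    switch (disk.d_ufs) {
    case 1:     /* UFS1: inode fields are reached through dp.dp1 */
        printf("ino %ju mode %o size %ju\n", (uintmax_t)inumber,
            (unsigned)dp.dp1->di_mode, (uintmax_t)dp.dp1->di_size);
        break;
    case 2:     /* UFS2: inode fields are reached through dp.dp2 */
        printf("ino %ju mode %o size %ju\n", (uintmax_t)inumber,
            (unsigned)dp.dp2->di_mode, (uintmax_t)dp.dp2->di_size);
        break;
    default:
        errx(1, "unknown filesystem type %d", disk.d_ufs);
    }
    (void)ufs_disk_close(&disk);
}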
*/ /* * ffs_subr.c */ void ffs_clrblock(struct fs *, u_char *, ufs1_daddr_t); void ffs_clusteracct(struct fs *, struct cg *, ufs1_daddr_t, int); void ffs_fragacct(struct fs *, int, int32_t [], int); int ffs_isblock(struct fs *, u_char *, ufs1_daddr_t); int ffs_isfreeblock(struct fs *, u_char *, ufs1_daddr_t); void ffs_setblock(struct fs *, u_char *, ufs1_daddr_t); int ffs_sbget(void *, struct fs **, off_t, char *, int (*)(void *, off_t, void **, int)); int ffs_sbput(void *, struct fs *, off_t, int (*)(void *, off_t, void *, int)); /* * block.c */ ssize_t bread(struct uufsd *, ufs2_daddr_t, void *, size_t); ssize_t bwrite(struct uufsd *, ufs2_daddr_t, const void *, size_t); int berase(struct uufsd *, ufs2_daddr_t, ufs2_daddr_t); /* * cgroup.c */ ufs2_daddr_t cgballoc(struct uufsd *); int cgbfree(struct uufsd *, ufs2_daddr_t, long); ino_t cgialloc(struct uufsd *); int cgget(struct uufsd *, int, struct cg *); int cgput(struct uufsd *, struct cg *); int cgread(struct uufsd *); int cgread1(struct uufsd *, int); int cgwrite(struct uufsd *); int cgwrite1(struct uufsd *, int); /* * inode.c */ -int getino(struct uufsd *, void **, ino_t, int *); -int putino(struct uufsd *); +int getinode(struct uufsd *, union dinodep *, ino_t); +int putinode(struct uufsd *); /* * sblock.c */ int sbread(struct uufsd *); int sbwrite(struct uufsd *, int); /* low level superblock read/write functions */ int sbget(int, struct fs **, off_t); int sbput(int, struct fs *, int); /* * type.c */ int ufs_disk_close(struct uufsd *); int ufs_disk_fillout(struct uufsd *, const char *); int ufs_disk_fillout_blank(struct uufsd *, const char *); int ufs_disk_write(struct uufsd *); /* * crc32c.c */ uint32_t calculate_crc32c(uint32_t, const void *, size_t); __END_DECLS #endif /* __LIBUFS_H__ */ Index: projects/clang700-import/release/scripts/make-manifest.sh =================================================================== --- projects/clang700-import/release/scripts/make-manifest.sh (revision 340426) +++ projects/clang700-import/release/scripts/make-manifest.sh (revision 340427) @@ -1,73 +1,73 @@ #!/bin/sh # make-manifest.sh: create checksums and package descriptions for the installer # # Usage: make-manifest.sh foo1.txz foo2.txz ... # # The output file looks like this (tab-delimited): # foo1.txz SHA256-checksum Number-of-files foo1 Description Install-by-default # # $FreeBSD$ base="Base system" -doc="Additional Documentation" kernel="Kernel" ports="Ports tree" src="System source tree" lib32="32-bit compatibility libraries" tests="Test suite" desc_base="${base} (MANDATORY)" desc_base_dbg="${base} (Debugging)" -desc_doc="${doc}" desc_kernel="${kernel} (MANDATORY)" desc_kernel_dbg="${kernel} (Debugging)" desc_kernel_alt="Alternate ${kernel}" desc_kernel_alt_dbg="Alternate ${kernel} (Debugging)" desc_lib32="${lib32}" desc_lib32_dbg="${lib32} (Debugging)" desc_ports="${ports}" desc_src="${src}" desc_tests="${tests}" -default_doc=off default_src=off default_ports=off default_tests=off default_base_dbg=off default_lib32_dbg=off default_kernel_alt=off default_kernel_dbg=on default_kernel_alt_dbg=off for i in ${*}; do dist="${i}" distname="${i%%.txz}" distname="$(echo ${distname} | tr '-' '_')" distname="$(echo ${distname} | tr 'kernel.' 
'kernel_')" hash="$(sha256 -q ${i})" nfiles="$(tar tvf ${i} | wc -l | tr -d ' ')" default="$(eval echo \${default_${distname}:-on})" desc="$(eval echo \"\${desc_${distname}}\")" case ${i} in + doc.txz) + continue + ;; kernel-dbg.txz) desc="${desc_kernel_dbg}" ;; kernel.*-dbg.txz) desc="$(eval echo \"${desc_kernel_alt_dbg}\")" desc="${desc}: $(eval echo ${i%%-dbg.txz} | cut -f 2 -d '.')" default="$(eval echo \"${default_kernel_alt_dbg}\")" ;; kernel.*.txz) desc="$(eval echo \"${desc_kernel_alt}\")" desc="${desc}: $(eval echo ${i%%.txz} | cut -f 2 -d '.')" default="$(eval echo \"${default_kernel_alt}\")" ;; *) ;; esac printf "${dist}\t${hash}\t${nfiles}\t${distname}\t\"${desc}\"\t${default}\n" done Index: projects/clang700-import/sbin/clri/clri.c =================================================================== --- projects/clang700-import/sbin/clri/clri.c (revision 340426) +++ projects/clang700-import/sbin/clri/clri.c (revision 340427) @@ -1,127 +1,122 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1990, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Rich $alz of BBN Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if 0 #ifndef lint static const char copyright[] = "@(#) Copyright (c) 1990, 1993\n\ The Regents of the University of California. All rights reserved.\n"; #endif /* not lint */ #ifndef lint static char sccsid[] = "@(#)clri.c 8.2 (Berkeley) 9/23/93"; #endif /* not lint */ #endif #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include -union dinodep { - struct ufs1_dinode *dp1; - struct ufs2_dinode *dp2; -}; - static void usage(void) { (void)fprintf(stderr, "usage: clri special_device inode_number ...\n"); exit(1); } int main(int argc, char *argv[]) { union dinodep dp; struct uufsd disk; long generation; int inonum, exitval; char *fsname; if (argc < 3) usage(); /* get the superblock. 
*/ fsname = *++argv; if (ufs_disk_fillout(&disk, fsname) == -1) { printf("loading superblock: %s\n", disk.d_error); exit (1); } /* remaining arguments are inode numbers. */ exitval = 0; while (*++argv) { /* get the inode number. */ if ((inonum = atoi(*argv)) < UFS_ROOTINO) { printf("%s is not a valid inode number", *argv); exitval = 1; continue; } (void)printf("clearing %d\n", inonum); - if (getino(&disk, (void **)&dp, inonum, NULL) == -1) { - printf("getino: %s\n", disk.d_error); + if (getinode(&disk, &dp, inonum) == -1) { + printf("getinode: %s\n", disk.d_error); exitval = 1; continue; } /* clear the inode, and bump the generation count. */ if (disk.d_fs.fs_magic == FS_UFS1_MAGIC) { generation = dp.dp1->di_gen + 1; memset(dp.dp1, 0, sizeof(*dp.dp1)); dp.dp1->di_gen = generation; } else { generation = dp.dp2->di_gen + 1; memset(dp.dp2, 0, sizeof(*dp.dp2)); dp.dp2->di_gen = generation; } - putino(&disk); + putinode(&disk); (void)fsync(disk.d_fd); } (void)ufs_disk_close(&disk); exit(exitval); } Index: projects/clang700-import/sbin/dump/dump.h =================================================================== --- projects/clang700-import/sbin/dump/dump.h (revision 340426) +++ projects/clang700-import/sbin/dump/dump.h (revision 340427) @@ -1,184 +1,184 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1980, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)dump.h 8.2 (Berkeley) 4/28/95 * * $FreeBSD$ */ /* * Dump maps used to describe what is to be dumped. */ int mapsize; /* size of the state maps */ char *usedinomap; /* map of allocated inodes */ char *dumpdirmap; /* map of directories to be dumped */ char *dumpinomap; /* map of files to be dumped */ /* * Map manipulation macros. 
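/*
 * A standalone sketch (illustrative, not part of this change) of the inode
 * map addressing used by the SETINO/CLRINO/TSTINO macros defined just below:
 * inode numbers start at 1, so inode "ino" lives at bit (ino - 1) of the
 * map.  Two of the macros are copied verbatim so the example is
 * self-contained.
 */
#include <sys/types.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>

#define SETINO(ino, map) \
    map[(u_int)((ino) - 1) / CHAR_BIT] |= \
        1 << ((u_int)((ino) - 1) % CHAR_BIT)
#define TSTINO(ino, map) \
    (map[(u_int)((ino) - 1) / CHAR_BIT] & \
        (1 << ((u_int)((ino) - 1) % CHAR_BIT)))

int
main(void)
{
    char map[2];    /* covers inodes 1..16 */

    memset(map, 0, sizeof(map));
    SETINO(2, map);     /* inode 2 -> bit 1 of byte 0 */
    SETINO(9, map);     /* inode 9 -> bit 0 of byte 1 */
    printf("ino 1 %s, ino 2 %s, ino 9 %s\n",
        TSTINO(1, map) ? "set" : "clear",
        TSTINO(2, map) ? "set" : "clear",
        TSTINO(9, map) ? "set" : "clear");
    return (0);
}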
*/ #define SETINO(ino, map) \ map[(u_int)((ino) - 1) / CHAR_BIT] |= \ 1 << ((u_int)((ino) - 1) % CHAR_BIT) #define CLRINO(ino, map) \ map[(u_int)((ino) - 1) / CHAR_BIT] &= \ ~(1 << ((u_int)((ino) - 1) % CHAR_BIT)) #define TSTINO(ino, map) \ (map[(u_int)((ino) - 1) / CHAR_BIT] & \ (1 << ((u_int)((ino) - 1) % CHAR_BIT))) /* * All calculations done in 0.1" units! */ char *disk; /* name of the disk file */ char *tape; /* name of the tape file */ char *popenout; /* popen(3) per-"tape" command */ char *dumpdates; /* name of the file containing dump date information*/ char *temp; /* name of the file for doing rewrite of dumpdates */ int lastlevel; /* dump level of previous dump */ int level; /* dump level of this dump */ int uflag; /* update flag */ int diskfd; /* disk file descriptor */ int tapefd; /* tape file descriptor */ int pipeout; /* true => output to standard output */ ino_t curino; /* current inumber; used globally */ int newtape; /* new tape flag */ int density; /* density in 0.1" units */ long tapesize; /* estimated tape size, blocks */ long tsize; /* tape size in 0.1" units */ long asize; /* number of 0.1" units written on current tape */ int etapes; /* estimated number of tapes */ int nonodump; /* if set, do not honor UF_NODUMP user flags */ int unlimited; /* if set, write to end of medium */ int cachesize; /* size of block cache in bytes */ int rsync_friendly; /* be friendly with rsync */ int notify; /* notify operator flag */ int blockswritten; /* number of blocks written on current tape */ int tapeno; /* current tape number */ time_t tstart_writing; /* when started writing the first tape block */ time_t tend_writing; /* after writing the last tape block */ int passno; /* current dump pass number */ struct fs *sblock; /* the file system super block */ long dev_bsize; /* block size of underlying disk device */ int dev_bshift; /* log2(dev_bsize) */ int tp_bshift; /* log2(TP_BSIZE) */ /* operator interface functions */ void broadcast(const char *message); void infosch(int); void lastdump(int arg); /* int should be char */ void msg(const char *fmt, ...) __printflike(1, 2); void msgtail(const char *fmt, ...) __printflike(1, 2); int query(const char *question); void quit(const char *fmt, ...) 
__printflike(1, 2); void timeest(void); time_t unctime(char *str); /* mapping rouintes */ union dinode; int mapfiles(ino_t maxino, long *tapesize); int mapdirs(ino_t maxino, long *tapesize); /* file dumping routines */ void blkread(ufs2_daddr_t blkno, char *buf, int size); ssize_t cread(int fd, void *buf, size_t nbytes, off_t offset); void dumpino(union dinode *dp, ino_t ino); void dumpmap(char *map, int type, ino_t ino); void writeheader(ino_t ino); /* tape writing routines */ int alloctape(void); void close_rewind(void); void dumpblock(ufs2_daddr_t blkno, int size); void startnewtape(int top); void trewind(void); void writerec(char *dp, int isspcl); void Exit(int status) __dead2; void dumpabort(int signo) __dead2; void dump_getfstab(void); char *rawname(char *cp); -union dinode *getinode(ino_t inum, int *mode); +union dinode *getino(ino_t inum, int *mode); /* rdump routines */ #ifdef RDUMP void rmtclose(void); int rmthost(const char *host); int rmtopen(const char *tape, int mode); int rmtwrite(const char *buf, int count); #endif /* RDUMP */ void interrupt(int signo); /* in case operator bangs on console */ /* * Exit status codes */ #define X_FINOK 0 /* normal exit */ #define X_STARTUP 1 /* startup error */ #define X_REWRITE 2 /* restart writing from the check point */ #define X_ABORT 3 /* abort dump; don't attempt checkpointing */ #define OPGRENT "operator" /* group entry to notify */ struct fstab *fstabsearch(const char *key); /* search fs_file and fs_spec */ #ifndef NAME_MAX #define NAME_MAX 255 #endif /* * The contents of the file _PATH_DUMPDATES is maintained both on * a linked list, and then (eventually) arrayified. */ struct dumpdates { char dd_name[NAME_MAX+3]; int dd_level; time_t dd_ddate; }; int nddates; /* number of records (might be zero) */ struct dumpdates **ddatev; /* the arrayfied version */ void initdumptimes(void); void getdumptime(void); void putdumptime(void); #define ITITERATE(i, ddp) \ if (ddatev != NULL) \ for (ddp = ddatev[i = 0]; i < nddates; ddp = ddatev[++i]) #define DUMPFMTLEN 53 /* max device pathname length */ #define DUMPOUTFMT "%-*s %d %s" /* for printf */ /* name, level, ctime(date) */ #define DUMPINFMT "%s %d %[^\n]\n" /* inverse for scanf */ void sig(int signo); #ifndef _PATH_FSTAB #define _PATH_FSTAB "/etc/fstab" #endif Index: projects/clang700-import/sbin/dump/main.c =================================================================== --- projects/clang700-import/sbin/dump/main.c (revision 340426) +++ projects/clang700-import/sbin/dump/main.c (revision 340427) @@ -1,773 +1,773 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1980, 1991, 1993, 1994 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef lint static const char copyright[] = "@(#) Copyright (c) 1980, 1991, 1993, 1994\n\ The Regents of the University of California. All rights reserved.\n"; #endif /* not lint */ #ifndef lint #if 0 static char sccsid[] = "@(#)main.c 8.6 (Berkeley) 5/1/95"; #endif static const char rcsid[] = "$FreeBSD$"; #endif /* not lint */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "dump.h" #include "pathnames.h" int notify = 0; /* notify operator flag */ int snapdump = 0; /* dumping live filesystem, so use snapshot */ int blockswritten = 0; /* number of blocks written on current tape */ int tapeno = 0; /* current tape number */ int density = 0; /* density in bytes/0.1" " <- this is for hilit19 */ int ntrec = NTREC; /* # tape blocks in each tape record */ int cartridge = 0; /* Assume non-cartridge tape */ int cachesize = 0; /* block cache size (in bytes), defaults to 0 */ long dev_bsize = 1; /* recalculated below */ long blocksperfile; /* output blocks per file */ char *host = NULL; /* remote host (if any) */ static char *getmntpt(char *, int *); static long numarg(const char *, long, long); static void obsolete(int *, char **[]); static void usage(void) __dead2; int main(int argc, char *argv[]) { struct stat sb; ino_t ino; int dirty; union dinode *dp; struct fstab *dt; char *map, *mntpt; int ch, mode, mntflags; int i, ret, anydirskipped, bflag = 0, Tflag = 0, honorlevel = 1; int just_estimate = 0; ino_t maxino; char *tmsg; spcl.c_date = _time_to_time64(time(NULL)); tsize = 0; /* Default later, based on 'c' option for cart tapes */ dumpdates = _PATH_DUMPDATES; popenout = NULL; tape = NULL; temp = _PATH_DTMP; if (TP_BSIZE / DEV_BSIZE == 0 || TP_BSIZE % DEV_BSIZE != 0) quit("TP_BSIZE must be a multiple of DEV_BSIZE\n"); level = 0; rsync_friendly = 0; if (argc < 2) usage(); obsolete(&argc, &argv); while ((ch = getopt(argc, argv, "0123456789aB:b:C:cD:d:f:h:LnP:RrSs:T:uWw")) != -1) switch (ch) { /* dump level */ case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': level = 10 * level + ch - '0'; break; case 'a': /* `auto-size', Write to EOM. */ unlimited = 1; break; case 'B': /* blocks per output file */ blocksperfile = numarg("number of blocks per file", 1L, 0L); break; case 'b': /* blocks per tape write */ ntrec = numarg("number of blocks per write", 1L, 1000L); break; case 'C': cachesize = numarg("cachesize", 0, 0) * 1024 * 1024; break; case 'c': /* Tape is cart. 
not 9-track */ cartridge = 1; break; case 'D': dumpdates = optarg; break; case 'd': /* density, in bits per inch */ density = numarg("density", 10L, 327670L) / 10; if (density >= 625 && !bflag) ntrec = HIGHDENSITYTREC; break; case 'f': /* output file */ if (popenout != NULL) errx(X_STARTUP, "You cannot use the P and f " "flags together.\n"); tape = optarg; break; case 'h': honorlevel = numarg("honor level", 0L, 10L); break; case 'L': snapdump = 1; break; case 'n': /* notify operators */ notify = 1; break; case 'P': if (tape != NULL) errx(X_STARTUP, "You cannot use the P and f " "flags together.\n"); popenout = optarg; break; case 'r': /* store slightly less data to be friendly to rsync */ if (rsync_friendly < 1) rsync_friendly = 1; break; case 'R': /* store even less data to be friendlier to rsync */ if (rsync_friendly < 2) rsync_friendly = 2; break; case 'S': /* exit after estimating # of tapes */ just_estimate = 1; break; case 's': /* tape size, feet */ tsize = numarg("tape size", 1L, 0L) * 12 * 10; break; case 'T': /* time of last dump */ spcl.c_ddate = unctime(optarg); if (spcl.c_ddate < 0) { (void)fprintf(stderr, "bad time \"%s\"\n", optarg); exit(X_STARTUP); } Tflag = 1; lastlevel = -1; break; case 'u': /* update /etc/dumpdates */ uflag = 1; break; case 'W': /* what to do */ case 'w': lastdump(ch); exit(X_FINOK); /* do nothing else */ default: usage(); } argc -= optind; argv += optind; if (argc < 1) { (void)fprintf(stderr, "Must specify disk or file system\n"); exit(X_STARTUP); } disk = *argv++; argc--; if (argc >= 1) { (void)fprintf(stderr, "Unknown arguments to dump:"); while (argc--) (void)fprintf(stderr, " %s", *argv++); (void)fprintf(stderr, "\n"); exit(X_STARTUP); } if (rsync_friendly && (level > 0)) { (void)fprintf(stderr, "%s %s\n", "rsync friendly options", "can be used only with level 0 dumps."); exit(X_STARTUP); } if (Tflag && uflag) { (void)fprintf(stderr, "You cannot use the T and u flags together.\n"); exit(X_STARTUP); } if (popenout) { tape = "child pipeline process"; } else if (tape == NULL && (tape = getenv("TAPE")) == NULL) tape = _PATH_DEFTAPE; if (strcmp(tape, "-") == 0) { pipeout++; tape = "standard output"; } if (blocksperfile) blocksperfile = rounddown(blocksperfile, ntrec); else if (!unlimited) { /* * Determine how to default tape size and density * * density tape size * 9-track 1600 bpi (160 bytes/.1") 2300 ft. * 9-track 6250 bpi (625 bytes/.1") 2300 ft. * cartridge 8000 bpi (100 bytes/.1") 1700 ft. * (450*4 - slop) * hilit19 hits again: " */ if (density == 0) density = cartridge ? 100 : 160; if (tsize == 0) tsize = cartridge ? 
1700L*120L : 2300L*120L; } if (strchr(tape, ':')) { host = tape; tape = strchr(host, ':'); *tape++ = '\0'; #ifdef RDUMP if (strchr(tape, '\n')) { (void)fprintf(stderr, "invalid characters in tape\n"); exit(X_STARTUP); } if (rmthost(host) == 0) exit(X_STARTUP); #else (void)fprintf(stderr, "remote dump not enabled\n"); exit(X_STARTUP); #endif } (void)setuid(getuid()); /* rmthost() is the only reason to be setuid */ if (signal(SIGHUP, SIG_IGN) != SIG_IGN) signal(SIGHUP, sig); if (signal(SIGTRAP, SIG_IGN) != SIG_IGN) signal(SIGTRAP, sig); if (signal(SIGFPE, SIG_IGN) != SIG_IGN) signal(SIGFPE, sig); if (signal(SIGBUS, SIG_IGN) != SIG_IGN) signal(SIGBUS, sig); if (signal(SIGSEGV, SIG_IGN) != SIG_IGN) signal(SIGSEGV, sig); if (signal(SIGTERM, SIG_IGN) != SIG_IGN) signal(SIGTERM, sig); if (signal(SIGINT, interrupt) == SIG_IGN) signal(SIGINT, SIG_IGN); dump_getfstab(); /* /etc/fstab snarfed */ /* * disk can be either the full special file name, * the suffix of the special file name, * the special name missing the leading '/', * the file system name with or without the leading '/'. */ dt = fstabsearch(disk); if (dt != NULL) { disk = rawname(dt->fs_spec); if (disk == NULL) errx(X_STARTUP, "%s: unknown file system", dt->fs_spec); (void)strncpy(spcl.c_dev, dt->fs_spec, NAMELEN); (void)strncpy(spcl.c_filesys, dt->fs_file, NAMELEN); } else { (void)strncpy(spcl.c_dev, disk, NAMELEN); (void)strncpy(spcl.c_filesys, "an unlisted file system", NAMELEN); } spcl.c_dev[NAMELEN-1]='\0'; spcl.c_filesys[NAMELEN-1]='\0'; if ((mntpt = getmntpt(disk, &mntflags)) != NULL) { if (mntflags & MNT_RDONLY) { if (snapdump != 0) { msg("WARNING: %s\n", "-L ignored for read-only filesystem."); snapdump = 0; } } else if (snapdump == 0) { msg("WARNING: %s\n", "should use -L when dumping live read-write " "filesystems!"); } else { char snapname[BUFSIZ], snapcmd[BUFSIZ]; snprintf(snapname, sizeof snapname, "%s/.snap", mntpt); if ((stat(snapname, &sb) < 0) || !S_ISDIR(sb.st_mode)) { msg("WARNING: %s %s\n", "-L requested but snapshot location", snapname); msg(" %s: %s\n", "is not a directory", "dump downgraded, -L ignored"); snapdump = 0; } else { snprintf(snapname, sizeof snapname, "%s/.snap/dump_snapshot", mntpt); snprintf(snapcmd, sizeof snapcmd, "%s %s %s", _PATH_MKSNAP_FFS, mntpt, snapname); unlink(snapname); if (system(snapcmd) != 0) errx(X_STARTUP, "Cannot create %s: %s\n", snapname, strerror(errno)); if ((diskfd = open(snapname, O_RDONLY)) < 0) { unlink(snapname); errx(X_STARTUP, "Cannot open %s: %s\n", snapname, strerror(errno)); } unlink(snapname); if (fstat(diskfd, &sb) != 0) err(X_STARTUP, "%s: stat", snapname); spcl.c_date = _time_to_time64(sb.st_mtime); } } } else if (snapdump != 0) { msg("WARNING: Cannot use -L on an unmounted filesystem.\n"); snapdump = 0; } if (snapdump == 0) { if ((diskfd = open(disk, O_RDONLY)) < 0) err(X_STARTUP, "Cannot open %s", disk); if (fstat(diskfd, &sb) != 0) err(X_STARTUP, "%s: stat", disk); if (S_ISDIR(sb.st_mode)) errx(X_STARTUP, "%s: unknown file system", disk); } (void)strcpy(spcl.c_label, "none"); (void)gethostname(spcl.c_host, NAMELEN); spcl.c_level = level; spcl.c_type = TS_TAPE; if (rsync_friendly) { /* don't store real dump times */ spcl.c_date = 0; spcl.c_ddate = 0; } if (spcl.c_date == 0) { tmsg = "the epoch\n"; } else { time_t t = _time64_to_time(spcl.c_date); tmsg = ctime(&t); } msg("Date of this level %d dump: %s", level, tmsg); if (!Tflag && (!rsync_friendly)) getdumptime(); /* /etc/dumpdates snarfed */ if (spcl.c_ddate == 0) { tmsg = "the epoch\n"; } else { time_t t = 
_time64_to_time(spcl.c_ddate); tmsg = ctime(&t); } if (lastlevel < 0) msg("Date of last (level unknown) dump: %s", tmsg); else msg("Date of last level %d dump: %s", lastlevel, tmsg); msg("Dumping %s%s ", snapdump ? "snapshot of ": "", disk); if (dt != NULL) msgtail("(%s) ", dt->fs_file); if (host) msgtail("to %s on host %s\n", tape, host); else msgtail("to %s\n", tape); sync(); if ((ret = sbget(diskfd, &sblock, -1)) != 0) { switch (ret) { case ENOENT: warn("Cannot find file system superblock"); return (1); default: warn("Unable to read file system superblock"); return (1); } } dev_bsize = sblock->fs_fsize / fsbtodb(sblock, 1); dev_bshift = ffs(dev_bsize) - 1; if (dev_bsize != (1 << dev_bshift)) quit("dev_bsize (%ld) is not a power of 2", dev_bsize); tp_bshift = ffs(TP_BSIZE) - 1; if (TP_BSIZE != (1 << tp_bshift)) quit("TP_BSIZE (%d) is not a power of 2", TP_BSIZE); maxino = sblock->fs_ipg * sblock->fs_ncg; mapsize = roundup(howmany(maxino, CHAR_BIT), TP_BSIZE); usedinomap = (char *)calloc((unsigned) mapsize, sizeof(char)); dumpdirmap = (char *)calloc((unsigned) mapsize, sizeof(char)); dumpinomap = (char *)calloc((unsigned) mapsize, sizeof(char)); tapesize = 3 * (howmany(mapsize * sizeof(char), TP_BSIZE) + 1); nonodump = spcl.c_level < honorlevel; passno = 1; setproctitle("%s: pass 1: regular files", disk); msg("mapping (Pass I) [regular files]\n"); anydirskipped = mapfiles(maxino, &tapesize); passno = 2; setproctitle("%s: pass 2: directories", disk); msg("mapping (Pass II) [directories]\n"); while (anydirskipped) { anydirskipped = mapdirs(maxino, &tapesize); } if (pipeout || unlimited) { tapesize += 10; /* 10 trailer blocks */ msg("estimated %ld tape blocks.\n", tapesize); } else { double fetapes; if (blocksperfile) fetapes = (double) tapesize / blocksperfile; else if (cartridge) { /* Estimate number of tapes, assuming streaming stops at the end of each block written, and not in mid-block. Assume no erroneous blocks; this can be compensated for with an artificially low tape size. */ fetapes = ( (double) tapesize /* blocks */ * TP_BSIZE /* bytes/block */ * (1.0/density) /* 0.1" / byte " */ + (double) tapesize /* blocks */ * (1.0/ntrec) /* streaming-stops per block */ * 15.48 /* 0.1" / streaming-stop " */ ) * (1.0 / tsize ); /* tape / 0.1" " */ } else { /* Estimate number of tapes, for old fashioned 9-track tape */ int tenthsperirg = (density == 625) ? 3 : 7; fetapes = ( (double) tapesize /* blocks */ * TP_BSIZE /* bytes / block */ * (1.0/density) /* 0.1" / byte " */ + (double) tapesize /* blocks */ * (1.0/ntrec) /* IRG's / block */ * tenthsperirg /* 0.1" / IRG " */ ) * (1.0 / tsize ); /* tape / 0.1" " */ } etapes = fetapes; /* truncating assignment */ etapes++; /* count the dumped inodes map on each additional tape */ tapesize += (etapes - 1) * (howmany(mapsize * sizeof(char), TP_BSIZE) + 1); tapesize += etapes + 10; /* headers + 10 trailer blks */ msg("estimated %ld tape blocks on %3.2f tape(s).\n", tapesize, fetapes); } /* * If the user only wants an estimate of the number of * tapes, exit now. */ if (just_estimate) exit(0); /* * Allocate tape buffer. 
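/*
 * A worked version (illustrative, not part of this change) of the 9-track
 * tape estimate computed above, reduced to plain arithmetic: data length
 * plus inter-record gaps, both in 0.1" units, divided by the tape length.
 * The constants are assumed to match dump's defaults (TP_BSIZE 1024 bytes,
 * NTREC 10, 2300 ft reel); the block count is a made-up input.
 */
#include <stdio.h>

int
main(void)
{
    double tapesize = 100000.0;     /* TP_BSIZE blocks to dump (example) */
    double tp_bsize = 1024.0;       /* bytes per tape block */
    double ntrec = 10.0;            /* tape blocks per record */
    double density = 625.0;         /* bytes per 0.1" (6250 bpi) */
    double tsize = 2300.0 * 120.0;  /* tape length in 0.1" units */
    int tenthsperirg = (density == 625.0) ? 3 : 7;
    double fetapes;

    fetapes = (tapesize * tp_bsize / density +      /* data length */
        tapesize / ntrec * tenthsperirg) / tsize;   /* gaps / tape length */
    printf("estimated %.2f tape(s)\n", fetapes);
    return (0);
}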
*/ if (!alloctape()) quit( "can't allocate tape buffers - try a smaller blocking factor.\n"); startnewtape(1); (void)time((time_t *)&(tstart_writing)); dumpmap(usedinomap, TS_CLRI, maxino - 1); passno = 3; setproctitle("%s: pass 3: directories", disk); msg("dumping (Pass III) [directories]\n"); dirty = 0; /* XXX just to get gcc to shut up */ for (map = dumpdirmap, ino = 1; ino < maxino; ino++) { if (((ino - 1) % CHAR_BIT) == 0) /* map is offset by 1 */ dirty = *map++; else dirty >>= 1; if ((dirty & 1) == 0) continue; /* * Skip directory inodes deleted and maybe reallocated */ - dp = getinode(ino, &mode); + dp = getino(ino, &mode); if (mode != IFDIR) continue; (void)dumpino(dp, ino); } passno = 4; setproctitle("%s: pass 4: regular files", disk); msg("dumping (Pass IV) [regular files]\n"); for (map = dumpinomap, ino = 1; ino < maxino; ino++) { if (((ino - 1) % CHAR_BIT) == 0) /* map is offset by 1 */ dirty = *map++; else dirty >>= 1; if ((dirty & 1) == 0) continue; /* * Skip inodes deleted and reallocated as directories. */ - dp = getinode(ino, &mode); + dp = getino(ino, &mode); if (mode == IFDIR) continue; (void)dumpino(dp, ino); } (void)time((time_t *)&(tend_writing)); spcl.c_type = TS_END; for (i = 0; i < ntrec; i++) writeheader(maxino - 1); if (pipeout) msg("DUMP: %jd tape blocks\n", (intmax_t)spcl.c_tapea); else msg("DUMP: %jd tape blocks on %d volume%s\n", (intmax_t)spcl.c_tapea, spcl.c_volume, (spcl.c_volume == 1) ? "" : "s"); /* report dump performance, avoid division through zero */ if (tend_writing - tstart_writing == 0) msg("finished in less than a second\n"); else msg("finished in %jd seconds, throughput %jd KBytes/sec\n", (intmax_t)tend_writing - tstart_writing, (intmax_t)(spcl.c_tapea / (tend_writing - tstart_writing))); putdumptime(); trewind(); broadcast("DUMP IS DONE!\a\a\n"); msg("DUMP IS DONE\n"); Exit(X_FINOK); /* NOTREACHED */ } static void usage(void) { fprintf(stderr, "usage: dump [-0123456789acLnSu] [-B records] [-b blocksize] [-C cachesize]\n" " [-D dumpdates] [-d density] [-f file | -P pipecommand] [-h level]\n" " [-s feet] [-T date] filesystem\n" " dump -W | -w\n"); exit(X_STARTUP); } /* * Check to see if a disk is currently mounted. */ static char * getmntpt(char *name, int *mntflagsp) { long mntsize, i; struct statfs *mntbuf; mntsize = getmntinfo(&mntbuf, MNT_NOWAIT); for (i = 0; i < mntsize; i++) { if (!strcmp(mntbuf[i].f_mntfromname, name)) { *mntflagsp = mntbuf[i].f_flags; return (mntbuf[i].f_mntonname); } } return (0); } /* * Pick up a numeric argument. It must be nonnegative and in the given * range (except that a vmax of 0 means unlimited). */ static long numarg(const char *meaning, long vmin, long vmax) { char *p; long val; val = strtol(optarg, &p, 10); if (*p) errx(1, "illegal %s -- %s", meaning, optarg); if (val < vmin || (vmax && val > vmax)) errx(1, "%s must be between %ld and %ld", meaning, vmin, vmax); return (val); } void sig(int signo) { switch(signo) { case SIGALRM: case SIGBUS: case SIGFPE: case SIGHUP: case SIGTERM: case SIGTRAP: if (pipeout) quit("Signal on pipe: cannot recover\n"); msg("Rewriting attempted as response to unknown signal.\n"); (void)fflush(stderr); (void)fflush(stdout); close_rewind(); exit(X_REWRITE); /* NOTREACHED */ case SIGSEGV: msg("SIGSEGV: ABORTING!\n"); (void)signal(SIGSEGV, SIG_DFL); (void)kill(0, SIGSEGV); /* NOTREACHED */ } } char * rawname(char *cp) { struct stat sb; /* * Ensure that the device passed in is a raw device. 
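/*
 * A standalone sketch (illustrative, not part of this change) of the mount
 * table lookup done by getmntpt() above: getmntinfo(3) returns the list of
 * mounted file systems, and matching f_mntfromname against the special
 * device yields the mount point and flags.  The device path is only a
 * placeholder.
 */
#include <sys/param.h>
#include <sys/ucred.h>
#include <sys/mount.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
    struct statfs *mntbuf;
    const char *special = "/dev/ada0p2";    /* placeholder device */
    int i, mntsize;

    mntsize = getmntinfo(&mntbuf, MNT_NOWAIT);
    for (i = 0; i < mntsize; i++) {
        if (strcmp(mntbuf[i].f_mntfromname, special) != 0)
            continue;
        printf("%s mounted on %s%s\n", special, mntbuf[i].f_mntonname,
            (mntbuf[i].f_flags & MNT_RDONLY) ? " (read-only)" : "");
        return (0);
    }
    printf("%s is not mounted\n", special);
    return (0);
}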
*/ if (stat(cp, &sb) == 0 && (sb.st_mode & S_IFMT) == S_IFCHR) return (cp); /* * Since there's only one device type now, we can't construct any * better name, so we have to return NULL. */ return (NULL); } /* * obsolete -- * Change set of key letters and ordered arguments into something * getopt(3) will like. */ static void obsolete(int *argcp, char **argvp[]) { int argc, flags; char *ap, **argv, *flagsp, **nargv, *p; /* Setup. */ argv = *argvp; argc = *argcp; /* * Return if no arguments or first argument has leading * dash or slash. */ ap = argv[1]; if (argc == 1 || *ap == '-' || *ap == '/') return; /* Allocate space for new arguments. */ if ((*argvp = nargv = malloc((argc + 1) * sizeof(char *))) == NULL || (p = flagsp = malloc(strlen(ap) + 2)) == NULL) err(1, NULL); *nargv++ = *argv; argv += 2; for (flags = 0; *ap; ++ap) { switch (*ap) { case 'B': case 'b': case 'd': case 'f': case 'D': case 'C': case 'h': case 's': case 'T': if (*argv == NULL) { warnx("option requires an argument -- %c", *ap); usage(); } if ((nargv[0] = malloc(strlen(*argv) + 2 + 1)) == NULL) err(1, NULL); nargv[0][0] = '-'; nargv[0][1] = *ap; (void)strcpy(&nargv[0][2], *argv); ++argv; ++nargv; break; default: if (!flags) { *p++ = '-'; flags = 1; } *p++ = *ap; break; } } /* Terminate flags. */ if (flags) { *p = '\0'; *nargv++ = flagsp; } else free(flagsp); /* Copy remaining arguments. */ while ((*nargv++ = *argv++)); /* Update argument count. */ *argcp = nargv - *argvp - 1; } Index: projects/clang700-import/sbin/dump/traverse.c =================================================================== --- projects/clang700-import/sbin/dump/traverse.c (revision 340426) +++ projects/clang700-import/sbin/dump/traverse.c (revision 340427) @@ -1,1014 +1,1014 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1980, 1988, 1991, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifndef lint #if 0 static char sccsid[] = "@(#)traverse.c 8.7 (Berkeley) 6/15/95"; #endif static const char rcsid[] = "$FreeBSD$"; #endif /* not lint */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "dump.h" union dinode { struct ufs1_dinode dp1; struct ufs2_dinode dp2; }; #define DIP(dp, field) \ ((sblock->fs_magic == FS_UFS1_MAGIC) ? \ (dp)->dp1.field : (dp)->dp2.field) #define DIP_SET(dp, field, val) do {\ if (sblock->fs_magic == FS_UFS1_MAGIC) \ (dp)->dp1.field = (val); \ else \ (dp)->dp2.field = (val); \ } while (0) #define HASDUMPEDFILE 0x1 #define HASSUBDIRS 0x2 static int dirindir(ino_t ino, ufs2_daddr_t blkno, int level, long *size, long *tapesize, int nodump, ino_t maxino); static void dmpindir(union dinode *dp, ino_t ino, ufs2_daddr_t blk, int level, off_t *size); static void ufs1_blksout(ufs1_daddr_t *blkp, int frags, ino_t ino); static void ufs2_blksout(union dinode *dp, ufs2_daddr_t *blkp, int frags, ino_t ino, int last); static int appendextdata(union dinode *dp); static void writeextdata(union dinode *dp, ino_t ino, int added); static int searchdir(ino_t ino, ufs2_daddr_t blkno, long size, long filesize, long *tapesize, int nodump, ino_t maxino); static long blockest(union dinode *dp); /* * This is an estimation of the number of TP_BSIZE blocks in the file. * It estimates the number of blocks in files with holes by assuming * that all of the blocks accounted for by di_blocks are data blocks * (when some of the blocks are usually used for indirect pointers); * hence the estimate may be high. */ static long blockest(union dinode *dp) { long blkest, sizeest; /* * dp->di_size is the size of the file in bytes. * dp->di_blocks stores the number of sectors actually in the file. * If there are more sectors than the size would indicate, this just * means that there are indirect blocks in the file or unused * sectors in the last file block; we can safely ignore these * (blkest = sizeest below). * If the file is bigger than the number of sectors would indicate, * then the file has holes in it. In this case we must use the * block count to estimate the number of data blocks used, but * we use the actual size for estimating the number of indirect * dump blocks (sizeest vs. blkest in the indirect block * calculation). */ if ((DIP(dp, di_flags) & SF_SNAPSHOT) != 0) return (1); blkest = howmany(dbtob(DIP(dp, di_blocks)), TP_BSIZE); sizeest = howmany(DIP(dp, di_size), TP_BSIZE); if (blkest > sizeest) blkest = sizeest; if (DIP(dp, di_size) > sblock->fs_bsize * UFS_NDADDR) { /* calculate the number of indirect blocks on the dump tape */ blkest += howmany(sizeest - UFS_NDADDR * sblock->fs_bsize / TP_BSIZE, TP_NINDIR); } return (blkest + 1); } /* Auxiliary macro to pick up files changed since previous dump. */ #define CHANGEDSINCE(dp, t) \ (DIP(dp, di_mtime) >= (t) || DIP(dp, di_ctime) >= (t)) /* The WANTTODUMP macro decides whether a file should be dumped. */ #ifdef UF_NODUMP #define WANTTODUMP(dp) \ (CHANGEDSINCE(dp, spcl.c_ddate) && \ (nonodump || (DIP(dp, di_flags) & UF_NODUMP) != UF_NODUMP)) #else #define WANTTODUMP(dp) CHANGEDSINCE(dp, spcl.c_ddate) #endif /* * Dump pass 1. * * Walk the inode list for a file system to find all allocated inodes * that have been modified since the previous dump time. Also, find all * the directories in the file system. 
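/*
 * A reduced sketch (illustrative, not part of this change) of the estimate
 * performed by blockest() above: take the smaller of the allocated-block and
 * file-size estimates, then add a guess for indirect blocks when the file
 * extends past the direct blocks.  di_size is in bytes, di_blocks in
 * DEV_BSIZE sectors, and the result is in TP_BSIZE tape blocks; the tape
 * constants are assumed to match <protocols/dumprestore.h>.
 */
#include <stdio.h>

#define TP_BSIZE    1024
#define TP_NINDIR   (TP_BSIZE / 2)
#define DEV_BSIZE   512

static long
blockest_sketch(long long di_size, long long di_blocks, long fs_bsize,
    int ndaddr)
{
    long blkest, sizeest;

    blkest = (di_blocks * DEV_BSIZE + TP_BSIZE - 1) / TP_BSIZE;
    sizeest = (di_size + TP_BSIZE - 1) / TP_BSIZE;
    if (blkest > sizeest)
        blkest = sizeest;   /* indirects and unused tail sectors ignored */
    if (di_size > (long long)fs_bsize * ndaddr)
        blkest += (sizeest - (long long)ndaddr * fs_bsize / TP_BSIZE +
            TP_NINDIR - 1) / TP_NINDIR;
    return (blkest + 1);    /* final + 1 as in blockest() */
}

int
main(void)
{
    /* a fully allocated 1 MB file on a 32 kB-block file system, 12 direct blocks */
    printf("%ld tape blocks\n",
        blockest_sketch(1 << 20, (1 << 20) / DEV_BSIZE, 32768, 12));
    return (0);
}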
*/ int mapfiles(ino_t maxino, long *tapesize) { int i, cg, mode, inosused; int anydirskipped = 0; union dinode *dp; struct cg *cgp; ino_t ino; u_char *cp; if ((cgp = malloc(sblock->fs_cgsize)) == NULL) quit("mapfiles: cannot allocate memory.\n"); for (cg = 0; cg < sblock->fs_ncg; cg++) { ino = cg * sblock->fs_ipg; blkread(fsbtodb(sblock, cgtod(sblock, cg)), (char *)cgp, sblock->fs_cgsize); if (sblock->fs_magic == FS_UFS2_MAGIC) inosused = cgp->cg_initediblk; else inosused = sblock->fs_ipg; /* * If we are using soft updates, then we can trust the * cylinder group inode allocation maps to tell us which * inodes are allocated. We will scan the used inode map * to find the inodes that are really in use, and then * read only those inodes in from disk. */ if (sblock->fs_flags & FS_DOSOFTDEP) { if (!cg_chkmagic(cgp)) quit("mapfiles: cg %d: bad magic number\n", cg); cp = &cg_inosused(cgp)[(inosused - 1) / CHAR_BIT]; for ( ; inosused > 0; inosused -= CHAR_BIT, cp--) { if (*cp == 0) continue; for (i = 1 << (CHAR_BIT - 1); i > 0; i >>= 1) { if (*cp & i) break; inosused--; } break; } if (inosused <= 0) continue; } for (i = 0; i < inosused; i++, ino++) { if (ino < UFS_ROOTINO || - (dp = getinode(ino, &mode)) == NULL || + (dp = getino(ino, &mode)) == NULL || (mode & IFMT) == 0) continue; if (ino >= maxino) { msg("Skipping inode %ju >= maxino %ju\n", (uintmax_t)ino, (uintmax_t)maxino); continue; } /* * Everything must go in usedinomap so that a check * for "in dumpdirmap but not in usedinomap" to detect * dirs with nodump set has a chance of succeeding * (this is used in mapdirs()). */ SETINO(ino, usedinomap); if (mode == IFDIR) SETINO(ino, dumpdirmap); if (WANTTODUMP(dp)) { SETINO(ino, dumpinomap); if (mode != IFREG && mode != IFDIR && mode != IFLNK) *tapesize += 1; else *tapesize += blockest(dp); continue; } if (mode == IFDIR) { if (!nonodump && (DIP(dp, di_flags) & UF_NODUMP)) CLRINO(ino, usedinomap); anydirskipped = 1; } } } /* * Restore gets very upset if the root is not dumped, * so ensure that it always is dumped. */ SETINO(UFS_ROOTINO, dumpinomap); return (anydirskipped); } /* * Dump pass 2. * * Scan each directory on the file system to see if it has any modified * files in it. If it does, and has not already been added to the dump * list (because it was itself modified), then add it. If a directory * has not been modified itself, contains no modified files and has no * subdirectories, then it can be deleted from the dump list and from * the list of directories. By deleting it from the list of directories, * its parent may now qualify for the same treatment on this or a later * pass using this algorithm. */ int mapdirs(ino_t maxino, long *tapesize) { union dinode *dp; int i, isdir, nodump; char *map; ino_t ino; union dinode di; long filesize; int ret, change = 0; isdir = 0; /* XXX just to get gcc to shut up */ for (map = dumpdirmap, ino = 1; ino < maxino; ino++) { if (((ino - 1) % CHAR_BIT) == 0) /* map is offset by 1 */ isdir = *map++; else isdir >>= 1; /* * If a directory has been removed from usedinomap, it * either has the nodump flag set, or has inherited * it. Although a directory can't be in dumpinomap if * it isn't in usedinomap, we have to go through it to * propagate the nodump flag. */ nodump = !nonodump && (TSTINO(ino, usedinomap) == 0); if ((isdir & 1) == 0 || (TSTINO(ino, dumpinomap) && !nodump)) continue; - dp = getinode(ino, &i); + dp = getino(ino, &i); /* * inode buf may change in searchdir(). 
*/ if (sblock->fs_magic == FS_UFS1_MAGIC) di.dp1 = dp->dp1; else di.dp2 = dp->dp2; filesize = DIP(&di, di_size); for (ret = 0, i = 0; filesize > 0 && i < UFS_NDADDR; i++) { if (DIP(&di, di_db[i]) != 0) ret |= searchdir(ino, DIP(&di, di_db[i]), (long)sblksize(sblock, DIP(&di, di_size), i), filesize, tapesize, nodump, maxino); if (ret & HASDUMPEDFILE) filesize = 0; else filesize -= sblock->fs_bsize; } for (i = 0; filesize > 0 && i < UFS_NIADDR; i++) { if (DIP(&di, di_ib[i]) == 0) continue; ret |= dirindir(ino, DIP(&di, di_ib[i]), i, &filesize, tapesize, nodump, maxino); } if (ret & HASDUMPEDFILE) { SETINO(ino, dumpinomap); *tapesize += blockest(&di); change = 1; continue; } if (nodump) { if (ret & HASSUBDIRS) change = 1; /* subdirs inherit nodump */ CLRINO(ino, dumpdirmap); } else if ((ret & HASSUBDIRS) == 0) if (!TSTINO(ino, dumpinomap)) { CLRINO(ino, dumpdirmap); change = 1; } } return (change); } /* * Read indirect blocks, and pass the data blocks to be searched * as directories. Quit as soon as any entry is found that will * require the directory to be dumped. */ static int dirindir( ino_t ino, ufs2_daddr_t blkno, int ind_level, long *filesize, long *tapesize, int nodump, ino_t maxino) { union { ufs1_daddr_t ufs1[MAXBSIZE / sizeof(ufs1_daddr_t)]; ufs2_daddr_t ufs2[MAXBSIZE / sizeof(ufs2_daddr_t)]; } idblk; int ret = 0; int i; blkread(fsbtodb(sblock, blkno), (char *)&idblk, (int)sblock->fs_bsize); if (ind_level <= 0) { for (i = 0; *filesize > 0 && i < NINDIR(sblock); i++) { if (sblock->fs_magic == FS_UFS1_MAGIC) blkno = idblk.ufs1[i]; else blkno = idblk.ufs2[i]; if (blkno != 0) ret |= searchdir(ino, blkno, sblock->fs_bsize, *filesize, tapesize, nodump, maxino); if (ret & HASDUMPEDFILE) *filesize = 0; else *filesize -= sblock->fs_bsize; } return (ret); } ind_level--; for (i = 0; *filesize > 0 && i < NINDIR(sblock); i++) { if (sblock->fs_magic == FS_UFS1_MAGIC) blkno = idblk.ufs1[i]; else blkno = idblk.ufs2[i]; if (blkno != 0) ret |= dirindir(ino, blkno, ind_level, filesize, tapesize, nodump, maxino); } return (ret); } /* * Scan a disk block containing directory information looking to see if * any of the entries are on the dump list and to see if the directory * contains any subdirectories. */ static int searchdir( ino_t ino, ufs2_daddr_t blkno, long size, long filesize, long *tapesize, int nodump, ino_t maxino) { int mode; struct direct *dp; union dinode *ip; long loc, ret = 0; static caddr_t dblk; if (dblk == NULL && (dblk = malloc(sblock->fs_bsize)) == NULL) quit("searchdir: cannot allocate indirect memory.\n"); blkread(fsbtodb(sblock, blkno), dblk, (int)size); if (filesize < size) size = filesize; for (loc = 0; loc < size; ) { dp = (struct direct *)(dblk + loc); if (dp->d_reclen == 0) { msg("corrupted directory, inumber %ju\n", (uintmax_t)ino); break; } loc += dp->d_reclen; if (dp->d_ino == 0) continue; if (dp->d_ino >= maxino) { msg("corrupted directory entry, d_ino %ju >= %ju\n", (uintmax_t)dp->d_ino, (uintmax_t)maxino); break; } if (dp->d_name[0] == '.') { if (dp->d_name[1] == '\0') continue; if (dp->d_name[1] == '.' && dp->d_name[2] == '\0') continue; } if (nodump) { - ip = getinode(dp->d_ino, &mode); + ip = getino(dp->d_ino, &mode); if (TSTINO(dp->d_ino, dumpinomap)) { CLRINO(dp->d_ino, dumpinomap); *tapesize -= blockest(ip); } /* * Add back to dumpdirmap and remove from usedinomap * to propagate nodump. 
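/*
 * A standalone sketch (illustrative, not part of this change) of the
 * directory-block walk used by searchdir() above: UFS directory blocks are a
 * packed sequence of struct direct records chained by d_reclen, where a zero
 * d_reclen marks corruption and d_ino == 0 marks an unused slot.  "blk" and
 * "size" stand for a block already fetched with blkread().
 */
#include <sys/types.h>
#include <ufs/ufs/dir.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void
walkdirblock(const char *blk, long size)
{
    const struct direct *dp;
    long loc;

    for (loc = 0; loc < size; ) {
        dp = (const struct direct *)(blk + loc);
        if (dp->d_reclen == 0) {
            printf("corrupted directory block\n");
            break;
        }
        loc += dp->d_reclen;
        if (dp->d_ino == 0)     /* unused slot */
            continue;
        if (strcmp(dp->d_name, ".") == 0 || strcmp(dp->d_name, "..") == 0)
            continue;           /* skip self and parent entries */
        printf("ino %ju name %s\n", (uintmax_t)dp->d_ino, dp->d_name);
    }
}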
*/ if (mode == IFDIR) { SETINO(dp->d_ino, dumpdirmap); CLRINO(dp->d_ino, usedinomap); ret |= HASSUBDIRS; } } else { if (TSTINO(dp->d_ino, dumpinomap)) { ret |= HASDUMPEDFILE; if (ret & HASSUBDIRS) break; } if (TSTINO(dp->d_ino, dumpdirmap)) { ret |= HASSUBDIRS; if (ret & HASDUMPEDFILE) break; } } } return (ret); } /* * Dump passes 3 and 4. * * Dump the contents of an inode to tape. */ void dumpino(union dinode *dp, ino_t ino) { int ind_level, cnt, last, added; off_t size; char buf[TP_BSIZE]; if (newtape) { newtape = 0; dumpmap(dumpinomap, TS_BITS, ino); } CLRINO(ino, dumpinomap); /* * Zero out the size of a snapshot so that it will be dumped * as a zero length file. */ if ((DIP(dp, di_flags) & SF_SNAPSHOT) != 0) { DIP_SET(dp, di_size, 0); DIP_SET(dp, di_flags, DIP(dp, di_flags) & ~SF_SNAPSHOT); } if (sblock->fs_magic == FS_UFS1_MAGIC) { spcl.c_mode = dp->dp1.di_mode; spcl.c_size = dp->dp1.di_size; spcl.c_extsize = 0; spcl.c_atime = _time32_to_time(dp->dp1.di_atime); spcl.c_atimensec = dp->dp1.di_atimensec; spcl.c_mtime = _time32_to_time(dp->dp1.di_mtime); spcl.c_mtimensec = dp->dp1.di_mtimensec; spcl.c_birthtime = 0; spcl.c_birthtimensec = 0; spcl.c_rdev = dp->dp1.di_rdev; spcl.c_file_flags = dp->dp1.di_flags; spcl.c_uid = dp->dp1.di_uid; spcl.c_gid = dp->dp1.di_gid; } else { spcl.c_mode = dp->dp2.di_mode; spcl.c_size = dp->dp2.di_size; spcl.c_extsize = dp->dp2.di_extsize; spcl.c_atime = _time64_to_time(dp->dp2.di_atime); spcl.c_atimensec = dp->dp2.di_atimensec; spcl.c_mtime = _time64_to_time(dp->dp2.di_mtime); spcl.c_mtimensec = dp->dp2.di_mtimensec; spcl.c_birthtime = _time64_to_time(dp->dp2.di_birthtime); spcl.c_birthtimensec = dp->dp2.di_birthnsec; spcl.c_rdev = dp->dp2.di_rdev; spcl.c_file_flags = dp->dp2.di_flags; spcl.c_uid = dp->dp2.di_uid; spcl.c_gid = dp->dp2.di_gid; } spcl.c_type = TS_INODE; spcl.c_count = 0; switch (DIP(dp, di_mode) & S_IFMT) { case 0: /* * Freed inode. */ return; case S_IFLNK: /* * Check for short symbolic link. */ if (DIP(dp, di_size) > 0 && DIP(dp, di_size) < sblock->fs_maxsymlinklen) { spcl.c_addr[0] = 1; spcl.c_count = 1; added = appendextdata(dp); writeheader(ino); if (sblock->fs_magic == FS_UFS1_MAGIC) memmove(buf, (caddr_t)dp->dp1.di_db, (u_long)DIP(dp, di_size)); else memmove(buf, (caddr_t)dp->dp2.di_db, (u_long)DIP(dp, di_size)); buf[DIP(dp, di_size)] = '\0'; writerec(buf, 0); writeextdata(dp, ino, added); return; } /* FALLTHROUGH */ case S_IFDIR: case S_IFREG: if (DIP(dp, di_size) > 0) break; /* FALLTHROUGH */ case S_IFIFO: case S_IFSOCK: case S_IFCHR: case S_IFBLK: added = appendextdata(dp); writeheader(ino); writeextdata(dp, ino, added); return; default: msg("Warning: undefined file type 0%o\n", DIP(dp, di_mode) & IFMT); return; } if (DIP(dp, di_size) > UFS_NDADDR * sblock->fs_bsize) { cnt = UFS_NDADDR * sblock->fs_frag; last = 0; } else { cnt = howmany(DIP(dp, di_size), sblock->fs_fsize); last = 1; } if (sblock->fs_magic == FS_UFS1_MAGIC) ufs1_blksout(&dp->dp1.di_db[0], cnt, ino); else ufs2_blksout(dp, &dp->dp2.di_db[0], cnt, ino, last); if ((size = DIP(dp, di_size) - UFS_NDADDR * sblock->fs_bsize) <= 0) return; for (ind_level = 0; ind_level < UFS_NIADDR; ind_level++) { dmpindir(dp, ino, DIP(dp, di_ib[ind_level]), ind_level, &size); if (size <= 0) return; } } /* * Read indirect blocks, and pass the data blocks to be dumped. 
*/ static void dmpindir(union dinode *dp, ino_t ino, ufs2_daddr_t blk, int ind_level, off_t *size) { union { ufs1_daddr_t ufs1[MAXBSIZE / sizeof(ufs1_daddr_t)]; ufs2_daddr_t ufs2[MAXBSIZE / sizeof(ufs2_daddr_t)]; } idblk; int i, cnt, last; if (blk != 0) blkread(fsbtodb(sblock, blk), (char *)&idblk, (int)sblock->fs_bsize); else memset(&idblk, 0, sblock->fs_bsize); if (ind_level <= 0) { if (*size > NINDIR(sblock) * sblock->fs_bsize) { cnt = NINDIR(sblock) * sblock->fs_frag; last = 0; } else { cnt = howmany(*size, sblock->fs_fsize); last = 1; } *size -= NINDIR(sblock) * sblock->fs_bsize; if (sblock->fs_magic == FS_UFS1_MAGIC) ufs1_blksout(idblk.ufs1, cnt, ino); else ufs2_blksout(dp, idblk.ufs2, cnt, ino, last); return; } ind_level--; for (i = 0; i < NINDIR(sblock); i++) { if (sblock->fs_magic == FS_UFS1_MAGIC) dmpindir(dp, ino, idblk.ufs1[i], ind_level, size); else dmpindir(dp, ino, idblk.ufs2[i], ind_level, size); if (*size <= 0) return; } } /* * Collect up the data into tape record sized buffers and output them. */ static void ufs1_blksout(ufs1_daddr_t *blkp, int frags, ino_t ino) { ufs1_daddr_t *bp; int i, j, count, blks, tbperdb; blks = howmany(frags * sblock->fs_fsize, TP_BSIZE); tbperdb = sblock->fs_bsize >> tp_bshift; for (i = 0; i < blks; i += TP_NINDIR) { if (i + TP_NINDIR > blks) count = blks; else count = i + TP_NINDIR; assert(count <= TP_NINDIR + i); for (j = i; j < count; j++) if (blkp[j / tbperdb] != 0) spcl.c_addr[j - i] = 1; else spcl.c_addr[j - i] = 0; spcl.c_count = count - i; writeheader(ino); bp = &blkp[i / tbperdb]; for (j = i; j < count; j += tbperdb, bp++) if (*bp != 0) { if (j + tbperdb <= count) dumpblock(*bp, (int)sblock->fs_bsize); else dumpblock(*bp, (count - j) * TP_BSIZE); } spcl.c_type = TS_ADDR; } } /* * Collect up the data into tape record sized buffers and output them. */ static void ufs2_blksout(union dinode *dp, ufs2_daddr_t *blkp, int frags, ino_t ino, int last) { ufs2_daddr_t *bp; int i, j, count, resid, blks, tbperdb, added; static int writingextdata = 0; /* * Calculate the number of TP_BSIZE blocks to be dumped. * For filesystems with a fragment size bigger than TP_BSIZE, * only part of the final fragment may need to be dumped. */ blks = howmany(frags * sblock->fs_fsize, TP_BSIZE); if (last) { if (writingextdata) resid = howmany(fragoff(sblock, spcl.c_extsize), TP_BSIZE); else resid = howmany(fragoff(sblock, dp->dp2.di_size), TP_BSIZE); if (resid > 0) blks -= howmany(sblock->fs_fsize, TP_BSIZE) - resid; } tbperdb = sblock->fs_bsize >> tp_bshift; for (i = 0; i < blks; i += TP_NINDIR) { if (i + TP_NINDIR > blks) count = blks; else count = i + TP_NINDIR; assert(count <= TP_NINDIR + i); for (j = i; j < count; j++) if (blkp[j / tbperdb] != 0) spcl.c_addr[j - i] = 1; else spcl.c_addr[j - i] = 0; spcl.c_count = count - i; if (last && count == blks && !writingextdata) added = appendextdata(dp); writeheader(ino); bp = &blkp[i / tbperdb]; for (j = i; j < count; j += tbperdb, bp++) if (*bp != 0) { if (j + tbperdb <= count) dumpblock(*bp, (int)sblock->fs_bsize); else dumpblock(*bp, (count - j) * TP_BSIZE); } spcl.c_type = TS_ADDR; spcl.c_count = 0; if (last && count == blks && !writingextdata) { writingextdata = 1; writeextdata(dp, ino, added); writingextdata = 0; } } } /* * If there is room in the current block for the extended attributes * as well as the file data, update the header to reflect the added * attribute data at the end. 
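Whether the attributes fit comes down to whether their TP_BSIZE records still fit in the header's per-record map (the spcl.c_addr slots and spcl.c_count filled in by the blksout routines above). A rough sketch of that bookkeeping, with the record size and map capacity hard-coded to customary dump values as assumptions of the example:

#include <stdio.h>

#define TP_BSIZE	1024			/* tape record size (assumed) */
#define TP_NINDIR	(TP_BSIZE / 2)		/* records one header can map (assumed) */
#define HOWMANY(x, y)	(((x) + ((y) - 1)) / (y))

int
main(void)
{
	int c_count = 500;			/* records already mapped in this header */
	long c_extsize = 6000;			/* bytes of extended attribute data */
	int blks = HOWMANY(c_extsize, TP_BSIZE);

	if (c_count + blks > TP_NINDIR)
		printf("%d attribute records do not fit; start a new header\n", blks);
	else
		printf("%d attribute records fit after the %d data records\n",
		    blks, c_count);
	return (0);
}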
Attributes are placed at the end so that * old versions of restore will correctly restore the file and simply * discard the extra data at the end that it does not understand. * The attribute data is dumped following the file data by the * writeextdata() function (below). */ static int appendextdata(union dinode *dp) { int i, blks, tbperdb; /* * If no extended attributes, there is nothing to do. */ if (spcl.c_extsize == 0) return (0); /* * If there is not enough room at the end of this block * to add the extended attributes, then rather than putting * part of them here, we simply push them entirely into a * new block rather than putting some here and some later. */ if (spcl.c_extsize > UFS_NXADDR * sblock->fs_bsize) blks = howmany(UFS_NXADDR * sblock->fs_bsize, TP_BSIZE); else blks = howmany(spcl.c_extsize, TP_BSIZE); if (spcl.c_count + blks > TP_NINDIR) return (0); /* * Update the block map in the header to indicate the added * extended attribute. They will be appended after the file * data by the writeextdata() routine. */ tbperdb = sblock->fs_bsize >> tp_bshift; assert(spcl.c_count + blks < TP_NINDIR); for (i = 0; i < blks; i++) if (&dp->dp2.di_extb[i / tbperdb] != 0) spcl.c_addr[spcl.c_count + i] = 1; else spcl.c_addr[spcl.c_count + i] = 0; spcl.c_count += blks; return (blks); } /* * Dump the extended attribute data. If there was room in the file * header, then all we need to do is output the data blocks. If there * was not room in the file header, then an additional TS_ADDR header * is created to hold the attribute data. */ static void writeextdata(union dinode *dp, ino_t ino, int added) { int i, frags, blks, tbperdb, last; ufs2_daddr_t *bp; off_t size; /* * If no extended attributes, there is nothing to do. */ if (spcl.c_extsize == 0) return; /* * If there was no room in the file block for the attributes, * dump them out in a new block, otherwise just dump the data. */ if (added == 0) { if (spcl.c_extsize > UFS_NXADDR * sblock->fs_bsize) { frags = UFS_NXADDR * sblock->fs_frag; last = 0; } else { frags = howmany(spcl.c_extsize, sblock->fs_fsize); last = 1; } ufs2_blksout(dp, &dp->dp2.di_extb[0], frags, ino, last); } else { if (spcl.c_extsize > UFS_NXADDR * sblock->fs_bsize) blks = howmany(UFS_NXADDR * sblock->fs_bsize, TP_BSIZE); else blks = howmany(spcl.c_extsize, TP_BSIZE); tbperdb = sblock->fs_bsize >> tp_bshift; for (i = 0; i < blks; i += tbperdb) { bp = &dp->dp2.di_extb[i / tbperdb]; if (*bp != 0) { if (i + tbperdb <= blks) dumpblock(*bp, (int)sblock->fs_bsize); else dumpblock(*bp, (blks - i) * TP_BSIZE); } } } /* * If an indirect block is added for extended attributes, then * di_exti below should be changed to the structure element * that references the extended attribute indirect block. This * definition is here only to make it compile without complaint. */ #define di_exti di_spare[0] /* * If the extended attributes fall into an indirect block, * dump it as well. */ if ((size = spcl.c_extsize - UFS_NXADDR * sblock->fs_bsize) > 0) dmpindir(dp, ino, dp->dp2.di_exti, 0, &size); } /* * Dump a map to the tape. */ void dumpmap(char *map, int type, ino_t ino) { int i; char *cp; spcl.c_type = type; spcl.c_count = howmany(mapsize * sizeof(char), TP_BSIZE); writeheader(ino); for (i = 0, cp = map; i < spcl.c_count; i++, cp += TP_BSIZE) writerec(cp, 0); } /* * Write a header record to the dump tape. 
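writeheader() below folds a checksum into the header so that the 32-bit words of the record sum to a fixed constant. A self-contained sketch of that convention; the numeric constant is dump's traditional CHECKSUM value and the toy header layout is invented, so treat both as assumptions of the example:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define CHECKSUM	84446			/* traditional dump header checksum (assumed) */
#define HDRWORDS	32			/* toy header size in 32-bit words */

int
main(void)
{
	int32_t hdr[HDRWORDS];
	int32_t sum;
	int i;

	memset(hdr, 0, sizeof(hdr));
	hdr[1] = 12345;				/* arbitrary header contents */
	hdr[2] = -7;

	/* writer: pick the checksum word so that everything sums to CHECKSUM */
	hdr[0] = 0;
	for (sum = 0, i = 0; i < HDRWORDS; i++)
		sum += hdr[i];
	hdr[0] = CHECKSUM - sum;

	/* reader: re-add every word, checksum included, and compare */
	for (sum = 0, i = 0; i < HDRWORDS; i++)
		sum += hdr[i];
	printf("header %s\n", sum == CHECKSUM ? "verifies" : "is corrupt");
	return (0);
}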
*/ void writeheader(ino_t ino) { int32_t sum, cnt, *lp; if (rsync_friendly >= 2) { /* don't track changes to access time */ spcl.c_atime = spcl.c_mtime; spcl.c_atimensec = spcl.c_mtimensec; } spcl.c_inumber = ino; spcl.c_magic = FS_UFS2_MAGIC; spcl.c_checksum = 0; lp = (int32_t *)&spcl; sum = 0; cnt = sizeof(union u_spcl) / (4 * sizeof(int32_t)); while (--cnt >= 0) { sum += *lp++; sum += *lp++; sum += *lp++; sum += *lp++; } spcl.c_checksum = CHECKSUM - sum; writerec((char *)&spcl, 1); } union dinode * -getinode(ino_t inum, int *modep) +getino(ino_t inum, int *modep) { static ino_t minino, maxino; static caddr_t inoblock; struct ufs1_dinode *dp1; struct ufs2_dinode *dp2; if (inoblock == NULL && (inoblock = malloc(sblock->fs_bsize)) == NULL) quit("cannot allocate inode memory.\n"); curino = inum; if (inum >= minino && inum < maxino) goto gotit; blkread(fsbtodb(sblock, ino_to_fsba(sblock, inum)), inoblock, (int)sblock->fs_bsize); minino = inum - (inum % INOPB(sblock)); maxino = minino + INOPB(sblock); gotit: if (sblock->fs_magic == FS_UFS1_MAGIC) { dp1 = &((struct ufs1_dinode *)inoblock)[inum - minino]; *modep = (dp1->di_mode & IFMT); return ((union dinode *)dp1); } dp2 = &((struct ufs2_dinode *)inoblock)[inum - minino]; *modep = (dp2->di_mode & IFMT); return ((union dinode *)dp2); } /* * Read a chunk of data from the disk. * Try to recover from hard errors by reading in sector sized pieces. * Error recovery is attempted at most BREADEMAX times before seeking * consent from the operator to continue. */ int breaderrors = 0; #define BREADEMAX 32 void blkread(ufs2_daddr_t blkno, char *buf, int size) { int secsize, bytes, resid, xfer, base, cnt, i; static char *tmpbuf; off_t offset; loop: offset = blkno << dev_bshift; secsize = sblock->fs_fsize; base = offset % secsize; resid = size % secsize; /* * If the transfer request starts or ends on a non-sector * boundary, we must read the entire sector and copy out * just the part that we need. */ if (base == 0 && resid == 0) { cnt = cread(diskfd, buf, size, offset); if (cnt == size) return; } else { if (tmpbuf == NULL && (tmpbuf = malloc(secsize)) == NULL) quit("buffer malloc failed\n"); xfer = 0; bytes = size; if (base != 0) { cnt = cread(diskfd, tmpbuf, secsize, offset - base); if (cnt != secsize) goto bad; xfer = MIN(secsize - base, size); offset += xfer; bytes -= xfer; resid = bytes % secsize; memcpy(buf, &tmpbuf[base], xfer); } if (bytes >= secsize) { cnt = cread(diskfd, &buf[xfer], bytes - resid, offset); if (cnt != bytes - resid) goto bad; xfer += cnt; offset += cnt; } if (resid == 0) return; cnt = cread(diskfd, tmpbuf, secsize, offset); if (cnt == secsize) { memcpy(&buf[xfer], tmpbuf, resid); return; } } bad: if (blkno + (size / dev_bsize) > fsbtodb(sblock, sblock->fs_size)) { /* * Trying to read the final fragment. * * NB - dump only works in TP_BSIZE blocks, hence * rounds `dev_bsize' fragments up to TP_BSIZE pieces. * It should be smarter about not actually trying to * read more than it can get, but for the time being * we punt and scale back the read only when it gets * us into trouble. 
(mkm 9/25/83) */ size -= dev_bsize; goto loop; } if (cnt == -1) msg("read error from %s: %s: [block %jd]: count=%d\n", disk, strerror(errno), (intmax_t)blkno, size); else msg("short read error from %s: [block %jd]: count=%d, got=%d\n", disk, (intmax_t)blkno, size, cnt); if (++breaderrors > BREADEMAX) { msg("More than %d block read errors from %s\n", BREADEMAX, disk); broadcast("DUMP IS AILING!\n"); msg("This is an unrecoverable error.\n"); if (!query("Do you want to attempt to continue?")){ dumpabort(0); /*NOTREACHED*/ } else breaderrors = 0; } /* * Zero buffer, then try to read each sector of buffer separately, * and bypass the cache. */ memset(buf, 0, size); for (i = 0; i < size; i += dev_bsize, buf += dev_bsize, blkno++) { if ((cnt = pread(diskfd, buf, (int)dev_bsize, ((off_t)blkno << dev_bshift))) == dev_bsize) continue; if (cnt == -1) { msg("read error from %s: %s: [sector %jd]: count=%ld\n", disk, strerror(errno), (intmax_t)blkno, dev_bsize); continue; } msg("short read from %s: [sector %jd]: count=%ld, got=%d\n", disk, (intmax_t)blkno, dev_bsize, cnt); } } Index: projects/clang700-import/sbin/ffsinfo/ffsinfo.c =================================================================== --- projects/clang700-import/sbin/ffsinfo/ffsinfo.c (revision 340426) +++ projects/clang700-import/sbin/ffsinfo/ffsinfo.c (revision 340427) @@ -1,648 +1,649 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2000 Christoph Herrmann, Thomas-Henning von Kamptz * Copyright (c) 1980, 1989, 1993 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * Christoph Herrmann and Thomas-Henning von Kamptz, Munich and Frankfurt. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgment: * This product includes software developed by the University of * California, Berkeley and its contributors, as well as Christoph * Herrmann and Thomas-Henning von Kamptz. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $TSHeader: src/sbin/ffsinfo/ffsinfo.c,v 1.4 2000/12/12 19:30:55 tomsoft Exp $ * */ #ifndef lint static const char copyright[] = "@(#) Copyright (c) 2000 Christoph Herrmann, Thomas-Henning von Kamptz\n\ Copyright (c) 1980, 1989, 1993 The Regents of the University of California.\n\ All rights reserved.\n"; #endif /* not lint */ #ifndef lint static const char rcsid[] = "$FreeBSD$"; #endif /* not lint */ /* ********************************************************** INCLUDES ***** */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "debug.h" /* *********************************************************** GLOBALS ***** */ #ifdef FS_DEBUG int _dbg_lvl_ = (DL_INFO); /* DL_TRC */ #endif /* FS_DEBUG */ static struct uufsd disk; #define sblock disk.d_fs #define acg disk.d_cg static union { struct fs fs; char pad[SBLOCKSIZE]; } fsun; #define osblock fsun.fs static char i1blk[MAXBSIZE]; static char i2blk[MAXBSIZE]; static char i3blk[MAXBSIZE]; static struct csum *fscs; /* ******************************************************** PROTOTYPES ***** */ static void usage(void); static void dump_whole_ufs1_inode(ino_t, int); static void dump_whole_ufs2_inode(ino_t, int); #define DUMP_WHOLE_INODE(A,B) \ ( disk.d_ufs == 1 \ ? dump_whole_ufs1_inode((A),(B)) : dump_whole_ufs2_inode((A),(B)) ) /* ************************************************************** main ***** */ /* * ffsinfo(8) is a tool to dump all metadata of a file system. It helps to find * errors is the file system much easier. You can run ffsinfo before and after * an fsck(8), and compare the two ascii dumps easy with diff, and you see * directly where the problem is. You can control how much detail you want to * see with some command line arguments. You can also easy check the status * of a file system, like is there is enough space for growing a file system, * or how many active snapshots do we have. It provides much more detailed * information then dumpfs. Snapshots, as they are very new, are not really * supported. They are just mentioned currently, but it is planned to run * also over active snapshots, to even get that output. */ int main(int argc, char **argv) { DBG_FUNC("main") char *device, *special; int ch; size_t len; struct stat st; struct csum *dbg_csp; int dbg_csc; char dbg_line[80]; int cylno,i; int cfg_cg, cfg_in, cfg_lv; int cg_start, cg_stop; ino_t in; char *out_file; DBG_ENTER; cfg_lv = 0xff; cfg_in = -2; cfg_cg = -2; out_file = strdup("-"); while ((ch = getopt(argc, argv, "g:i:l:o:")) != -1) { switch (ch) { case 'g': cfg_cg = strtol(optarg, NULL, 0); if (errno == EINVAL || errno == ERANGE) err(1, "%s", optarg); if (cfg_cg < -1) usage(); break; case 'i': cfg_in = strtol(optarg, NULL, 0); if (errno == EINVAL || errno == ERANGE) err(1, "%s", optarg); if (cfg_in < 0) usage(); break; case 'l': cfg_lv = strtol(optarg, NULL, 0); if (errno == EINVAL||errno == ERANGE) err(1, "%s", optarg); if (cfg_lv < 0x1 || cfg_lv > 0x3ff) usage(); break; case 'o': free(out_file); out_file = strdup(optarg); if (out_file == NULL) errx(1, "strdup failed"); break; case '?': /* FALLTHROUGH */ default: usage(); } } argc -= optind; argv += optind; if (argc != 1) usage(); device = *argv; /* * Now we try to guess the (raw)device name. */ if (0 == strrchr(device, '/') && stat(device, &st) == -1) { /*- * No path prefix was given, so try in this order: * /dev/r%s * /dev/%s * /dev/vinum/r%s * /dev/vinum/%s. 
* * FreeBSD now doesn't distinguish between raw and block * devices any longer, but it should still work this way. */ len = strlen(device) + strlen(_PATH_DEV) + 2 + strlen("vinum/"); special = (char *)malloc(len); if (special == NULL) errx(1, "malloc failed"); snprintf(special, len, "%sr%s", _PATH_DEV, device); if (stat(special, &st) == -1) { snprintf(special, len, "%s%s", _PATH_DEV, device); if (stat(special, &st) == -1) { snprintf(special, len, "%svinum/r%s", _PATH_DEV, device); if (stat(special, &st) == -1) /* For now this is the 'last resort' */ snprintf(special, len, "%svinum/%s", _PATH_DEV, device); } } device = special; } if (ufs_disk_fillout(&disk, device) == -1) err(1, "ufs_disk_fillout(%s) failed: %s", device, disk.d_error); DBG_OPEN(out_file); /* already here we need a superblock */ if (cfg_lv & 0x001) DBG_DUMP_FS(&sblock, "primary sblock"); /* Determine here what cylinder groups to dump */ if (cfg_cg==-2) { cg_start = 0; cg_stop = sblock.fs_ncg; } else if (cfg_cg == -1) { cg_start = sblock.fs_ncg - 1; cg_stop = sblock.fs_ncg; } else if (cfg_cg < sblock.fs_ncg) { cg_start = cfg_cg; cg_stop = cfg_cg + 1; } else { cg_start = sblock.fs_ncg; cg_stop = sblock.fs_ncg; } if (cfg_lv & 0x004) { fscs = (struct csum *)calloc((size_t)1, (size_t)sblock.fs_cssize); if (fscs == NULL) errx(1, "calloc failed"); /* get the cylinder summary into the memory ... */ for (i = 0; i < sblock.fs_cssize; i += sblock.fs_bsize) { if (bread(&disk, fsbtodb(&sblock, sblock.fs_csaddr + numfrags(&sblock, i)), (void *)(((char *)fscs)+i), (size_t)(sblock.fs_cssize-i < sblock.fs_bsize ? sblock.fs_cssize - i : sblock.fs_bsize)) == -1) err(1, "bread: %s", disk.d_error); } dbg_csp = fscs; /* ... and dump it */ - for(dbg_csc=0; dbg_cscdi_nlink==0) { + if (dp.dp1->di_nlink == 0) { DBG_LEAVE; return; /* inode not in use */ } /* * Dump the main inode structure. */ snprintf(comment, sizeof(comment), "Inode 0x%08jx", (uintmax_t)inode); if (level & 0x100) { DBG_DUMP_INO(&sblock, comment, - ino); + dp.dp1); } if (!(level & 0x200)) { DBG_LEAVE; return; } /* * Ok, now prepare for dumping all direct and indirect pointers. */ - rb = howmany(ino->di_size, sblock.fs_bsize) - UFS_NDADDR; - if(rb>0) { + rb = howmany(dp.dp1->di_size, sblock.fs_bsize) - UFS_NDADDR; + if (rb > 0) { /* * Dump single indirect block. */ - if (bread(&disk, fsbtodb(&sblock, ino->di_ib[0]), (void *)&i1blk, - (size_t)sblock.fs_bsize) == -1) { + if (bread(&disk, fsbtodb(&sblock, dp.dp1->di_ib[0]), + (void *)&i1blk, (size_t)sblock.fs_bsize) == -1) { err(1, "bread: %s", disk.d_error); } snprintf(comment, sizeof(comment), "Inode 0x%08jx: indirect 0", (uintmax_t)inode); DBG_DUMP_IBLK(&sblock, comment, i1blk, (size_t)rb); - rb-=howmany(sblock.fs_bsize, sizeof(ufs1_daddr_t)); + rb -= howmany(sblock.fs_bsize, sizeof(ufs1_daddr_t)); } - if(rb>0) { + if (rb > 0) { /* * Dump double indirect blocks. 
*/ - if (bread(&disk, fsbtodb(&sblock, ino->di_ib[1]), (void *)&i2blk, - (size_t)sblock.fs_bsize) == -1) { + if (bread(&disk, fsbtodb(&sblock, dp.dp1->di_ib[1]), + (void *)&i2blk, (size_t)sblock.fs_bsize) == -1) { err(1, "bread: %s", disk.d_error); } snprintf(comment, sizeof(comment), "Inode 0x%08jx: indirect 1", (uintmax_t)inode); DBG_DUMP_IBLK(&sblock, comment, i2blk, howmany(rb, howmany(sblock.fs_bsize, sizeof(ufs1_daddr_t)))); - for(ind2ctr=0; ((ind2ctr < howmany(sblock.fs_bsize, - sizeof(ufs1_daddr_t))) && (rb>0)); ind2ctr++) { - ind2ptr=&((ufs1_daddr_t *)(void *)&i2blk)[ind2ctr]; + for (ind2ctr = 0; ((ind2ctr < howmany(sblock.fs_bsize, + sizeof(ufs1_daddr_t))) && (rb > 0)); ind2ctr++) { + ind2ptr = &((ufs1_daddr_t *)(void *)&i2blk)[ind2ctr]; - if (bread(&disk, fsbtodb(&sblock, *ind2ptr), (void *)&i1blk, - (size_t)sblock.fs_bsize) == -1) { + if (bread(&disk, fsbtodb(&sblock, *ind2ptr), + (void *)&i1blk, (size_t)sblock.fs_bsize) == -1) { err(1, "bread: %s", disk.d_error); } snprintf(comment, sizeof(comment), "Inode 0x%08jx: indirect 1->%d", (uintmax_t)inode, ind2ctr); DBG_DUMP_IBLK(&sblock, comment, i1blk, (size_t)rb); - rb-=howmany(sblock.fs_bsize, sizeof(ufs1_daddr_t)); + rb -= howmany(sblock.fs_bsize, sizeof(ufs1_daddr_t)); } } - if(rb>0) { + if (rb > 0) { /* * Dump triple indirect blocks. */ - if (bread(&disk, fsbtodb(&sblock, ino->di_ib[2]), (void *)&i3blk, - (size_t)sblock.fs_bsize) == -1) { + if (bread(&disk, fsbtodb(&sblock, dp.dp1->di_ib[2]), + (void *)&i3blk, (size_t)sblock.fs_bsize) == -1) { err(1, "bread: %s", disk.d_error); } snprintf(comment, sizeof(comment), "Inode 0x%08jx: indirect 2", (uintmax_t)inode); #define SQUARE(a) ((a)*(a)) DBG_DUMP_IBLK(&sblock, comment, i3blk, howmany(rb, SQUARE(howmany(sblock.fs_bsize, sizeof(ufs1_daddr_t))))); #undef SQUARE - for(ind3ctr=0; ((ind3ctr<howmany(sblock.fs_bsize, - sizeof(ufs1_daddr_t)))&&(rb>0)); ind3ctr++) { - ind3ptr=&((ufs1_daddr_t *)(void *)&i3blk)[ind3ctr]; + for (ind3ctr = 0; ((ind3ctr < howmany(sblock.fs_bsize, + sizeof(ufs1_daddr_t))) && (rb > 0)); ind3ctr++) { + ind3ptr = &((ufs1_daddr_t *)(void *)&i3blk)[ind3ctr]; - if (bread(&disk, fsbtodb(&sblock, *ind3ptr), (void *)&i2blk, - (size_t)sblock.fs_bsize) == -1) { + if (bread(&disk, fsbtodb(&sblock, *ind3ptr), + (void *)&i2blk, (size_t)sblock.fs_bsize) == -1) { err(1, "bread: %s", disk.d_error); } snprintf(comment, sizeof(comment), "Inode 0x%08jx: indirect 2->%d", (uintmax_t)inode, ind3ctr); DBG_DUMP_IBLK(&sblock, comment, i2blk, howmany(rb, howmany(sblock.fs_bsize, sizeof(ufs1_daddr_t)))); - for(ind2ctr=0; ((ind2ctr < howmany(sblock.fs_bsize, - sizeof(ufs1_daddr_t)))&&(rb>0)); ind2ctr++) { + for (ind2ctr = 0; ((ind2ctr < howmany(sblock.fs_bsize, + sizeof(ufs1_daddr_t))) && (rb > 0)); ind2ctr++) { ind2ptr=&((ufs1_daddr_t *)(void *)&i2blk) [ind2ctr]; if (bread(&disk, fsbtodb(&sblock, *ind2ptr), (void *)&i1blk, (size_t)sblock.fs_bsize) == -1) { err(1, "bread: %s", disk.d_error); } snprintf(comment, sizeof(comment), "Inode 0x%08jx: indirect 2->%d->%d", (uintmax_t)inode, ind3ctr, ind3ctr); DBG_DUMP_IBLK(&sblock, comment, i1blk, (size_t)rb); - rb-=howmany(sblock.fs_bsize, + rb -= howmany(sblock.fs_bsize, sizeof(ufs1_daddr_t)); } } } DBG_LEAVE; return; } /* ********************************************** dump_whole_ufs2_inode ***** */ /* * Here we dump a list of all blocks allocated by this inode. We follow * all indirect blocks.
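Each indirect block holds fs_bsize / sizeof(daddr) pointers, which is why the double indirect level above is sized with the SQUARE() of that count. A worked example of the fan-out; the block and pointer sizes are typical UFS2 values picked for illustration, not taken from a superblock:

#include <stdio.h>

#define HOWMANY(x, y)	(((x) + ((y) - 1)) / (y))

int
main(void)
{
	const long long fs_bsize = 32768;	/* assumed block size */
	const long long ptrsize = 8;		/* sizeof(ufs2_daddr_t) */
	const long long nindir = fs_bsize / ptrsize;

	printf("pointers per indirect block: %lld\n", nindir);
	printf("single indirect covers %lld data blocks (%lld MB)\n",
	    nindir, nindir * fs_bsize / (1024 * 1024));
	printf("double indirect covers %lld data blocks\n", nindir * nindir);
	printf("a 1 GB file needs %lld data blocks\n",
	    HOWMANY(1024LL * 1024 * 1024, fs_bsize));
	return (0);
}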
*/ void dump_whole_ufs2_inode(ino_t inode, int level) { DBG_FUNC("dump_whole_ufs2_inode") - struct ufs2_dinode *ino; - int rb, mode; + union dinodep dp; + int rb; unsigned int ind2ctr, ind3ctr; ufs2_daddr_t *ind2ptr, *ind3ptr; char comment[80]; DBG_ENTER; /* * Read the inode from disk/cache. */ - if (getino(&disk, (void **)&ino, inode, &mode) == -1) - err(1, "getino: %s", disk.d_error); + if (getinode(&disk, &dp, inode) == -1) + err(1, "getinode: %s", disk.d_error); - if (ino->di_nlink == 0) { + if (dp.dp2->di_nlink == 0) { DBG_LEAVE; return; /* inode not in use */ } /* * Dump the main inode structure. */ snprintf(comment, sizeof(comment), "Inode 0x%08jx", (uintmax_t)inode); if (level & 0x100) { - DBG_DUMP_INO(&sblock, comment, ino); + DBG_DUMP_INO(&sblock, comment, dp.dp2); } if (!(level & 0x200)) { DBG_LEAVE; return; } /* * Ok, now prepare for dumping all direct and indirect pointers. */ - rb = howmany(ino->di_size, sblock.fs_bsize) - UFS_NDADDR; + rb = howmany(dp.dp2->di_size, sblock.fs_bsize) - UFS_NDADDR; if (rb > 0) { /* * Dump single indirect block. */ - if (bread(&disk, fsbtodb(&sblock, ino->di_ib[0]), (void *)&i1blk, - (size_t)sblock.fs_bsize) == -1) { + if (bread(&disk, fsbtodb(&sblock, dp.dp2->di_ib[0]), + (void *)&i1blk, (size_t)sblock.fs_bsize) == -1) { err(1, "bread: %s", disk.d_error); } snprintf(comment, sizeof(comment), "Inode 0x%08jx: indirect 0", (uintmax_t)inode); DBG_DUMP_IBLK(&sblock, comment, i1blk, (size_t)rb); rb -= howmany(sblock.fs_bsize, sizeof(ufs2_daddr_t)); } if (rb > 0) { /* * Dump double indirect blocks. */ - if (bread(&disk, fsbtodb(&sblock, ino->di_ib[1]), (void *)&i2blk, - (size_t)sblock.fs_bsize) == -1) { + if (bread(&disk, fsbtodb(&sblock, dp.dp2->di_ib[1]), + (void *)&i2blk, (size_t)sblock.fs_bsize) == -1) { err(1, "bread: %s", disk.d_error); } snprintf(comment, sizeof(comment), "Inode 0x%08jx: indirect 1", (uintmax_t)inode); DBG_DUMP_IBLK(&sblock, comment, i2blk, howmany(rb, howmany(sblock.fs_bsize, sizeof(ufs2_daddr_t)))); for (ind2ctr = 0; ((ind2ctr < howmany(sblock.fs_bsize, sizeof(ufs2_daddr_t))) && (rb>0)); ind2ctr++) { ind2ptr = &((ufs2_daddr_t *)(void *)&i2blk)[ind2ctr]; - if (bread(&disk, fsbtodb(&sblock, *ind2ptr), (void *)&i1blk, - (size_t)sblock.fs_bsize) == -1) { + if (bread(&disk, fsbtodb(&sblock, *ind2ptr), + (void *)&i1blk, (size_t)sblock.fs_bsize) == -1) { err(1, "bread: %s", disk.d_error); } snprintf(comment, sizeof(comment), "Inode 0x%08jx: indirect 1->%d", (uintmax_t)inode, ind2ctr); DBG_DUMP_IBLK(&sblock, comment, i1blk, (size_t)rb); rb -= howmany(sblock.fs_bsize, sizeof(ufs2_daddr_t)); } } if (rb > 0) { /* * Dump triple indirect blocks. 
*/ - if (bread(&disk, fsbtodb(&sblock, ino->di_ib[2]), (void *)&i3blk, - (size_t)sblock.fs_bsize) == -1) { + if (bread(&disk, fsbtodb(&sblock, dp.dp2->di_ib[2]), + (void *)&i3blk, (size_t)sblock.fs_bsize) == -1) { err(1, "bread: %s", disk.d_error); } snprintf(comment, sizeof(comment), "Inode 0x%08jx: indirect 2", (uintmax_t)inode); #define SQUARE(a) ((a)*(a)) DBG_DUMP_IBLK(&sblock, comment, i3blk, howmany(rb, SQUARE(howmany(sblock.fs_bsize, sizeof(ufs2_daddr_t))))); #undef SQUARE for (ind3ctr = 0; ((ind3ctr < howmany(sblock.fs_bsize, sizeof(ufs2_daddr_t))) && (rb > 0)); ind3ctr++) { ind3ptr = &((ufs2_daddr_t *)(void *)&i3blk)[ind3ctr]; - if (bread(&disk, fsbtodb(&sblock, *ind3ptr), (void *)&i2blk, - (size_t)sblock.fs_bsize) == -1) { + if (bread(&disk, fsbtodb(&sblock, *ind3ptr), + (void *)&i2blk, (size_t)sblock.fs_bsize) == -1) { err(1, "bread: %s", disk.d_error); } snprintf(comment, sizeof(comment), "Inode 0x%08jx: indirect 2->%d", (uintmax_t)inode, ind3ctr); DBG_DUMP_IBLK(&sblock, comment, i2blk, howmany(rb, howmany(sblock.fs_bsize, sizeof(ufs2_daddr_t)))); for (ind2ctr = 0; ((ind2ctr < howmany(sblock.fs_bsize, sizeof(ufs2_daddr_t))) && (rb > 0)); ind2ctr++) { ind2ptr = &((ufs2_daddr_t *)(void *)&i2blk) [ind2ctr]; - if (bread(&disk, fsbtodb(&sblock, *ind2ptr), (void *)&i1blk, - (size_t)sblock.fs_bsize) == -1) { + if (bread(&disk, fsbtodb(&sblock, *ind2ptr), + (void *)&i1blk, (size_t)sblock.fs_bsize) + == -1) { err(1, "bread: %s", disk.d_error); } snprintf(comment, sizeof(comment), "Inode 0x%08jx: indirect 2->%d->%d", (uintmax_t)inode, ind3ctr, ind3ctr); DBG_DUMP_IBLK(&sblock, comment, i1blk, (size_t)rb); rb -= howmany(sblock.fs_bsize, sizeof(ufs2_daddr_t)); } } } DBG_LEAVE; return; } /* ************************************************************* usage ***** */ /* * Dump a line of usage. */ void usage(void) { DBG_FUNC("usage") DBG_ENTER; fprintf(stderr, "usage: ffsinfo [-g cylinder_group] [-i inode] [-l level] " "[-o outfile]\n" " special | file\n"); DBG_LEAVE; exit(1); } Index: projects/clang700-import/sbin/fsck_ffs/dir.c =================================================================== --- projects/clang700-import/sbin/fsck_ffs/dir.c (revision 340426) +++ projects/clang700-import/sbin/fsck_ffs/dir.c (revision 340427) @@ -1,712 +1,713 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1980, 1986, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if 0 #ifndef lint static const char sccsid[] = "@(#)dir.c 8.8 (Berkeley) 4/28/95"; #endif /* not lint */ #endif #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include "fsck.h" static struct dirtemplate emptydir = { 0, DIRBLKSIZ, DT_UNKNOWN, 0, "", 0, 0, DT_UNKNOWN, 0, "" }; static struct dirtemplate dirhead = { 0, 12, DT_DIR, 1, ".", 0, DIRBLKSIZ - 12, DT_DIR, 2, ".." }; static int chgino(struct inodesc *); static int dircheck(struct inodesc *, struct direct *); static int expanddir(union dinode *dp, char *name); static void freedir(ino_t ino, ino_t parent); static struct direct *fsck_readdir(struct inodesc *); static struct bufarea *getdirblk(ufs2_daddr_t blkno, long size); static int lftempname(char *bufp, ino_t ino); static int mkentry(struct inodesc *); /* * Propagate connected state through the tree. */ void propagate(void) { struct inoinfo **inpp, *inp; struct inoinfo **inpend; long change; inpend = &inpsort[inplast]; do { change = 0; for (inpp = inpsort; inpp < inpend; inpp++) { inp = *inpp; if (inp->i_parent == 0) continue; if (inoinfo(inp->i_parent)->ino_state == DFOUND && INO_IS_DUNFOUND(inp->i_number)) { inoinfo(inp->i_number)->ino_state = DFOUND; change++; } } } while (change > 0); } /* * Scan each entry in a directory block. */ int dirscan(struct inodesc *idesc) { struct direct *dp; struct bufarea *bp; u_int dsize, n; long blksiz; char dbuf[DIRBLKSIZ]; if (idesc->id_type != DATA) errx(EEXIT, "wrong type to dirscan %d", idesc->id_type); if (idesc->id_entryno == 0 && (idesc->id_filesize & (DIRBLKSIZ - 1)) != 0) idesc->id_filesize = roundup(idesc->id_filesize, DIRBLKSIZ); blksiz = idesc->id_numfrags * sblock.fs_fsize; if (chkrange(idesc->id_blkno, idesc->id_numfrags)) { idesc->id_filesize -= blksiz; return (SKIP); } idesc->id_loc = 0; for (dp = fsck_readdir(idesc); dp != NULL; dp = fsck_readdir(idesc)) { dsize = dp->d_reclen; if (dsize > sizeof(dbuf)) dsize = sizeof(dbuf); memmove(dbuf, dp, (size_t)dsize); idesc->id_dirp = (struct direct *)dbuf; if ((n = (*idesc->id_func)(idesc)) & ALTERED) { bp = getdirblk(idesc->id_blkno, blksiz); memmove(bp->b_un.b_buf + idesc->id_loc - dsize, dbuf, (size_t)dsize); dirty(bp); sbdirty(); rerun = 1; } if (n & STOP) return (n); } return (idesc->id_filesize > 0 ? KEEPON : STOP); } /* * get next entry in a directory. 
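fsck_readdir() below, like dirscan() above and searchdir() in the dump half of this diff, steps through a directory block strictly by d_reclen and rejects entries that cannot be valid. A minimal standalone sketch of that walk; the entry layout here is a simplified stand-in, not the real struct direct:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define DIRBLKSIZ	512			/* directory block size (assumed) */

/* simplified stand-in for struct direct */
struct dentry {
	uint32_t d_ino;
	uint16_t d_reclen;
	uint8_t  d_type;
	uint8_t  d_namlen;
	char	 d_name[8];
};

static union {
	char		b[DIRBLKSIZ];
	struct dentry	align;
} blk;

int
main(void)
{
	struct dentry *dp;
	int loc;

	/* build a single "." entry whose d_reclen claims the whole block */
	memset(blk.b, 0, sizeof(blk.b));
	dp = (struct dentry *)blk.b;
	dp->d_ino = 2;
	dp->d_reclen = DIRBLKSIZ;
	dp->d_namlen = 1;
	strcpy(dp->d_name, ".");

	/* walk by d_reclen with the same basic sanity checks fsck applies */
	for (loc = 0; loc < DIRBLKSIZ; loc += dp->d_reclen) {
		dp = (struct dentry *)(blk.b + loc);
		if (dp->d_reclen == 0 || (dp->d_reclen & 3) != 0 ||
		    loc + dp->d_reclen > DIRBLKSIZ) {
			printf("corrupted entry at offset %d\n", loc);
			break;
		}
		if (dp->d_ino != 0)
			printf("ino %u  name %s\n", dp->d_ino, dp->d_name);
	}
	return (0);
}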
*/ static struct direct * fsck_readdir(struct inodesc *idesc) { struct direct *dp, *ndp; struct bufarea *bp; long size, blksiz, fix, dploc; blksiz = idesc->id_numfrags * sblock.fs_fsize; bp = getdirblk(idesc->id_blkno, blksiz); if (idesc->id_loc % DIRBLKSIZ == 0 && idesc->id_filesize > 0 && idesc->id_loc < blksiz) { dp = (struct direct *)(bp->b_un.b_buf + idesc->id_loc); if (dircheck(idesc, dp)) goto dpok; if (idesc->id_fix == IGNORE) return (0); fix = dofix(idesc, "DIRECTORY CORRUPTED"); bp = getdirblk(idesc->id_blkno, blksiz); dp = (struct direct *)(bp->b_un.b_buf + idesc->id_loc); dp->d_reclen = DIRBLKSIZ; dp->d_ino = 0; dp->d_type = 0; dp->d_namlen = 0; dp->d_name[0] = '\0'; if (fix) dirty(bp); idesc->id_loc += DIRBLKSIZ; idesc->id_filesize -= DIRBLKSIZ; return (dp); } dpok: if (idesc->id_filesize <= 0 || idesc->id_loc >= blksiz) return NULL; dploc = idesc->id_loc; dp = (struct direct *)(bp->b_un.b_buf + dploc); idesc->id_loc += dp->d_reclen; idesc->id_filesize -= dp->d_reclen; if ((idesc->id_loc % DIRBLKSIZ) == 0) return (dp); ndp = (struct direct *)(bp->b_un.b_buf + idesc->id_loc); if (idesc->id_loc < blksiz && idesc->id_filesize > 0 && dircheck(idesc, ndp) == 0) { size = DIRBLKSIZ - (idesc->id_loc % DIRBLKSIZ); idesc->id_loc += size; idesc->id_filesize -= size; if (idesc->id_fix == IGNORE) return (0); fix = dofix(idesc, "DIRECTORY CORRUPTED"); bp = getdirblk(idesc->id_blkno, blksiz); dp = (struct direct *)(bp->b_un.b_buf + dploc); dp->d_reclen += size; if (fix) dirty(bp); } return (dp); } /* * Verify that a directory entry is valid. * This is a superset of the checks made in the kernel. */ static int dircheck(struct inodesc *idesc, struct direct *dp) { size_t size; char *cp; u_char type; u_int8_t namlen; int spaceleft; spaceleft = DIRBLKSIZ - (idesc->id_loc % DIRBLKSIZ); if (dp->d_reclen == 0 || dp->d_reclen > spaceleft || (dp->d_reclen & 0x3) != 0) goto bad; if (dp->d_ino == 0) return (1); size = DIRSIZ(0, dp); namlen = dp->d_namlen; type = dp->d_type; if (dp->d_reclen < size || idesc->id_filesize < size || namlen == 0 || type > 15) goto bad; for (cp = dp->d_name, size = 0; size < namlen; size++) if (*cp == '\0' || (*cp++ == '/')) goto bad; if (*cp != '\0') goto bad; return (1); bad: if (debug) printf("Bad dir: ino %d reclen %d namlen %d type %d name %s\n", dp->d_ino, dp->d_reclen, dp->d_namlen, dp->d_type, dp->d_name); return (0); } void direrror(ino_t ino, const char *errmesg) { fileerror(ino, ino, errmesg); } void fileerror(ino_t cwd, ino_t ino, const char *errmesg) { union dinode *dp; char pathbuf[MAXPATHLEN + 1]; pwarn("%s ", errmesg); - pinode(ino); - printf("\n"); - getpathname(pathbuf, cwd, ino); if (ino < UFS_ROOTINO || ino > maxino) { - pfatal("NAME=%s\n", pathbuf); + pfatal("out-of-range inode number %ju", (uintmax_t)ino); return; } dp = ginode(ino); + prtinode(ino, dp); + printf("\n"); + getpathname(pathbuf, cwd, ino); if (ftypeok(dp)) pfatal("%s=%s\n", (DIP(dp, di_mode) & IFMT) == IFDIR ? "DIR" : "FILE", pathbuf); else pfatal("NAME=%s\n", pathbuf); } void adjust(struct inodesc *idesc, int lcnt) { union dinode *dp; int saveresolved; dp = ginode(idesc->id_number); if (DIP(dp, di_nlink) == lcnt) { /* * If we have not hit any unresolved problems, are running * in preen mode, and are on a file system using soft updates, * then just toss any partially allocated files. */ if (resolved && (preen || bkgrdflag) && usedsoftdep) { clri(idesc, "UNREF", 1); return; } else { /* * The file system can be marked clean even if * a file is not linked up, but is cleared. 
* Hence, resolved should not be cleared when * linkup is answered no, but clri is answered yes. */ saveresolved = resolved; if (linkup(idesc->id_number, (ino_t)0, NULL) == 0) { resolved = saveresolved; clri(idesc, "UNREF", 0); return; } /* * Account for the new reference created by linkup(). */ dp = ginode(idesc->id_number); lcnt--; } } if (lcnt != 0) { pwarn("LINK COUNT %s", (lfdir == idesc->id_number) ? lfname : ((DIP(dp, di_mode) & IFMT) == IFDIR ? "DIR" : "FILE")); - pinode(idesc->id_number); + prtinode(idesc->id_number, dp); printf(" COUNT %d SHOULD BE %d", DIP(dp, di_nlink), DIP(dp, di_nlink) - lcnt); if (preen || usedsoftdep) { if (lcnt < 0) { printf("\n"); pfatal("LINK COUNT INCREASING"); } if (preen) printf(" (ADJUSTED)\n"); } if (preen || reply("ADJUST") == 1) { if (bkgrdflag == 0) { DIP_SET(dp, di_nlink, DIP(dp, di_nlink) - lcnt); inodirty(dp); } else { cmd.value = idesc->id_number; cmd.size = -lcnt; if (debug) printf("adjrefcnt ino %ld amt %lld\n", (long)cmd.value, (long long)cmd.size); if (sysctl(adjrefcnt, MIBSIZE, 0, 0, &cmd, sizeof cmd) == -1) rwerror("ADJUST INODE", cmd.value); } } } } static int mkentry(struct inodesc *idesc) { struct direct *dirp = idesc->id_dirp; struct direct newent; int newlen, oldlen; newent.d_namlen = strlen(idesc->id_name); newlen = DIRSIZ(0, &newent); if (dirp->d_ino != 0) oldlen = DIRSIZ(0, dirp); else oldlen = 0; if (dirp->d_reclen - oldlen < newlen) return (KEEPON); newent.d_reclen = dirp->d_reclen - oldlen; dirp->d_reclen = oldlen; dirp = (struct direct *)(((char *)dirp) + oldlen); dirp->d_ino = idesc->id_parent; /* ino to be entered is in id_parent */ dirp->d_reclen = newent.d_reclen; dirp->d_type = inoinfo(idesc->id_parent)->ino_type; dirp->d_namlen = newent.d_namlen; memmove(dirp->d_name, idesc->id_name, (size_t)newent.d_namlen + 1); return (ALTERED|STOP); } static int chgino(struct inodesc *idesc) { struct direct *dirp = idesc->id_dirp; if (memcmp(dirp->d_name, idesc->id_name, (int)dirp->d_namlen + 1)) return (KEEPON); dirp->d_ino = idesc->id_parent; dirp->d_type = inoinfo(idesc->id_parent)->ino_type; return (ALTERED|STOP); } int linkup(ino_t orphan, ino_t parentdir, char *name) { union dinode *dp; int lostdir; ino_t oldlfdir; struct inodesc idesc; char tempname[BUFSIZ]; memset(&idesc, 0, sizeof(struct inodesc)); dp = ginode(orphan); lostdir = (DIP(dp, di_mode) & IFMT) == IFDIR; pwarn("UNREF %s ", lostdir ? "DIR" : "FILE"); - pinode(orphan); + prtinode(orphan, dp); + printf("\n"); if (preen && DIP(dp, di_size) == 0) return (0); if (cursnapshot != 0) { pfatal("FILE LINKUP IN SNAPSHOT"); return (0); } if (preen) printf(" (RECONNECTED)\n"); else if (reply("RECONNECT") == 0) return (0); if (lfdir == 0) { dp = ginode(UFS_ROOTINO); idesc.id_name = strdup(lfname); idesc.id_type = DATA; idesc.id_func = findino; idesc.id_number = UFS_ROOTINO; if ((ckinode(dp, &idesc) & FOUND) != 0) { lfdir = idesc.id_parent; } else { pwarn("NO lost+found DIRECTORY"); if (preen || reply("CREATE")) { lfdir = allocdir(UFS_ROOTINO, (ino_t)0, lfmode); if (lfdir != 0) { if (makeentry(UFS_ROOTINO, lfdir, lfname) != 0) { numdirs++; if (preen) printf(" (CREATED)\n"); } else { freedir(lfdir, UFS_ROOTINO); lfdir = 0; if (preen) printf("\n"); } } } } if (lfdir == 0) { pfatal("SORRY. 
CANNOT CREATE lost+found DIRECTORY"); printf("\n\n"); return (0); } } dp = ginode(lfdir); if ((DIP(dp, di_mode) & IFMT) != IFDIR) { pfatal("lost+found IS NOT A DIRECTORY"); if (reply("REALLOCATE") == 0) return (0); oldlfdir = lfdir; if ((lfdir = allocdir(UFS_ROOTINO, (ino_t)0, lfmode)) == 0) { pfatal("SORRY. CANNOT CREATE lost+found DIRECTORY\n\n"); return (0); } if ((changeino(UFS_ROOTINO, lfname, lfdir) & ALTERED) == 0) { pfatal("SORRY. CANNOT CREATE lost+found DIRECTORY\n\n"); return (0); } inodirty(dp); idesc.id_type = ADDR; idesc.id_func = pass4check; idesc.id_number = oldlfdir; adjust(&idesc, inoinfo(oldlfdir)->ino_linkcnt + 1); inoinfo(oldlfdir)->ino_linkcnt = 0; dp = ginode(lfdir); } if (inoinfo(lfdir)->ino_state != DFOUND) { pfatal("SORRY. NO lost+found DIRECTORY\n\n"); return (0); } (void)lftempname(tempname, orphan); if (makeentry(lfdir, orphan, (name ? name : tempname)) == 0) { pfatal("SORRY. NO SPACE IN lost+found DIRECTORY"); printf("\n\n"); return (0); } inoinfo(orphan)->ino_linkcnt--; if (lostdir) { if ((changeino(orphan, "..", lfdir) & ALTERED) == 0 && parentdir != (ino_t)-1) (void)makeentry(orphan, lfdir, ".."); dp = ginode(lfdir); DIP_SET(dp, di_nlink, DIP(dp, di_nlink) + 1); inodirty(dp); inoinfo(lfdir)->ino_linkcnt++; pwarn("DIR I=%lu CONNECTED. ", (u_long)orphan); if (parentdir != (ino_t)-1) { printf("PARENT WAS I=%lu\n", (u_long)parentdir); /* * The parent directory, because of the ordering * guarantees, has had the link count incremented * for the child, but no entry was made. This * fixes the parent link count so that fsck does * not need to be rerun. */ inoinfo(parentdir)->ino_linkcnt++; } if (preen == 0) printf("\n"); } return (1); } /* * fix an entry in a directory. */ int changeino(ino_t dir, const char *name, ino_t newnum) { struct inodesc idesc; memset(&idesc, 0, sizeof(struct inodesc)); idesc.id_type = DATA; idesc.id_func = chgino; idesc.id_number = dir; idesc.id_fix = DONTKNOW; idesc.id_name = strdup(name); idesc.id_parent = newnum; /* new value for name */ return (ckinode(ginode(dir), &idesc)); } /* * make an entry in a directory */ int makeentry(ino_t parent, ino_t ino, const char *name) { union dinode *dp; struct inodesc idesc; char pathbuf[MAXPATHLEN + 1]; if (parent < UFS_ROOTINO || parent >= maxino || ino < UFS_ROOTINO || ino >= maxino) return (0); memset(&idesc, 0, sizeof(struct inodesc)); idesc.id_type = DATA; idesc.id_func = mkentry; idesc.id_number = parent; idesc.id_parent = ino; /* this is the inode to enter */ idesc.id_fix = DONTKNOW; idesc.id_name = strdup(name); dp = ginode(parent); if (DIP(dp, di_size) % DIRBLKSIZ) { DIP_SET(dp, di_size, roundup(DIP(dp, di_size), DIRBLKSIZ)); inodirty(dp); } if ((ckinode(dp, &idesc) & ALTERED) != 0) return (1); getpathname(pathbuf, parent, parent); dp = ginode(parent); if (expanddir(dp, pathbuf) == 0) return (0); return (ckinode(dp, &idesc) & ALTERED); } /* * Attempt to expand the size of a directory */ static int expanddir(union dinode *dp, char *name) { ufs2_daddr_t lastbn, newblk; struct bufarea *bp; char *cp, firstblk[DIRBLKSIZ]; lastbn = lblkno(&sblock, DIP(dp, di_size)); if (lastbn >= UFS_NDADDR - 1 || DIP(dp, di_db[lastbn]) == 0 || DIP(dp, di_size) == 0) return (0); if ((newblk = allocblk(sblock.fs_frag)) == 0) return (0); DIP_SET(dp, di_db[lastbn + 1], DIP(dp, di_db[lastbn])); DIP_SET(dp, di_db[lastbn], newblk); DIP_SET(dp, di_size, DIP(dp, di_size) + sblock.fs_bsize); DIP_SET(dp, di_blocks, DIP(dp, di_blocks) + btodb(sblock.fs_bsize)); bp = getdirblk(DIP(dp, di_db[lastbn + 1]), sblksize(&sblock, 
DIP(dp, di_size), lastbn + 1)); if (bp->b_errs) goto bad; memmove(firstblk, bp->b_un.b_buf, DIRBLKSIZ); bp = getdirblk(newblk, sblock.fs_bsize); if (bp->b_errs) goto bad; memmove(bp->b_un.b_buf, firstblk, DIRBLKSIZ); for (cp = &bp->b_un.b_buf[DIRBLKSIZ]; cp < &bp->b_un.b_buf[sblock.fs_bsize]; cp += DIRBLKSIZ) memmove(cp, &emptydir, sizeof emptydir); dirty(bp); bp = getdirblk(DIP(dp, di_db[lastbn + 1]), sblksize(&sblock, DIP(dp, di_size), lastbn + 1)); if (bp->b_errs) goto bad; memmove(bp->b_un.b_buf, &emptydir, sizeof emptydir); pwarn("NO SPACE LEFT IN %s", name); if (preen) printf(" (EXPANDED)\n"); else if (reply("EXPAND") == 0) goto bad; dirty(bp); inodirty(dp); return (1); bad: DIP_SET(dp, di_db[lastbn], DIP(dp, di_db[lastbn + 1])); DIP_SET(dp, di_db[lastbn + 1], 0); DIP_SET(dp, di_size, DIP(dp, di_size) - sblock.fs_bsize); DIP_SET(dp, di_blocks, DIP(dp, di_blocks) - btodb(sblock.fs_bsize)); freeblk(newblk, sblock.fs_frag); return (0); } /* * allocate a new directory */ ino_t allocdir(ino_t parent, ino_t request, int mode) { ino_t ino; char *cp; union dinode *dp; struct bufarea *bp; struct inoinfo *inp; struct dirtemplate *dirp; ino = allocino(request, IFDIR|mode); dirp = &dirhead; dirp->dot_ino = ino; dirp->dotdot_ino = parent; dp = ginode(ino); bp = getdirblk(DIP(dp, di_db[0]), sblock.fs_fsize); if (bp->b_errs) { freeino(ino); return (0); } memmove(bp->b_un.b_buf, dirp, sizeof(struct dirtemplate)); for (cp = &bp->b_un.b_buf[DIRBLKSIZ]; cp < &bp->b_un.b_buf[sblock.fs_fsize]; cp += DIRBLKSIZ) memmove(cp, &emptydir, sizeof emptydir); dirty(bp); DIP_SET(dp, di_nlink, 2); inodirty(dp); if (ino == UFS_ROOTINO) { inoinfo(ino)->ino_linkcnt = DIP(dp, di_nlink); cacheino(dp, ino); return(ino); } if (!INO_IS_DVALID(parent)) { freeino(ino); return (0); } cacheino(dp, ino); inp = getinoinfo(ino); inp->i_parent = parent; inp->i_dotdot = parent; inoinfo(ino)->ino_state = inoinfo(parent)->ino_state; if (inoinfo(ino)->ino_state == DSTATE) { inoinfo(ino)->ino_linkcnt = DIP(dp, di_nlink); inoinfo(parent)->ino_linkcnt++; } dp = ginode(parent); DIP_SET(dp, di_nlink, DIP(dp, di_nlink) + 1); inodirty(dp); return (ino); } /* * free a directory inode */ static void freedir(ino_t ino, ino_t parent) { union dinode *dp; if (ino != parent) { dp = ginode(parent); DIP_SET(dp, di_nlink, DIP(dp, di_nlink) - 1); inodirty(dp); } freeino(ino); } /* * generate a temporary name for the lost+found directory. */ static int lftempname(char *bufp, ino_t ino) { ino_t in; char *cp; int namlen; cp = bufp + 2; for (in = maxino; in > 0; in /= 10) cp++; *--cp = 0; namlen = cp - bufp; in = ino; while (cp > bufp) { *--cp = (in % 10) + '0'; in /= 10; } *cp = '#'; return (namlen); } /* * Get a directory block. * Insure that it is held until another is requested. */ static struct bufarea * getdirblk(ufs2_daddr_t blkno, long size) { if (pdirbp != NULL) pdirbp->b_flags &= ~B_INUSE; pdirbp = getdatablk(blkno, size, BT_DIRDATA); return (pdirbp); } Index: projects/clang700-import/sbin/fsck_ffs/fsck.h =================================================================== --- projects/clang700-import/sbin/fsck_ffs/fsck.h (revision 340426) +++ projects/clang700-import/sbin/fsck_ffs/fsck.h (revision 340427) @@ -1,480 +1,480 @@ /*- * SPDX-License-Identifier: BSD-3-Clause and BSD-2-Clause-FreeBSD * * Copyright (c) 2002 Networks Associates Technology, Inc. * All rights reserved. 
* * This software was developed for the FreeBSD Project by Marshall * Kirk McKusick and Network Associates Laboratories, the Security * Research Division of Network Associates, Inc. under DARPA/SPAWAR * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS * research program. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Copyright (c) 1980, 1986, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)fsck.h 8.4 (Berkeley) 5/9/95 * $FreeBSD$ */ #ifndef _FSCK_H_ #define _FSCK_H_ #include #include #include #include #define MAXDUP 10 /* limit on dup blks (per inode) */ #define MAXBAD 10 /* limit on bad blks (per inode) */ #define MINBUFS 10 /* minimum number of buffers required */ #define MAXBUFS 40 /* maximum space to allocate to buffers */ #define INOBUFSIZE 64*1024 /* size of buffer to read inodes in pass1 */ #define ZEROBUFSIZE (dev_bsize * 128) /* size of zero buffer used by -Z */ union dinode { struct ufs1_dinode dp1; struct ufs2_dinode dp2; }; #define DIP(dp, field) \ ((sblock.fs_magic == FS_UFS1_MAGIC) ? \ (dp)->dp1.field : (dp)->dp2.field) #define DIP_SET(dp, field, val) do { \ if (sblock.fs_magic == FS_UFS1_MAGIC) \ (dp)->dp1.field = (val); \ else \ (dp)->dp2.field = (val); \ } while (0) /* * Each inode on the file system is described by the following structure. * The linkcnt is initially set to the value in the inode. Each time it * is found during the descent in passes 2, 3, and 4 the count is * decremented. Any inodes whose count is non-zero after pass 4 needs to * have its link count adjusted by the value remaining in ino_linkcnt. */ struct inostat { char ino_state; /* state of inode, see below */ char ino_type; /* type of inode */ short ino_linkcnt; /* number of links not found */ }; /* * Inode states. */ #define USTATE 0x1 /* inode not allocated */ #define FSTATE 0x2 /* inode is file */ #define FZLINK 0x3 /* inode is file with a link count of zero */ #define DSTATE 0x4 /* inode is directory */ #define DZLINK 0x5 /* inode is directory with a zero link count */ #define DFOUND 0x6 /* directory found during descent */ /* 0x7 UNUSED - see S_IS_DVALID() definition */ #define DCLEAR 0x8 /* directory is to be cleared */ #define FCLEAR 0x9 /* file is to be cleared */ /* DUNFOUND === (state == DSTATE || state == DZLINK) */ #define S_IS_DUNFOUND(state) (((state) & ~0x1) == DSTATE) /* DVALID === (state == DSTATE || state == DZLINK || state == DFOUND) */ #define S_IS_DVALID(state) (((state) & ~0x3) == DSTATE) #define INO_IS_DUNFOUND(ino) S_IS_DUNFOUND(inoinfo(ino)->ino_state) #define INO_IS_DVALID(ino) S_IS_DVALID(inoinfo(ino)->ino_state) /* * Inode state information is contained on per cylinder group lists * which are described by the following structure. */ struct inostatlist { long il_numalloced; /* number of inodes allocated in this cg */ struct inostat *il_stat;/* inostat info for this cylinder group */ } *inostathead; /* * buffer cache structure. */ struct bufarea { TAILQ_ENTRY(bufarea) b_list; /* buffer list */ ufs2_daddr_t b_bno; int b_size; int b_errs; int b_flags; int b_type; union { char *b_buf; /* buffer space */ ufs1_daddr_t *b_indir1; /* UFS1 indirect block */ ufs2_daddr_t *b_indir2; /* UFS2 indirect block */ struct fs *b_fs; /* super block */ struct cg *b_cg; /* cylinder group */ struct ufs1_dinode *b_dinode1; /* UFS1 inode block */ struct ufs2_dinode *b_dinode2; /* UFS2 inode block */ } b_un; char b_dirty; }; #define IBLK(bp, i) \ ((sblock.fs_magic == FS_UFS1_MAGIC) ? 
\ (bp)->b_un.b_indir1[i] : (bp)->b_un.b_indir2[i]) #define IBLK_SET(bp, i, val) do { \ if (sblock.fs_magic == FS_UFS1_MAGIC) \ (bp)->b_un.b_indir1[i] = (val); \ else \ (bp)->b_un.b_indir2[i] = (val); \ } while (0) /* * Buffer flags */ #define B_INUSE 0x00000001 /* Buffer is in use */ /* * Type of data in buffer */ #define BT_UNKNOWN 0 /* Buffer holds a superblock */ #define BT_SUPERBLK 1 /* Buffer holds a superblock */ #define BT_CYLGRP 2 /* Buffer holds a cylinder group map */ #define BT_LEVEL1 3 /* Buffer holds single level indirect */ #define BT_LEVEL2 4 /* Buffer holds double level indirect */ #define BT_LEVEL3 5 /* Buffer holds triple level indirect */ #define BT_EXTATTR 6 /* Buffer holds external attribute data */ #define BT_INODES 7 /* Buffer holds external attribute data */ #define BT_DIRDATA 8 /* Buffer holds directory data */ #define BT_DATA 9 /* Buffer holds user data */ #define BT_NUMBUFTYPES 10 #define BT_NAMES { \ "unknown", \ "Superblock", \ "Cylinder Group", \ "Single Level Indirect", \ "Double Level Indirect", \ "Triple Level Indirect", \ "External Attribute", \ "Inode Block", \ "Directory Contents", \ "User Data" } extern long readcnt[BT_NUMBUFTYPES]; extern long totalreadcnt[BT_NUMBUFTYPES]; extern struct timespec readtime[BT_NUMBUFTYPES]; extern struct timespec totalreadtime[BT_NUMBUFTYPES]; extern struct timespec startprog; extern struct bufarea sblk; /* file system superblock */ extern struct bufarea *pdirbp; /* current directory contents */ extern struct bufarea *pbp; /* current inode block */ #define dirty(bp) do { \ if (fswritefd < 0) \ pfatal("SETTING DIRTY FLAG IN READ_ONLY MODE\n"); \ else \ (bp)->b_dirty = 1; \ } while (0) #define initbarea(bp, type) do { \ (bp)->b_dirty = 0; \ (bp)->b_bno = (ufs2_daddr_t)-1; \ (bp)->b_flags = 0; \ (bp)->b_type = type; \ } while (0) #define sbdirty() dirty(&sblk) #define sblock (*sblk.b_un.b_fs) enum fixstate {DONTKNOW, NOFIX, FIX, IGNORE}; extern ino_t cursnapshot; struct inodesc { enum fixstate id_fix; /* policy on fixing errors */ int (*id_func)(struct inodesc *); /* function to be applied to blocks of inode */ ino_t id_number; /* inode number described */ ino_t id_parent; /* for DATA nodes, their parent */ ufs_lbn_t id_lbn; /* logical block number of current block */ ufs2_daddr_t id_blkno; /* current block number being examined */ int id_numfrags; /* number of frags contained in block */ off_t id_filesize; /* for DATA nodes, the size of the directory */ ufs2_daddr_t id_entryno;/* for DATA nodes, current entry number */ int id_loc; /* for DATA nodes, current location in dir */ struct direct *id_dirp; /* for DATA nodes, ptr to current entry */ char *id_name; /* for DATA nodes, name to find or enter */ char id_type; /* type of descriptor, DATA or ADDR */ }; /* file types */ #define DATA 1 /* a directory */ #define SNAP 2 /* a snapshot */ #define ADDR 3 /* anything but a directory or a snapshot */ /* * Linked list of duplicate blocks. * * The list is composed of two parts. The first part of the * list (from duplist through the node pointed to by muldup) * contains a single copy of each duplicate block that has been * found. The second part of the list (from muldup to the end) * contains duplicate blocks that have been found more than once. * To check if a block has been found as a duplicate it is only * necessary to search from duplist through muldup. To find the * total number of times that a block has been found as a duplicate * the entire list must be searched for occurrences of the block * in question. 
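Because only the stretch from duplist through muldup holds one copy of each duplicate, a membership test never has to look past muldup; the diagram that follows shows why. A small sketch of that lookup, using a simplified node type and invented helper names rather than fsck's own routines:

#include <stdio.h>
#include <stdlib.h>

/* simplified node; fsck's real list carries ufs2_daddr_t block numbers */
struct dupnode {
	struct dupnode *next;
	long dup;
};

static struct dupnode *duplist;		/* head of the list */
static struct dupnode *muldup;		/* last node of the unique prefix */

/* has blkno already been seen as a duplicate? */
static int
isdup(long blkno)
{
	struct dupnode *dp;

	for (dp = duplist; dp != NULL; dp = dp->next) {
		if (dp->dup == blkno)
			return (1);
		if (dp == muldup)	/* past here are only repeats */
			break;
	}
	return (0);
}

static struct dupnode *
newnode(long blkno)
{
	struct dupnode *dp = malloc(sizeof(*dp));

	if (dp == NULL)
		abort();
	dp->dup = blkno;
	dp->next = NULL;
	return (dp);
}

int
main(void)
{
	/* unique prefix: 100, 200; suffix records 100 being hit again */
	duplist = newnode(100);
	duplist->next = newnode(200);
	muldup = duplist->next;
	muldup->next = newnode(100);

	printf("block 100 duplicate? %d\n", isdup(100));
	printf("block 300 duplicate? %d\n", isdup(300));
	return (0);
}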
The following diagram shows a sample list where * w (found twice), x (found once), y (found three times), and z * (found once) are duplicate block numbers: * * w -> y -> x -> z -> y -> w -> y * ^ ^ * | | * duplist muldup */ struct dups { struct dups *next; ufs2_daddr_t dup; }; struct dups *duplist; /* head of dup list */ struct dups *muldup; /* end of unique duplicate dup block numbers */ /* * Inode cache data structures. */ struct inoinfo { struct inoinfo *i_nexthash; /* next entry in hash chain */ ino_t i_number; /* inode number of this entry */ ino_t i_parent; /* inode number of parent */ ino_t i_dotdot; /* inode number of `..' */ size_t i_isize; /* size of inode */ u_int i_numblks; /* size of block array in bytes */ ufs2_daddr_t i_blks[1]; /* actually longer */ } **inphead, **inpsort; extern long dirhash, inplast; extern unsigned long numdirs, listmax; extern long countdirs; /* number of directories we actually found */ #define MIBSIZE 3 /* size of fsck sysctl MIBs */ extern int adjrefcnt[MIBSIZE]; /* MIB command to adjust inode reference cnt */ extern int adjblkcnt[MIBSIZE]; /* MIB command to adjust inode block count */ extern int adjndir[MIBSIZE]; /* MIB command to adjust number of directories */ extern int adjnbfree[MIBSIZE]; /* MIB command to adjust number of free blocks */ extern int adjnifree[MIBSIZE]; /* MIB command to adjust number of free inodes */ extern int adjnffree[MIBSIZE]; /* MIB command to adjust number of free frags */ extern int adjnumclusters[MIBSIZE]; /* MIB command to adjust number of free clusters */ extern int freefiles[MIBSIZE]; /* MIB command to free a set of files */ extern int freedirs[MIBSIZE]; /* MIB command to free a set of directories */ extern int freeblks[MIBSIZE]; /* MIB command to free a set of data blocks */ extern struct fsck_cmd cmd; /* sysctl file system update commands */ extern char snapname[BUFSIZ]; /* when doing snapshots, the name of the file */ extern char *cdevname; /* name of device being checked */ extern long dev_bsize; /* computed value of DEV_BSIZE */ extern long secsize; /* actual disk sector size */ extern u_int real_dev_bsize; /* actual disk sector size, not overridden */ extern char nflag; /* assume a no response */ extern char yflag; /* assume a yes response */ extern int bkgrdflag; /* use a snapshot to run on an active system */ extern off_t bflag; /* location of alternate super block */ extern int debug; /* output debugging info */ extern int Eflag; /* delete empty data blocks */ extern int Zflag; /* zero empty data blocks */ extern int inoopt; /* trim out unused inodes */ extern char ckclean; /* only do work if not cleanly unmounted */ extern int cvtlevel; /* convert to newer file system format */ extern int ckhashadd; /* check hashes to be added */ extern int bkgrdcheck; /* determine if background check is possible */ extern int bkgrdsumadj; /* whether the kernel have ability to adjust superblock summary */ extern char usedsoftdep; /* just fix soft dependency inconsistencies */ extern char preen; /* just fix normal inconsistencies */ extern char rerun; /* rerun fsck. 
Only used in non-preen mode */ extern int returntosingle; /* 1 => return to single user mode on exit */ extern char resolved; /* cleared if unresolved changes => not clean */ extern char havesb; /* superblock has been read */ extern char skipclean; /* skip clean file systems if preening */ extern int fsmodified; /* 1 => write done to file system */ extern int fsreadfd; /* file descriptor for reading file system */ extern int fswritefd; /* file descriptor for writing file system */ extern struct uufsd disk; /* libufs user-ufs disk structure */ extern int surrender; /* Give up if reads fail */ extern int wantrestart; /* Restart fsck on early termination */ extern ufs2_daddr_t maxfsblock; /* number of blocks in the file system */ extern char *blockmap; /* ptr to primary blk allocation map */ extern ino_t maxino; /* number of inodes in file system */ extern ino_t lfdir; /* lost & found directory inode number */ extern const char *lfname; /* lost & found directory name */ extern int lfmode; /* lost & found directory creation mode */ extern ufs2_daddr_t n_blks; /* number of blocks in use */ extern ino_t n_files; /* number of files in use */ extern volatile sig_atomic_t got_siginfo; /* received a SIGINFO */ extern volatile sig_atomic_t got_sigalarm; /* received a SIGALRM */ #define clearinode(dp) \ if (sblock.fs_magic == FS_UFS1_MAGIC) { \ (dp)->dp1 = ufs1_zino; \ } else { \ (dp)->dp2 = ufs2_zino; \ } extern struct ufs1_dinode ufs1_zino; extern struct ufs2_dinode ufs2_zino; #define setbmap(blkno) setbit(blockmap, blkno) #define testbmap(blkno) isset(blockmap, blkno) #define clrbmap(blkno) clrbit(blockmap, blkno) #define STOP 0x01 #define SKIP 0x02 #define KEEPON 0x04 #define ALTERED 0x08 #define FOUND 0x10 #define EEXIT 8 /* Standard error exit. */ #define ERERUN 16 /* fsck needs to be re-run. */ #define ERESTART -1 int flushentry(void); /* * Wrapper for malloc() that flushes the cylinder group cache to try * to get space. */ static inline void* Malloc(size_t size) { void *retval; while ((retval = malloc(size)) == NULL) if (flushentry() == 0) break; return (retval); } /* * Wrapper for calloc() that flushes the cylinder group cache to try * to get space. 
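/*
 * Illustrative sketch, not part of the original header: the usual caller
 * pattern for these wrappers.  A NULL return is treated as fatal, since
 * flushentry() has already been given a chance to release cached cylinder
 * group buffers before the wrapper gives up.  The helper name is
 * hypothetical.
 */
static inline void *
must_alloc(size_t size)
{
	void *p;

	if ((p = Malloc(size)) == NULL)
		errx(EEXIT, "cannot allocate %zu bytes", size);
	return (p);
}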
*/ static inline void* Calloc(size_t cnt, size_t size) { void *retval; while ((retval = calloc(cnt, size)) == NULL) if (flushentry() == 0) break; return (retval); } struct fstab; void adjust(struct inodesc *, int lcnt); ufs2_daddr_t allocblk(long frags); ino_t allocdir(ino_t parent, ino_t request, int mode); ino_t allocino(ino_t request, int type); void blkerror(ino_t ino, const char *type, ufs2_daddr_t blk); char *blockcheck(char *name); int blread(int fd, char *buf, ufs2_daddr_t blk, long size); void bufinit(void); void blwrite(int fd, char *buf, ufs2_daddr_t blk, ssize_t size); void blerase(int fd, ufs2_daddr_t blk, long size); void blzero(int fd, ufs2_daddr_t blk, long size); void cacheino(union dinode *dp, ino_t inumber); void catch(int); void catchquit(int); int changeino(ino_t dir, const char *name, ino_t newnum); int check_cgmagic(int cg, struct bufarea *cgbp); int chkrange(ufs2_daddr_t blk, int cnt); void ckfini(int markclean); int ckinode(union dinode *dp, struct inodesc *); void clri(struct inodesc *, const char *type, int flag); int clearentry(struct inodesc *); void direrror(ino_t ino, const char *errmesg); int dirscan(struct inodesc *); int dofix(struct inodesc *, const char *msg); int eascan(struct inodesc *, struct ufs2_dinode *dp); void fileerror(ino_t cwd, ino_t ino, const char *errmesg); void finalIOstats(void); int findino(struct inodesc *); int findname(struct inodesc *); void flush(int fd, struct bufarea *bp); void freeblk(ufs2_daddr_t blkno, long frags); void freeino(ino_t ino); void freeinodebuf(void); void fsutilinit(void); int ftypeok(union dinode *dp); void getblk(struct bufarea *bp, ufs2_daddr_t blk, long size); struct bufarea *cglookup(int cg); struct bufarea *getdatablk(ufs2_daddr_t blkno, long size, int type); struct inoinfo *getinoinfo(ino_t inumber); union dinode *getnextinode(ino_t inumber, int rebuildcg); void getpathname(char *namebuf, ino_t curdir, ino_t ino); union dinode *ginode(ino_t inumber); void infohandler(int sig); void alarmhandler(int sig); void inocleanup(void); void inodirty(union dinode *); struct inostat *inoinfo(ino_t inum); void IOstats(char *what); int linkup(ino_t orphan, ino_t parentdir, char *name); int makeentry(ino_t parent, ino_t ino, const char *name); void panic(const char *fmt, ...) __printflike(1, 2); void pass1(void); void pass1b(void); int pass1check(struct inodesc *); void pass2(void); void pass3(void); void pass4(void); int pass4check(struct inodesc *); void pass5(void); void pfatal(const char *fmt, ...) __printflike(1, 2); -void pinode(ino_t ino); void propagate(void); +void prtinode(ino_t ino, union dinode *dp); void pwarn(const char *fmt, ...) __printflike(1, 2); int readsb(int listerr); int reply(const char *question); void rwerror(const char *mesg, ufs2_daddr_t blk); void sblock_init(void); void setinodebuf(ino_t); int setup(char *dev); void gjournal_check(const char *filesys); int suj_check(const char *filesys); void update_maps(struct cg *, struct cg*, int); void fsckinit(void); #endif /* !_FSCK_H_ */ Index: projects/clang700-import/sbin/fsck_ffs/gjournal.c =================================================================== --- projects/clang700-import/sbin/fsck_ffs/gjournal.c (revision 340426) +++ projects/clang700-import/sbin/fsck_ffs/gjournal.c (revision 340427) @@ -1,507 +1,507 @@ /*- * SPDX-License-Identifier: BSD-3-Clause AND BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Pawel Jakub Dawidek * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "fsck.h" struct cgchain { union { struct cg cgcu_cg; char cgcu_buf[MAXBSIZE]; } cgc_union; int cgc_busy; int cgc_dirty; LIST_ENTRY(cgchain) cgc_next; }; #define cgc_cg cgc_union.cgcu_cg #define MAX_CACHED_CGS 1024 static unsigned ncgs = 0; static LIST_HEAD(, cgchain) cglist = LIST_HEAD_INITIALIZER(cglist); static const char *devnam; static struct uufsd *diskp = NULL; static struct fs *fs = NULL; struct ufs2_dinode ufs2_zino; static void putcgs(void); /* * Return cylinder group from the cache or load it if it is not in the * cache yet. 
* Don't cache more than MAX_CACHED_CGS cylinder groups. */ static struct cgchain * getcg(int cg) { struct cgchain *cgc; assert(diskp != NULL && fs != NULL); LIST_FOREACH(cgc, &cglist, cgc_next) { if (cgc->cgc_cg.cg_cgx == cg) { //printf("%s: Found cg=%d\n", __func__, cg); return (cgc); } } /* * Our cache is full? Let's clean it up. */ if (ncgs >= MAX_CACHED_CGS) { //printf("%s: Flushing CGs.\n", __func__); putcgs(); } cgc = malloc(sizeof(*cgc)); if (cgc == NULL) { /* * Cannot allocate memory? * Let's put all currently loaded and not busy cylinder groups * on disk and try again. */ //printf("%s: No memory, flushing CGs.\n", __func__); putcgs(); cgc = malloc(sizeof(*cgc)); if (cgc == NULL) err(1, "malloc(%zu)", sizeof(*cgc)); } if (cgget(diskp, cg, &cgc->cgc_cg) == -1) err(1, "cgget(%d)", cg); cgc->cgc_busy = 0; cgc->cgc_dirty = 0; LIST_INSERT_HEAD(&cglist, cgc, cgc_next); ncgs++; //printf("%s: Read cg=%d\n", __func__, cg); return (cgc); } /* * Mark cylinder group as dirty - it will be written back on putcgs(). */ static void dirtycg(struct cgchain *cgc) { cgc->cgc_dirty = 1; } /* * Mark cylinder group as busy - it will not be freed on putcgs(). */ static void busycg(struct cgchain *cgc) { cgc->cgc_busy = 1; } /* * Unmark the given cylinder group as busy. */ static void unbusycg(struct cgchain *cgc) { cgc->cgc_busy = 0; } /* * Write back all dirty cylinder groups. * Free all non-busy cylinder groups. */ static void putcgs(void) { struct cgchain *cgc, *cgc2; assert(diskp != NULL && fs != NULL); LIST_FOREACH_SAFE(cgc, &cglist, cgc_next, cgc2) { if (cgc->cgc_busy) continue; LIST_REMOVE(cgc, cgc_next); ncgs--; if (cgc->cgc_dirty) { if (cgput(diskp, &cgc->cgc_cg) == -1) err(1, "cgput(%d)", cgc->cgc_cg.cg_cgx); //printf("%s: Wrote cg=%d\n", __func__, // cgc->cgc_cg.cg_cgx); } free(cgc); } } #if 0 /* * Free all non-busy cylinder groups without storing the dirty ones. */ static void cancelcgs(void) { struct cgchain *cgc; assert(diskp != NULL && fs != NULL); while ((cgc = LIST_FIRST(&cglist)) != NULL) { if (cgc->cgc_busy) continue; LIST_REMOVE(cgc, cgc_next); //printf("%s: Canceled cg=%d\n", __func__, cgc->cgc_cg.cg_cgx); free(cgc); } } #endif /* * Open the given provider, load superblock. */ static void opendisk(void) { if (diskp != NULL) return; diskp = &disk; if (ufs_disk_fillout(diskp, devnam) == -1) { err(1, "ufs_disk_fillout(%s) failed: %s", devnam, diskp->d_error); } fs = &diskp->d_fs; } /* * Mark file system as clean, write the super-block back, close the disk. 
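/*
 * Illustrative sketch, not part of the original file: the intended life
 * cycle of the cylinder group cache defined above.  A caller pins a group
 * while working on it, marks it dirty if it was modified, and lets
 * putcgs() write everything back in one sweep, which is how
 * gjournal_check() below drives it.  The function name is hypothetical.
 */
static void
touch_cg(int cg)
{
	struct cgchain *cgc;

	cgc = getcg(cg);	/* load it, or find it in the cache */
	busycg(cgc);		/* keep putcgs() from freeing it */
	/* ... inspect or modify cgc->cgc_cg here, then: */
	dirtycg(cgc);		/* schedule a write-back */
	unbusycg(cgc);
	putcgs();		/* write dirty groups, free non-busy ones */
}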
*/ static void closedisk(void) { fs->fs_clean = 1; if (sbwrite(diskp, 0) == -1) err(1, "sbwrite(%s)", devnam); if (ufs_disk_close(diskp) == -1) err(1, "ufs_disk_close(%s)", devnam); free(diskp); diskp = NULL; fs = NULL; } static void blkfree(ufs2_daddr_t bno, long size) { struct cgchain *cgc; struct cg *cgp; ufs1_daddr_t fragno, cgbno; int i, cg, blk, frags, bbase; u_int8_t *blksfree; cg = dtog(fs, bno); cgc = getcg(cg); dirtycg(cgc); cgp = &cgc->cgc_cg; cgbno = dtogd(fs, bno); blksfree = cg_blksfree(cgp); if (size == fs->fs_bsize) { fragno = fragstoblks(fs, cgbno); if (!ffs_isfreeblock(fs, blksfree, fragno)) assert(!"blkfree: freeing free block"); ffs_setblock(fs, blksfree, fragno); ffs_clusteracct(fs, cgp, fragno, 1); cgp->cg_cs.cs_nbfree++; fs->fs_cstotal.cs_nbfree++; fs->fs_cs(fs, cg).cs_nbfree++; } else { bbase = cgbno - fragnum(fs, cgbno); /* * decrement the counts associated with the old frags */ blk = blkmap(fs, blksfree, bbase); ffs_fragacct(fs, blk, cgp->cg_frsum, -1); /* * deallocate the fragment */ frags = numfrags(fs, size); for (i = 0; i < frags; i++) { if (isset(blksfree, cgbno + i)) assert(!"blkfree: freeing free frag"); setbit(blksfree, cgbno + i); } cgp->cg_cs.cs_nffree += i; fs->fs_cstotal.cs_nffree += i; fs->fs_cs(fs, cg).cs_nffree += i; /* * add back in counts associated with the new frags */ blk = blkmap(fs, blksfree, bbase); ffs_fragacct(fs, blk, cgp->cg_frsum, 1); /* * if a complete block has been reassembled, account for it */ fragno = fragstoblks(fs, bbase); if (ffs_isblock(fs, blksfree, fragno)) { cgp->cg_cs.cs_nffree -= fs->fs_frag; fs->fs_cstotal.cs_nffree -= fs->fs_frag; fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag; ffs_clusteracct(fs, cgp, fragno, 1); cgp->cg_cs.cs_nbfree++; fs->fs_cstotal.cs_nbfree++; fs->fs_cs(fs, cg).cs_nbfree++; } } } /* * Recursively free all indirect blocks. */ static void freeindir(ufs2_daddr_t blk, int level) { char sblks[MAXBSIZE]; ufs2_daddr_t *blks; int i; if (bread(diskp, fsbtodb(fs, blk), (void *)&sblks, (size_t)fs->fs_bsize) == -1) err(1, "bread: %s", diskp->d_error); blks = (ufs2_daddr_t *)&sblks; for (i = 0; i < NINDIR(fs); i++) { if (blks[i] == 0) break; if (level == 0) blkfree(blks[i], fs->fs_bsize); else freeindir(blks[i], level - 1); } blkfree(blk, fs->fs_bsize); } #define dblksize(fs, dino, lbn) \ ((dino)->di_size >= smalllblktosize(fs, (lbn) + 1) \ ? (fs)->fs_bsize \ : fragroundup(fs, blkoff(fs, (dino)->di_size))) /* * Free all blocks associated with the given inode. 
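/*
 * Illustrative sketch, not part of the original file: the number of data
 * blocks reachable through one indirect block at a given level, which is
 * why freeindir() above recurses with "level - 1" until it reaches the
 * first-level blocks.  For a UFS2 file system with 32K blocks and 8-byte
 * block pointers NINDIR(fs) is 4096, so the three levels cover 4096,
 * 4096^2 and 4096^3 data blocks respectively.  The helper name is
 * hypothetical.
 */
static uint64_t
indir_capacity(const struct fs *fs, int level)
{
	uint64_t nblks;
	int i;

	for (nblks = 1, i = 0; i <= level; i++)
		nblks *= NINDIR(fs);
	return (nblks);
}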
*/ static void clear_inode(struct ufs2_dinode *dino) { ufs2_daddr_t bn; int extblocks, i, level; off_t osize; long bsize; extblocks = 0; if (fs->fs_magic == FS_UFS2_MAGIC && dino->di_extsize > 0) extblocks = btodb(fragroundup(fs, dino->di_extsize)); /* deallocate external attributes blocks */ if (extblocks > 0) { osize = dino->di_extsize; dino->di_blocks -= extblocks; dino->di_extsize = 0; for (i = 0; i < UFS_NXADDR; i++) { if (dino->di_extb[i] == 0) continue; blkfree(dino->di_extb[i], sblksize(fs, osize, i)); } } #define SINGLE 0 /* index of single indirect block */ #define DOUBLE 1 /* index of double indirect block */ #define TRIPLE 2 /* index of triple indirect block */ /* deallocate indirect blocks */ for (level = SINGLE; level <= TRIPLE; level++) { if (dino->di_ib[level] == 0) break; freeindir(dino->di_ib[level], level); } /* deallocate direct blocks and fragments */ for (i = 0; i < UFS_NDADDR; i++) { bn = dino->di_db[i]; if (bn == 0) continue; bsize = dblksize(fs, dino, i); blkfree(bn, bsize); } } void gjournal_check(const char *filesys) { - struct ufs2_dinode *dino; - void *p; + union dinodep dp; struct cgchain *cgc; struct cg *cgp; uint8_t *inosused; ino_t cino, ino; - int cg, mode; + int cg; devnam = filesys; opendisk(); /* Are there any unreferenced inodes in this file system? */ if (fs->fs_unrefs == 0) { //printf("No unreferenced inodes.\n"); closedisk(); return; } for (cg = 0; cg < fs->fs_ncg; cg++) { /* Show progress if requested. */ if (got_siginfo) { printf("%s: phase j: cyl group %d of %d (%d%%)\n", cdevname, cg, fs->fs_ncg, cg * 100 / fs->fs_ncg); got_siginfo = 0; } if (got_sigalarm) { setproctitle("%s pj %d%%", cdevname, cg * 100 / fs->fs_ncg); got_sigalarm = 0; } cgc = getcg(cg); cgp = &cgc->cgc_cg; /* Are there any unreferenced inodes in this cylinder group? */ if (cgp->cg_unrefs == 0) continue; //printf("Analizing cylinder group %d (count=%d)\n", cg, cgp->cg_unrefs); /* * We are going to modify this cylinder group, so we want it to * be written back. */ dirtycg(cgc); /* We don't want it to be freed in the meantime. */ busycg(cgc); inosused = cg_inosused(cgp); /* * Now go through the list of all inodes in this cylinder group * to find unreferenced ones. */ for (cino = 0; cino < fs->fs_ipg; cino++) { ino = fs->fs_ipg * cg + cino; /* Unallocated? Skip it. */ if (isclr(inosused, cino)) continue; - if (getino(diskp, &p, ino, &mode) == -1) - err(1, "getino(cg=%d ino=%ju)", - cg, (uintmax_t)ino); - dino = p; + if (getinode(diskp, &dp, ino) == -1) + err(1, "getinode (cg=%d ino=%ju) %s", + cg, (uintmax_t)ino, diskp->d_error); /* Not a regular file nor directory? Skip it. */ - if (!S_ISREG(dino->di_mode) && !S_ISDIR(dino->di_mode)) + if (!S_ISREG(dp.dp2->di_mode) && + !S_ISDIR(dp.dp2->di_mode)) continue; /* Has reference(s)? Skip it. */ - if (dino->di_nlink > 0) + if (dp.dp2->di_nlink > 0) continue; - //printf("Clearing inode=%d (size=%jd)\n", ino, (intmax_t)dino->di_size); + /* printf("Clearing inode=%d (size=%jd)\n", ino, + (intmax_t)dp.dp2->di_size); */ /* Free inode's blocks. */ - clear_inode(dino); + clear_inode(dp.dp2); /* Deallocate it. */ clrbit(inosused, cino); /* Update position of last used inode. */ if (ino < cgp->cg_irotor) cgp->cg_irotor = ino; /* Update statistics. */ cgp->cg_cs.cs_nifree++; fs->fs_cs(fs, cg).cs_nifree++; fs->fs_cstotal.cs_nifree++; cgp->cg_unrefs--; fs->fs_unrefs--; /* If this is directory, update related statistics. 
*/ - if (S_ISDIR(dino->di_mode)) { + if (S_ISDIR(dp.dp2->di_mode)) { cgp->cg_cs.cs_ndir--; fs->fs_cs(fs, cg).cs_ndir--; fs->fs_cstotal.cs_ndir--; } /* Zero-fill the inode. */ - *dino = ufs2_zino; + *dp.dp2 = ufs2_zino; /* Write the inode back. */ - if (putino(diskp) == -1) - err(1, "putino(cg=%d ino=%ju)", - cg, (uintmax_t)ino); + if (putinode(diskp) == -1) + err(1, "putinode (cg=%d ino=%ju) %s", + cg, (uintmax_t)ino, diskp->d_error); if (cgp->cg_unrefs == 0) { //printf("No more unreferenced inodes in cg=%d.\n", cg); break; } } /* * We don't need this cylinder group anymore, so feel free to * free it if needed. */ unbusycg(cgc); /* * If there are no more unreferenced inodes, there is no need to * check other cylinder groups. */ if (fs->fs_unrefs == 0) { //printf("No more unreferenced inodes (cg=%d/%d).\n", cg, // fs->fs_ncg); break; } } /* Write back modified cylinder groups. */ putcgs(); /* Write back updated statistics and super-block. */ closedisk(); } Index: projects/clang700-import/sbin/fsck_ffs/inode.c =================================================================== --- projects/clang700-import/sbin/fsck_ffs/inode.c (revision 340426) +++ projects/clang700-import/sbin/fsck_ffs/inode.c (revision 340427) @@ -1,738 +1,737 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1980, 1986, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
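/*
 * Illustrative sketch, not part of the original change: the read-modify-
 * write pattern that gjournal_check() above now uses through the libufs
 * getinode()/putinode() interface and union dinodep, shown for a UFS2
 * inode with the error handling trimmed down.  The function name is
 * hypothetical.
 */
static void
zero_one_inode(struct uufsd *disk, ino_t ino)
{
	union dinodep dp;

	if (getinode(disk, &dp, ino) == -1)
		err(1, "getinode(%ju): %s", (uintmax_t)ino, disk->d_error);
	*dp.dp2 = ufs2_zino;		/* clear the on-disk inode */
	if (putinode(disk) == -1)
		err(1, "putinode(%ju): %s", (uintmax_t)ino, disk->d_error);
}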
*/ #if 0 #ifndef lint static const char sccsid[] = "@(#)inode.c 8.8 (Berkeley) 4/28/95"; #endif /* not lint */ #endif #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include +#include #include "fsck.h" static ino_t startinum; static int iblock(struct inodesc *, long ilevel, off_t isize, int type); int ckinode(union dinode *dp, struct inodesc *idesc) { off_t remsize, sizepb; int i, offset, ret; union dinode dino; ufs2_daddr_t ndb; mode_t mode; char pathbuf[MAXPATHLEN + 1]; if (idesc->id_fix != IGNORE) idesc->id_fix = DONTKNOW; idesc->id_lbn = -1; idesc->id_entryno = 0; idesc->id_filesize = DIP(dp, di_size); mode = DIP(dp, di_mode) & IFMT; if (mode == IFBLK || mode == IFCHR || (mode == IFLNK && DIP(dp, di_size) < (unsigned)sblock.fs_maxsymlinklen)) return (KEEPON); if (sblock.fs_magic == FS_UFS1_MAGIC) dino.dp1 = dp->dp1; else dino.dp2 = dp->dp2; ndb = howmany(DIP(&dino, di_size), sblock.fs_bsize); for (i = 0; i < UFS_NDADDR; i++) { idesc->id_lbn++; if (--ndb == 0 && (offset = blkoff(&sblock, DIP(&dino, di_size))) != 0) idesc->id_numfrags = numfrags(&sblock, fragroundup(&sblock, offset)); else idesc->id_numfrags = sblock.fs_frag; if (DIP(&dino, di_db[i]) == 0) { if (idesc->id_type == DATA && ndb >= 0) { /* An empty block in a directory XXX */ getpathname(pathbuf, idesc->id_number, idesc->id_number); pfatal("DIRECTORY %s: CONTAINS EMPTY BLOCKS", pathbuf); if (reply("ADJUST LENGTH") == 1) { dp = ginode(idesc->id_number); DIP_SET(dp, di_size, i * sblock.fs_bsize); printf( "YOU MUST RERUN FSCK AFTERWARDS\n"); rerun = 1; inodirty(dp); } } continue; } idesc->id_blkno = DIP(&dino, di_db[i]); if (idesc->id_type != DATA) ret = (*idesc->id_func)(idesc); else ret = dirscan(idesc); if (ret & STOP) return (ret); } idesc->id_numfrags = sblock.fs_frag; remsize = DIP(&dino, di_size) - sblock.fs_bsize * UFS_NDADDR; sizepb = sblock.fs_bsize; for (i = 0; i < UFS_NIADDR; i++) { sizepb *= NINDIR(&sblock); if (DIP(&dino, di_ib[i])) { idesc->id_blkno = DIP(&dino, di_ib[i]); ret = iblock(idesc, i + 1, remsize, BT_LEVEL1 + i); if (ret & STOP) return (ret); } else { idesc->id_lbn += sizepb / sblock.fs_bsize; if (idesc->id_type == DATA && remsize > 0) { /* An empty block in a directory XXX */ getpathname(pathbuf, idesc->id_number, idesc->id_number); pfatal("DIRECTORY %s: CONTAINS EMPTY BLOCKS", pathbuf); if (reply("ADJUST LENGTH") == 1) { dp = ginode(idesc->id_number); DIP_SET(dp, di_size, DIP(dp, di_size) - remsize); remsize = 0; printf( "YOU MUST RERUN FSCK AFTERWARDS\n"); rerun = 1; inodirty(dp); break; } } } remsize -= sizepb; } return (KEEPON); } static int iblock(struct inodesc *idesc, long ilevel, off_t isize, int type) { struct bufarea *bp; int i, n, (*func)(struct inodesc *), nif; off_t sizepb; char buf[BUFSIZ]; char pathbuf[MAXPATHLEN + 1]; union dinode *dp; if (idesc->id_type != DATA) { func = idesc->id_func; if (((n = (*func)(idesc)) & KEEPON) == 0) return (n); } else func = dirscan; if (chkrange(idesc->id_blkno, idesc->id_numfrags)) return (SKIP); bp = getdatablk(idesc->id_blkno, sblock.fs_bsize, type); ilevel--; for (sizepb = sblock.fs_bsize, i = 0; i < ilevel; i++) sizepb *= NINDIR(&sblock); if (howmany(isize, sizepb) > NINDIR(&sblock)) nif = NINDIR(&sblock); else nif = howmany(isize, sizepb); if (idesc->id_func == pass1check && nif < NINDIR(&sblock)) { for (i = nif; i < NINDIR(&sblock); i++) { if (IBLK(bp, i) == 0) continue; (void)sprintf(buf, "PARTIALLY TRUNCATED INODE I=%lu", (u_long)idesc->id_number); if (preen) { pfatal("%s", buf); } 
else if (dofix(idesc, buf)) { IBLK_SET(bp, i, 0); dirty(bp); } } flush(fswritefd, bp); } for (i = 0; i < nif; i++) { if (ilevel == 0) idesc->id_lbn++; if (IBLK(bp, i)) { idesc->id_blkno = IBLK(bp, i); if (ilevel == 0) n = (*func)(idesc); else n = iblock(idesc, ilevel, isize, type); if (n & STOP) { bp->b_flags &= ~B_INUSE; return (n); } } else { if (idesc->id_type == DATA && isize > 0) { /* An empty block in a directory XXX */ getpathname(pathbuf, idesc->id_number, idesc->id_number); pfatal("DIRECTORY %s: CONTAINS EMPTY BLOCKS", pathbuf); if (reply("ADJUST LENGTH") == 1) { dp = ginode(idesc->id_number); DIP_SET(dp, di_size, DIP(dp, di_size) - isize); isize = 0; printf( "YOU MUST RERUN FSCK AFTERWARDS\n"); rerun = 1; inodirty(dp); bp->b_flags &= ~B_INUSE; return(STOP); } } } isize -= sizepb; } bp->b_flags &= ~B_INUSE; return (KEEPON); } /* * Check that a block in a legal block number. * Return 0 if in range, 1 if out of range. */ int chkrange(ufs2_daddr_t blk, int cnt) { int c; if (cnt <= 0 || blk <= 0 || blk > maxfsblock || cnt - 1 > maxfsblock - blk) return (1); if (cnt > sblock.fs_frag || fragnum(&sblock, blk) + cnt > sblock.fs_frag) { if (debug) printf("bad size: blk %ld, offset %i, size %d\n", (long)blk, (int)fragnum(&sblock, blk), cnt); return (1); } c = dtog(&sblock, blk); if (blk < cgdmin(&sblock, c)) { if ((blk + cnt) > cgsblock(&sblock, c)) { if (debug) { printf("blk %ld < cgdmin %ld;", (long)blk, (long)cgdmin(&sblock, c)); printf(" blk + cnt %ld > cgsbase %ld\n", (long)(blk + cnt), (long)cgsblock(&sblock, c)); } return (1); } } else { if ((blk + cnt) > cgbase(&sblock, c+1)) { if (debug) { printf("blk %ld >= cgdmin %ld;", (long)blk, (long)cgdmin(&sblock, c)); printf(" blk + cnt %ld > sblock.fs_fpg %ld\n", (long)(blk + cnt), (long)sblock.fs_fpg); } return (1); } } return (0); } /* * General purpose interface for reading inodes. */ union dinode * ginode(ino_t inumber) { ufs2_daddr_t iblk; if (inumber < UFS_ROOTINO || inumber > maxino) errx(EEXIT, "bad inode number %ju to ginode", (uintmax_t)inumber); if (startinum == 0 || inumber < startinum || inumber >= startinum + INOPB(&sblock)) { iblk = ino_to_fsba(&sblock, inumber); if (pbp != NULL) pbp->b_flags &= ~B_INUSE; pbp = getdatablk(iblk, sblock.fs_bsize, BT_INODES); startinum = rounddown(inumber, INOPB(&sblock)); } if (sblock.fs_magic == FS_UFS1_MAGIC) return ((union dinode *) &pbp->b_un.b_dinode1[inumber % INOPB(&sblock)]); return ((union dinode *)&pbp->b_un.b_dinode2[inumber % INOPB(&sblock)]); } /* * Special purpose version of ginode used to optimize first pass * over all the inodes in numerical order. */ static ino_t nextino, lastinum, lastvalidinum; static long readcount, readpercg, fullcnt, inobufsize, partialcnt, partialsize; static struct bufarea inobuf; union dinode * getnextinode(ino_t inumber, int rebuildcg) { int j; long size; mode_t mode; ufs2_daddr_t ndb, blk; union dinode *dp; static caddr_t nextinop; if (inumber != nextino++ || inumber > lastvalidinum) errx(EEXIT, "bad inode number %ju to nextinode", (uintmax_t)inumber); if (inumber >= lastinum) { readcount++; blk = ino_to_fsba(&sblock, lastinum); if (readcount % readpercg == 0) { size = partialsize; lastinum += partialcnt; } else { size = inobufsize; lastinum += fullcnt; } /* * If getblk encounters an error, it will already have zeroed * out the buffer, so we do not need to do so here. 
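		 * The read is done a chunk at a time: setinodebuf() below
		 * sizes things so that each cylinder group is covered by
		 * readpercg reads, all of inobufsize bytes except the last,
		 * which fetches just the partialsize remainder, so this
		 * getblk() always pulls in the next whole chunk of the
		 * inode area.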
*/ getblk(&inobuf, blk, size); nextinop = inobuf.b_un.b_buf; } dp = (union dinode *)nextinop; - if (rebuildcg && nextinop == inobuf.b_un.b_buf) { + if (sblock.fs_magic == FS_UFS1_MAGIC) + nextinop += sizeof(struct ufs1_dinode); + else + nextinop += sizeof(struct ufs2_dinode); + if (rebuildcg && (char *)dp == inobuf.b_un.b_buf) { /* * Try to determine if we have reached the end of the * allocated inodes. */ mode = DIP(dp, di_mode) & IFMT; if (mode == 0) { if (memcmp(dp->dp2.di_db, ufs2_zino.di_db, UFS_NDADDR * sizeof(ufs2_daddr_t)) || memcmp(dp->dp2.di_ib, ufs2_zino.di_ib, UFS_NIADDR * sizeof(ufs2_daddr_t)) || dp->dp2.di_mode || dp->dp2.di_size) return (NULL); - goto inodegood; + return (dp); } if (!ftypeok(dp)) return (NULL); ndb = howmany(DIP(dp, di_size), sblock.fs_bsize); if (ndb < 0) return (NULL); if (mode == IFBLK || mode == IFCHR) ndb++; if (mode == IFLNK) { /* * Fake ndb value so direct/indirect block checks below * will detect any garbage after symlink string. */ if (DIP(dp, di_size) < (off_t)sblock.fs_maxsymlinklen) { ndb = howmany(DIP(dp, di_size), sizeof(ufs2_daddr_t)); if (ndb > UFS_NDADDR) { j = ndb - UFS_NDADDR; for (ndb = 1; j > 1; j--) ndb *= NINDIR(&sblock); ndb += UFS_NDADDR; } } } for (j = ndb; ndb < UFS_NDADDR && j < UFS_NDADDR; j++) if (DIP(dp, di_db[j]) != 0) return (NULL); for (j = 0, ndb -= UFS_NDADDR; ndb > 0; j++) ndb /= NINDIR(&sblock); for (; j < UFS_NIADDR; j++) if (DIP(dp, di_ib[j]) != 0) return (NULL); } -inodegood: - if (sblock.fs_magic == FS_UFS1_MAGIC) - nextinop += sizeof(struct ufs1_dinode); - else - nextinop += sizeof(struct ufs2_dinode); return (dp); } void setinodebuf(ino_t inum) { if (inum % sblock.fs_ipg != 0) errx(EEXIT, "bad inode number %ju to setinodebuf", (uintmax_t)inum); lastvalidinum = inum + sblock.fs_ipg - 1; startinum = 0; nextino = inum; lastinum = inum; readcount = 0; if (inobuf.b_un.b_buf != NULL) return; inobufsize = blkroundup(&sblock, INOBUFSIZE); fullcnt = inobufsize / ((sblock.fs_magic == FS_UFS1_MAGIC) ? sizeof(struct ufs1_dinode) : sizeof(struct ufs2_dinode)); readpercg = sblock.fs_ipg / fullcnt; partialcnt = sblock.fs_ipg % fullcnt; partialsize = partialcnt * ((sblock.fs_magic == FS_UFS1_MAGIC) ? sizeof(struct ufs1_dinode) : sizeof(struct ufs2_dinode)); if (partialcnt != 0) { readpercg++; } else { partialcnt = fullcnt; partialsize = inobufsize; } initbarea(&inobuf, BT_INODES); if ((inobuf.b_un.b_buf = Malloc((unsigned)inobufsize)) == NULL) errx(EEXIT, "cannot allocate space for inode buffer"); } void freeinodebuf(void) { if (inobuf.b_un.b_buf != NULL) free((char *)inobuf.b_un.b_buf); inobuf.b_un.b_buf = NULL; } /* * Routines to maintain information about directory inodes. * This is built during the first pass and used during the * second and third passes. * * Enter inodes into the cache. */ void cacheino(union dinode *dp, ino_t inumber) { struct inoinfo *inp, **inpp; int i, blks; if (howmany(DIP(dp, di_size), sblock.fs_bsize) > UFS_NDADDR) blks = UFS_NDADDR + UFS_NIADDR; else if (DIP(dp, di_size) > 0) blks = howmany(DIP(dp, di_size), sblock.fs_bsize); else blks = 1; inp = (struct inoinfo *) Malloc(sizeof(*inp) + (blks - 1) * sizeof(ufs2_daddr_t)); if (inp == NULL) errx(EEXIT, "cannot increase directory list"); inpp = &inphead[inumber % dirhash]; inp->i_nexthash = *inpp; *inpp = inp; inp->i_parent = inumber == UFS_ROOTINO ? 
UFS_ROOTINO : (ino_t)0; inp->i_dotdot = (ino_t)0; inp->i_number = inumber; inp->i_isize = DIP(dp, di_size); inp->i_numblks = blks; for (i = 0; i < MIN(blks, UFS_NDADDR); i++) inp->i_blks[i] = DIP(dp, di_db[i]); if (blks > UFS_NDADDR) for (i = 0; i < UFS_NIADDR; i++) inp->i_blks[UFS_NDADDR + i] = DIP(dp, di_ib[i]); if (inplast == listmax) { listmax += 100; inpsort = (struct inoinfo **)reallocarray((char *)inpsort, listmax, sizeof(struct inoinfo *)); if (inpsort == NULL) errx(EEXIT, "cannot increase directory list"); } inpsort[inplast++] = inp; } /* * Look up an inode cache structure. */ struct inoinfo * getinoinfo(ino_t inumber) { struct inoinfo *inp; for (inp = inphead[inumber % dirhash]; inp; inp = inp->i_nexthash) { if (inp->i_number != inumber) continue; return (inp); } errx(EEXIT, "cannot find inode %ju", (uintmax_t)inumber); return ((struct inoinfo *)0); } /* * Clean up all the inode cache structure. */ void inocleanup(void) { struct inoinfo **inpp; if (inphead == NULL) return; for (inpp = &inpsort[inplast - 1]; inpp >= inpsort; inpp--) free((char *)(*inpp)); free((char *)inphead); free((char *)inpsort); inphead = inpsort = NULL; } void inodirty(union dinode *dp) { dirty(pbp); } void clri(struct inodesc *idesc, const char *type, int flag) { union dinode *dp; dp = ginode(idesc->id_number); if (flag == 1) { pwarn("%s %s", type, (DIP(dp, di_mode) & IFMT) == IFDIR ? "DIR" : "FILE"); - pinode(idesc->id_number); + prtinode(idesc->id_number, dp); + printf("\n"); } if (preen || reply("CLEAR") == 1) { if (preen) printf(" (CLEARED)\n"); n_files--; if (bkgrdflag == 0) { (void)ckinode(dp, idesc); inoinfo(idesc->id_number)->ino_state = USTATE; clearinode(dp); inodirty(dp); } else { cmd.value = idesc->id_number; cmd.size = -DIP(dp, di_nlink); if (debug) printf("adjrefcnt ino %ld amt %lld\n", (long)cmd.value, (long long)cmd.size); if (sysctl(adjrefcnt, MIBSIZE, 0, 0, &cmd, sizeof cmd) == -1) rwerror("ADJUST INODE", cmd.value); } } } int findname(struct inodesc *idesc) { struct direct *dirp = idesc->id_dirp; if (dirp->d_ino != idesc->id_parent || idesc->id_entryno < 2) { idesc->id_entryno++; return (KEEPON); } memmove(idesc->id_name, dirp->d_name, (size_t)dirp->d_namlen + 1); return (STOP|FOUND); } int findino(struct inodesc *idesc) { struct direct *dirp = idesc->id_dirp; if (dirp->d_ino == 0) return (KEEPON); if (strcmp(dirp->d_name, idesc->id_name) == 0 && dirp->d_ino >= UFS_ROOTINO && dirp->d_ino <= maxino) { idesc->id_parent = dirp->d_ino; return (STOP|FOUND); } return (KEEPON); } int clearentry(struct inodesc *idesc) { struct direct *dirp = idesc->id_dirp; if (dirp->d_ino != idesc->id_parent || idesc->id_entryno < 2) { idesc->id_entryno++; return (KEEPON); } dirp->d_ino = 0; return (STOP|FOUND|ALTERED); } void -pinode(ino_t ino) +prtinode(ino_t ino, union dinode *dp) { - union dinode *dp; char *p; struct passwd *pw; time_t t; printf(" I=%lu ", (u_long)ino); if (ino < UFS_ROOTINO || ino > maxino) return; - dp = ginode(ino); printf(" OWNER="); if ((pw = getpwuid((int)DIP(dp, di_uid))) != NULL) printf("%s ", pw->pw_name); else printf("%u ", (unsigned)DIP(dp, di_uid)); printf("MODE=%o\n", DIP(dp, di_mode)); if (preen) printf("%s: ", cdevname); printf("SIZE=%ju ", (uintmax_t)DIP(dp, di_size)); t = DIP(dp, di_mtime); p = ctime(&t); printf("MTIME=%12.12s %4.4s ", &p[4], &p[20]); } void blkerror(ino_t ino, const char *type, ufs2_daddr_t blk) { pfatal("%jd %s I=%ju", (intmax_t)blk, type, (uintmax_t)ino); printf("\n"); switch (inoinfo(ino)->ino_state) { case FSTATE: case FZLINK: inoinfo(ino)->ino_state = 
FCLEAR; return; case DSTATE: case DZLINK: inoinfo(ino)->ino_state = DCLEAR; return; case FCLEAR: case DCLEAR: return; default: errx(EEXIT, "BAD STATE %d TO BLKERR", inoinfo(ino)->ino_state); /* NOTREACHED */ } } /* * allocate an unused inode */ ino_t allocino(ino_t request, int type) { ino_t ino; union dinode *dp; struct bufarea *cgbp; struct cg *cgp; int cg; if (request == 0) request = UFS_ROOTINO; else if (inoinfo(request)->ino_state != USTATE) return (0); for (ino = request; ino < maxino; ino++) if (inoinfo(ino)->ino_state == USTATE) break; if (ino == maxino) return (0); cg = ino_to_cg(&sblock, ino); cgbp = cglookup(cg); cgp = cgbp->b_un.b_cg; if (!check_cgmagic(cg, cgbp)) return (0); setbit(cg_inosused(cgp), ino % sblock.fs_ipg); cgp->cg_cs.cs_nifree--; switch (type & IFMT) { case IFDIR: inoinfo(ino)->ino_state = DSTATE; cgp->cg_cs.cs_ndir++; break; case IFREG: case IFLNK: inoinfo(ino)->ino_state = FSTATE; break; default: return (0); } dirty(cgbp); dp = ginode(ino); DIP_SET(dp, di_db[0], allocblk((long)1)); if (DIP(dp, di_db[0]) == 0) { inoinfo(ino)->ino_state = USTATE; return (0); } DIP_SET(dp, di_mode, type); DIP_SET(dp, di_flags, 0); DIP_SET(dp, di_atime, time(NULL)); DIP_SET(dp, di_ctime, DIP(dp, di_atime)); DIP_SET(dp, di_mtime, DIP(dp, di_ctime)); DIP_SET(dp, di_mtimensec, 0); DIP_SET(dp, di_ctimensec, 0); DIP_SET(dp, di_atimensec, 0); DIP_SET(dp, di_size, sblock.fs_fsize); DIP_SET(dp, di_blocks, btodb(sblock.fs_fsize)); n_files++; inodirty(dp); inoinfo(ino)->ino_type = IFTODT(type); return (ino); } /* * deallocate an inode */ void freeino(ino_t ino) { struct inodesc idesc; union dinode *dp; memset(&idesc, 0, sizeof(struct inodesc)); idesc.id_type = ADDR; idesc.id_func = pass4check; idesc.id_number = ino; dp = ginode(ino); (void)ckinode(dp, &idesc); clearinode(dp); inodirty(dp); inoinfo(ino)->ino_state = USTATE; n_files--; } Index: projects/clang700-import/sbin/fsck_ffs/main.c =================================================================== --- projects/clang700-import/sbin/fsck_ffs/main.c (revision 340426) +++ projects/clang700-import/sbin/fsck_ffs/main.c (revision 340427) @@ -1,740 +1,750 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1980, 1986, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if 0 #ifndef lint static const char copyright[] = "@(#) Copyright (c) 1980, 1986, 1993\n\ The Regents of the University of California. All rights reserved.\n"; #endif /* not lint */ #ifndef lint static char sccsid[] = "@(#)main.c 8.6 (Berkeley) 5/14/95"; #endif /* not lint */ #endif #include __FBSDID("$FreeBSD$"); #define IN_RTLD /* So we pickup the P_OSREL defines */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "fsck.h" int restarts; static void usage(void) __dead2; static intmax_t argtoimax(int flag, const char *req, const char *str, int base); static int checkfilesys(char *filesys); static int chkdoreload(struct statfs *mntp); static struct statfs *getmntpt(const char *); int main(int argc, char *argv[]) { int ch; struct rlimit rlimit; struct itimerval itimerval; int fsret; int ret = 0; sync(); skipclean = 1; inoopt = 0; while ((ch = getopt(argc, argv, "b:Bc:CdEfFm:npRrSyZ")) != -1) { switch (ch) { case 'b': skipclean = 0; bflag = argtoimax('b', "number", optarg, 10); printf("Alternate super block location: %jd\n", bflag); break; case 'B': bkgrdflag = 1; break; case 'c': skipclean = 0; cvtlevel = argtoimax('c', "conversion level", optarg, 10); if (cvtlevel < 3) errx(EEXIT, "cannot do level %d conversion", cvtlevel); break; case 'd': debug++; break; case 'E': Eflag++; break; case 'f': skipclean = 0; break; case 'F': bkgrdcheck = 1; break; case 'm': lfmode = argtoimax('m', "mode", optarg, 8); if (lfmode &~ 07777) errx(EEXIT, "bad mode to -m: %o", lfmode); printf("** lost+found creation mode %o\n", lfmode); break; case 'n': nflag++; yflag = 0; break; case 'p': preen++; /*FALLTHROUGH*/ case 'C': ckclean++; break; case 'R': wantrestart = 1; break; case 'r': inoopt++; break; case 'S': surrender = 1; break; case 'y': yflag++; nflag = 0; break; case 'Z': Zflag++; break; default: usage(); } } argc -= optind; argv += optind; if (!argc) usage(); if (signal(SIGINT, SIG_IGN) != SIG_IGN) (void)signal(SIGINT, catch); if (ckclean) (void)signal(SIGQUIT, catchquit); signal(SIGINFO, infohandler); if (bkgrdflag) { signal(SIGALRM, alarmhandler); itimerval.it_interval.tv_sec = 5; itimerval.it_interval.tv_usec = 0; itimerval.it_value.tv_sec = 5; itimerval.it_value.tv_usec = 0; setitimer(ITIMER_REAL, &itimerval, NULL); } /* * Push up our allowed memory limit so we can cope * with huge file systems. */ if (getrlimit(RLIMIT_DATA, &rlimit) == 0) { rlimit.rlim_cur = rlimit.rlim_max; (void)setrlimit(RLIMIT_DATA, &rlimit); } while (argc > 0) { if ((fsret = checkfilesys(*argv)) == ERESTART) continue; ret |= fsret; argc--; argv++; } if (returntosingle) ret = 2; exit(ret); } static intmax_t argtoimax(int flag, const char *req, const char *str, int base) { char *cp; intmax_t ret; ret = strtoimax(str, &cp, base); if (cp == str || *cp) errx(EEXIT, "-%c flag requires a %s", flag, req); return (ret); } /* * Check the specified file system. 
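/*
 * Illustrative sketch, not part of the original file: how fsck asks the
 * kernel to repair counts when running in background mode on a live file
 * system.  The MIB in adjrefcnt is resolved from "vfs.ffs.adjrefcnt" by
 * checkfilesys() below via sysctlnametomib(3); after that, individual
 * fsck_cmd requests are pushed through sysctl(3), just as clri() does in
 * inode.c.  The helper name is hypothetical.
 */
static void
bkgrd_adjust_refcnt(ino_t ino, int64_t amount)
{
	cmd.value = ino;
	cmd.size = amount;
	if (sysctl(adjrefcnt, MIBSIZE, 0, 0, &cmd, sizeof cmd) == -1)
		rwerror("ADJUST INODE", cmd.value);
}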
*/ /* ARGSUSED */ static int checkfilesys(char *filesys) { ufs2_daddr_t n_ffree, n_bfree; struct dups *dp; struct statfs *mntp; struct stat snapdir; struct group *grp; struct iovec *iov; char errmsg[255]; int ofsmodified; int iovlen; int cylno; intmax_t blks, files; size_t size; iov = NULL; iovlen = 0; errmsg[0] = '\0'; fsutilinit(); fsckinit(); cdevname = filesys; if (debug && ckclean) pwarn("starting\n"); /* * Make best effort to get the disk name. Check first to see * if it is listed among the mounted file systems. Failing that * check to see if it is listed in /etc/fstab. */ mntp = getmntpt(filesys); if (mntp != NULL) filesys = mntp->f_mntfromname; else filesys = blockcheck(filesys); /* * If -F flag specified, check to see whether a background check * is possible and needed. If possible and needed, exit with * status zero. Otherwise exit with status non-zero. A non-zero * exit status will cause a foreground check to be run. */ sblock_init(); if (bkgrdcheck) { if ((fsreadfd = open(filesys, O_RDONLY)) < 0 || readsb(0) == 0) exit(3); /* Cannot read superblock */ close(fsreadfd); /* Earlier background failed or journaled */ if (sblock.fs_flags & (FS_NEEDSFSCK | FS_SUJ)) exit(4); if ((sblock.fs_flags & FS_DOSOFTDEP) == 0) exit(5); /* Not running soft updates */ size = MIBSIZE; if (sysctlnametomib("vfs.ffs.adjrefcnt", adjrefcnt, &size) < 0) exit(6); /* Lacks kernel support */ if ((mntp == NULL && sblock.fs_clean == 1) || (mntp != NULL && (sblock.fs_flags & FS_UNCLEAN) == 0)) exit(7); /* Filesystem clean, report it now */ exit(0); } if (ckclean && skipclean) { /* * If file system is gjournaled, check it here. */ if ((fsreadfd = open(filesys, O_RDONLY)) < 0 || readsb(0) == 0) exit(3); /* Cannot read superblock */ close(fsreadfd); if ((sblock.fs_flags & FS_GJOURNAL) != 0) { //printf("GJournaled file system detected on %s.\n", // filesys); if (sblock.fs_clean == 1) { pwarn("FILE SYSTEM CLEAN; SKIPPING CHECKS\n"); exit(0); } if ((sblock.fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0) { gjournal_check(filesys); if (chkdoreload(mntp) == 0) exit(0); exit(4); } else { pfatal( "UNEXPECTED INCONSISTENCY, CANNOT RUN FAST FSCK\n"); } } } /* * If we are to do a background check: * Get the mount point information of the file system * create snapshot file * return created snapshot file * if not found, clear bkgrdflag and proceed with normal fsck */ if (bkgrdflag) { if (mntp == NULL) { bkgrdflag = 0; pfatal("NOT MOUNTED, CANNOT RUN IN BACKGROUND\n"); } else if ((mntp->f_flags & MNT_SOFTDEP) == 0) { bkgrdflag = 0; pfatal( "NOT USING SOFT UPDATES, CANNOT RUN IN BACKGROUND\n"); } else if ((mntp->f_flags & MNT_RDONLY) != 0) { bkgrdflag = 0; pfatal("MOUNTED READ-ONLY, CANNOT RUN IN BACKGROUND\n"); } else if ((fsreadfd = open(filesys, O_RDONLY)) >= 0) { if (readsb(0) != 0) { if (sblock.fs_flags & (FS_NEEDSFSCK | FS_SUJ)) { bkgrdflag = 0; pfatal( "UNEXPECTED INCONSISTENCY, CANNOT RUN IN BACKGROUND\n"); } if ((sblock.fs_flags & FS_UNCLEAN) == 0 && skipclean && ckclean) { /* * file system is clean; * skip snapshot and report it clean */ pwarn( "FILE SYSTEM CLEAN; SKIPPING CHECKS\n"); goto clean; } } close(fsreadfd); } if (bkgrdflag) { snprintf(snapname, sizeof snapname, "%s/.snap", mntp->f_mntonname); if (stat(snapname, &snapdir) < 0) { if (errno != ENOENT) { bkgrdflag = 0; pfatal( "CANNOT FIND SNAPSHOT DIRECTORY %s: %s, CANNOT RUN IN BACKGROUND\n", snapname, strerror(errno)); } else if ((grp = getgrnam("operator")) == NULL || mkdir(snapname, 0770) < 0 || chown(snapname, -1, grp->gr_gid) < 0 || chmod(snapname, 0770) < 
0) { bkgrdflag = 0; pfatal( "CANNOT CREATE SNAPSHOT DIRECTORY %s: %s, CANNOT RUN IN BACKGROUND\n", snapname, strerror(errno)); } } else if (!S_ISDIR(snapdir.st_mode)) { bkgrdflag = 0; pfatal( "%s IS NOT A DIRECTORY, CANNOT RUN IN BACKGROUND\n", snapname); } } if (bkgrdflag) { snprintf(snapname, sizeof snapname, "%s/.snap/fsck_snapshot", mntp->f_mntonname); build_iovec(&iov, &iovlen, "fstype", "ffs", 4); build_iovec(&iov, &iovlen, "from", snapname, (size_t)-1); build_iovec(&iov, &iovlen, "fspath", mntp->f_mntonname, (size_t)-1); build_iovec(&iov, &iovlen, "errmsg", errmsg, sizeof(errmsg)); build_iovec(&iov, &iovlen, "update", NULL, 0); build_iovec(&iov, &iovlen, "snapshot", NULL, 0); while (nmount(iov, iovlen, mntp->f_flags) < 0) { if (errno == EEXIST && unlink(snapname) == 0) continue; bkgrdflag = 0; pfatal("CANNOT CREATE SNAPSHOT %s: %s %s\n", snapname, strerror(errno), errmsg); break; } if (bkgrdflag != 0) filesys = snapname; } } switch (setup(filesys)) { case 0: if (preen) pfatal("CAN'T CHECK FILE SYSTEM."); return (0); case -1: clean: pwarn("clean, %ld free ", (long)(sblock.fs_cstotal.cs_nffree + sblock.fs_frag * sblock.fs_cstotal.cs_nbfree)); printf("(%jd frags, %jd blocks, %.1f%% fragmentation)\n", (intmax_t)sblock.fs_cstotal.cs_nffree, (intmax_t)sblock.fs_cstotal.cs_nbfree, sblock.fs_cstotal.cs_nffree * 100.0 / sblock.fs_dsize); return (0); } /* * Determine if we can and should do journal recovery. */ if ((sblock.fs_flags & FS_SUJ) == FS_SUJ) { if ((sblock.fs_flags & FS_NEEDSFSCK) != FS_NEEDSFSCK && skipclean) { if (preen || reply("USE JOURNAL")) { if (suj_check(filesys) == 0) { printf("\n***** FILE SYSTEM MARKED CLEAN *****\n"); if (chkdoreload(mntp) == 0) exit(0); exit(4); } } printf("** Skipping journal, falling through to full fsck\n\n"); } /* * Write the superblock so we don't try to recover the * journal on another pass. If this is the only change * to the filesystem, we do not want it to be called * out as modified. */ sblock.fs_mtime = time(NULL); sbdirty(); ofsmodified = fsmodified; flush(fswritefd, &sblk); fsmodified = ofsmodified; } /* * If the filesystem was run on an old kernel that did not * support check hashes, clear the check-hash flags so that * we do not try to verify them. */ if ((sblock.fs_flags & FS_METACKHASH) == 0) sblock.fs_metackhash = 0; /* * If we are running on a kernel that can provide check hashes * that are not yet enabled for the filesystem and we are * running manually without the -y flag, offer to add any * supported check hashes that are not already enabled. 
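	 * For each hash the operator accepts below, the CK_* bit is
	 * remembered in ckhashadd so that the later passes know to start
	 * computing that hash, and it is turned on in fs_metackhash so the
	 * superblock advertises it; once any bit has been added,
	 * FS_METACKHASH is set and the superblock is marked dirty.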
*/ ckhashadd = 0; if (preen == 0 && yflag == 0 && sblock.fs_magic != FS_UFS1_MAGIC && fswritefd != -1 && getosreldate() >= P_OSREL_CK_CYLGRP) { if ((sblock.fs_metackhash & CK_CYLGRP) == 0 && - reply("ADD CYLINDER GROUP CHECK-HASH PROTECTION") != 0) + reply("ADD CYLINDER GROUP CHECK-HASH PROTECTION") != 0) { ckhashadd |= CK_CYLGRP; + sblock.fs_metackhash |= CK_CYLGRP; + } if ((sblock.fs_metackhash & CK_SUPERBLOCK) == 0 && getosreldate() >= P_OSREL_CK_SUPERBLOCK && reply("ADD SUPERBLOCK CHECK-HASH PROTECTION") != 0) { + ckhashadd |= CK_SUPERBLOCK; sblock.fs_metackhash |= CK_SUPERBLOCK; - sbdirty(); } #ifdef notyet if ((sblock.fs_metackhash & CK_INODE) == 0 && getosreldate() >= P_OSREL_CK_INODE && - reply("ADD INODE CHECK-HASH PROTECTION") != 0) + reply("ADD INODE CHECK-HASH PROTECTION") != 0) { ckhashadd |= CK_INODE; + sblock.fs_metackhash |= CK_INODE; + } if ((sblock.fs_metackhash & CK_INDIR) == 0 && getosreldate() >= P_OSREL_CK_INDIR && - reply("ADD INDIRECT BLOCK CHECK-HASH PROTECTION") != 0) + reply("ADD INDIRECT BLOCK CHECK-HASH PROTECTION") != 0) { ckhashadd |= CK_INDIR; + sblock.fs_metackhash |= CK_INDIR; + } if ((sblock.fs_metackhash & CK_DIR) == 0 && getosreldate() >= P_OSREL_CK_DIR && - reply("ADD DIRECTORY CHECK-HASH PROTECTION") != 0) + reply("ADD DIRECTORY CHECK-HASH PROTECTION") != 0) { ckhashadd |= CK_DIR; + sblock.fs_metackhash |= CK_DIR; + } #endif /* notyet */ - if (ckhashadd != 0) + if (ckhashadd != 0) { sblock.fs_flags |= FS_METACKHASH; + sbdirty(); + } } /* * Cleared if any questions answered no. Used to decide if * the superblock should be marked clean. */ resolved = 1; /* * 1: scan inodes tallying blocks used */ if (preen == 0) { printf("** Last Mounted on %s\n", sblock.fs_fsmnt); if (mntp != NULL && mntp->f_flags & MNT_ROOTFS) printf("** Root file system\n"); printf("** Phase 1 - Check Blocks and Sizes\n"); } clock_gettime(CLOCK_REALTIME_PRECISE, &startprog); pass1(); IOstats("Pass1"); /* * 1b: locate first references to duplicates, if any */ if (duplist) { if (preen || usedsoftdep) pfatal("INTERNAL ERROR: dups with %s%s%s", preen ? "-p" : "", (preen && usedsoftdep) ? " and " : "", usedsoftdep ? 
"softupdates" : ""); printf("** Phase 1b - Rescan For More DUPS\n"); pass1b(); IOstats("Pass1b"); } /* * 2: traverse directories from root to mark all connected directories */ if (preen == 0) printf("** Phase 2 - Check Pathnames\n"); pass2(); IOstats("Pass2"); /* * 3: scan inodes looking for disconnected directories */ if (preen == 0) printf("** Phase 3 - Check Connectivity\n"); pass3(); IOstats("Pass3"); /* * 4: scan inodes looking for disconnected files; check reference counts */ if (preen == 0) printf("** Phase 4 - Check Reference Counts\n"); pass4(); IOstats("Pass4"); /* * 5: check and repair resource counts in cylinder groups */ if (preen == 0) printf("** Phase 5 - Check Cyl groups\n"); pass5(); IOstats("Pass5"); /* * print out summary statistics */ n_ffree = sblock.fs_cstotal.cs_nffree; n_bfree = sblock.fs_cstotal.cs_nbfree; files = maxino - UFS_ROOTINO - sblock.fs_cstotal.cs_nifree - n_files; blks = n_blks + sblock.fs_ncg * (cgdmin(&sblock, 0) - cgsblock(&sblock, 0)); blks += cgsblock(&sblock, 0) - cgbase(&sblock, 0); blks += howmany(sblock.fs_cssize, sblock.fs_fsize); blks = maxfsblock - (n_ffree + sblock.fs_frag * n_bfree) - blks; if (bkgrdflag && (files > 0 || blks > 0)) { countdirs = sblock.fs_cstotal.cs_ndir - countdirs; pwarn("Reclaimed: %ld directories, %jd files, %jd fragments\n", countdirs, files - countdirs, blks); } pwarn("%ld files, %jd used, %ju free ", (long)n_files, (intmax_t)n_blks, (uintmax_t)n_ffree + sblock.fs_frag * n_bfree); printf("(%ju frags, %ju blocks, %.1f%% fragmentation)\n", (uintmax_t)n_ffree, (uintmax_t)n_bfree, n_ffree * 100.0 / sblock.fs_dsize); if (debug) { if (files < 0) printf("%jd inodes missing\n", -files); if (blks < 0) printf("%jd blocks missing\n", -blks); if (duplist != NULL) { printf("The following duplicate blocks remain:"); for (dp = duplist; dp; dp = dp->next) printf(" %jd,", (intmax_t)dp->dup); printf("\n"); } } duplist = (struct dups *)0; muldup = (struct dups *)0; inocleanup(); if (fsmodified) { sblock.fs_time = time(NULL); sbdirty(); } if (cvtlevel && sblk.b_dirty) { /* * Write out the duplicate super blocks */ for (cylno = 0; cylno < sblock.fs_ncg; cylno++) blwrite(fswritefd, (char *)&sblock, fsbtodb(&sblock, cgsblock(&sblock, cylno)), SBLOCKSIZE); } if (rerun) resolved = 0; finalIOstats(); /* * Check to see if the file system is mounted read-write. */ if (bkgrdflag == 0 && mntp != NULL && (mntp->f_flags & MNT_RDONLY) == 0) resolved = 0; ckfini(resolved); for (cylno = 0; cylno < sblock.fs_ncg; cylno++) if (inostathead[cylno].il_stat != NULL) free((char *)inostathead[cylno].il_stat); free((char *)inostathead); inostathead = NULL; if (fsmodified && !preen) printf("\n***** FILE SYSTEM WAS MODIFIED *****\n"); if (rerun) { if (wantrestart && (restarts++ < 10) && (preen || reply("RESTART"))) return (ERESTART); printf("\n***** PLEASE RERUN FSCK *****\n"); } if (chkdoreload(mntp) != 0) { if (!fsmodified) return (0); if (!preen) printf("\n***** REBOOT NOW *****\n"); sync(); return (4); } return (rerun ? ERERUN : 0); } static int chkdoreload(struct statfs *mntp) { struct iovec *iov; int iovlen; char errmsg[255]; if (mntp == NULL) return (0); iov = NULL; iovlen = 0; errmsg[0] = '\0'; /* * We modified a mounted file system. Do a mount update on * it unless it is read-write, so we can continue using it * as safely as possible. 
*/ if (mntp->f_flags & MNT_RDONLY) { build_iovec(&iov, &iovlen, "fstype", "ffs", 4); build_iovec(&iov, &iovlen, "from", mntp->f_mntfromname, (size_t)-1); build_iovec(&iov, &iovlen, "fspath", mntp->f_mntonname, (size_t)-1); build_iovec(&iov, &iovlen, "errmsg", errmsg, sizeof(errmsg)); build_iovec(&iov, &iovlen, "update", NULL, 0); build_iovec(&iov, &iovlen, "reload", NULL, 0); /* * XX: We need the following line until we clean up * nmount parsing of root mounts and NFS root mounts. */ build_iovec(&iov, &iovlen, "ro", NULL, 0); if (nmount(iov, iovlen, mntp->f_flags) == 0) { return (0); } pwarn("mount reload of '%s' failed: %s %s\n\n", mntp->f_mntonname, strerror(errno), errmsg); return (1); } return (0); } /* * Get the mount point information for name. */ static struct statfs * getmntpt(const char *name) { struct stat devstat, mntdevstat; char device[sizeof(_PATH_DEV) - 1 + MNAMELEN]; char *ddevname; struct statfs *mntbuf, *statfsp; int i, mntsize, isdev; if (stat(name, &devstat) != 0) return (NULL); if (S_ISCHR(devstat.st_mode) || S_ISBLK(devstat.st_mode)) isdev = 1; else isdev = 0; mntsize = getmntinfo(&mntbuf, MNT_NOWAIT); for (i = 0; i < mntsize; i++) { statfsp = &mntbuf[i]; ddevname = statfsp->f_mntfromname; if (*ddevname != '/') { if (strlen(_PATH_DEV) + strlen(ddevname) + 1 > sizeof(statfsp->f_mntfromname)) continue; strcpy(device, _PATH_DEV); strcat(device, ddevname); strcpy(statfsp->f_mntfromname, device); } if (isdev == 0) { if (strcmp(name, statfsp->f_mntonname)) continue; return (statfsp); } if (stat(ddevname, &mntdevstat) == 0 && mntdevstat.st_rdev == devstat.st_rdev) return (statfsp); } statfsp = NULL; return (statfsp); } static void usage(void) { (void) fprintf(stderr, "usage: %s [-BCdEFfnpRrSyZ] [-b block] [-c level] [-m mode] filesystem ...\n", getprogname()); exit(1); } void infohandler(int sig __unused) { got_siginfo = 1; } void alarmhandler(int sig __unused) { got_sigalarm = 1; } Index: projects/clang700-import/sbin/fsck_ffs/pass5.c =================================================================== --- projects/clang700-import/sbin/fsck_ffs/pass5.c (revision 340426) +++ projects/clang700-import/sbin/fsck_ffs/pass5.c (revision 340427) @@ -1,626 +1,623 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1980, 1986, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if 0 #ifndef lint static const char sccsid[] = "@(#)pass5.c 8.9 (Berkeley) 4/28/95"; #endif /* not lint */ #endif #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include "fsck.h" static void check_maps(u_char *, u_char *, int, ufs2_daddr_t, const char *, int *, int, int, int); static void clear_blocks(ufs2_daddr_t start, ufs2_daddr_t end); void pass5(void) { int c, i, j, blk, frags, basesize, mapsize; int inomapsize, blkmapsize; struct fs *fs = &sblock; ufs2_daddr_t d, dbase, dmax, start; int rewritecg = 0; struct csum *cs; struct csum_total cstotal; struct inodesc idesc[3]; char buf[MAXBSIZE]; struct cg *cg, *newcg = (struct cg *)buf; struct bufarea *cgbp; inoinfo(UFS_WINO)->ino_state = USTATE; memset(newcg, 0, (size_t)fs->fs_cgsize); newcg->cg_niblk = fs->fs_ipg; /* check to see if we are to add a cylinder group check hash */ - if ((ckhashadd & CK_CYLGRP) != 0) { - fs->fs_metackhash |= CK_CYLGRP; + if ((ckhashadd & CK_CYLGRP) != 0) rewritecg = 1; - sbdirty(); - } if (cvtlevel >= 3) { if (fs->fs_maxcontig < 2 && fs->fs_contigsumsize > 0) { if (preen) pwarn("DELETING CLUSTERING MAPS\n"); if (preen || reply("DELETE CLUSTERING MAPS")) { fs->fs_contigsumsize = 0; rewritecg = 1; sbdirty(); } } if (fs->fs_maxcontig > 1) { const char *doit = NULL; if (fs->fs_contigsumsize < 1) { doit = "CREAT"; } else if (fs->fs_contigsumsize < fs->fs_maxcontig && fs->fs_contigsumsize < FS_MAXCONTIG) { doit = "EXPAND"; } if (doit) { i = fs->fs_contigsumsize; fs->fs_contigsumsize = MIN(fs->fs_maxcontig, FS_MAXCONTIG); if (CGSIZE(fs) > (u_int)fs->fs_bsize) { pwarn("CANNOT %s CLUSTER MAPS\n", doit); fs->fs_contigsumsize = i; } else if (preen || reply("CREATE CLUSTER MAPS")) { if (preen) pwarn("%sING CLUSTER MAPS\n", doit); fs->fs_cgsize = fragroundup(fs, CGSIZE(fs)); rewritecg = 1; sbdirty(); } } } } basesize = &newcg->cg_space[0] - (u_char *)(&newcg->cg_firstfield); if (sblock.fs_magic == FS_UFS2_MAGIC) { newcg->cg_iusedoff = basesize; } else { /* * We reserve the space for the old rotation summary * tables for the benefit of old kernels, but do not * maintain them in modern kernels. In time, they can * go away. 
*/ newcg->cg_old_btotoff = basesize; newcg->cg_old_boff = newcg->cg_old_btotoff + fs->fs_old_cpg * sizeof(int32_t); newcg->cg_iusedoff = newcg->cg_old_boff + fs->fs_old_cpg * fs->fs_old_nrpos * sizeof(u_int16_t); memset(&newcg->cg_space[0], 0, newcg->cg_iusedoff - basesize); } inomapsize = howmany(fs->fs_ipg, CHAR_BIT); newcg->cg_freeoff = newcg->cg_iusedoff + inomapsize; blkmapsize = howmany(fs->fs_fpg, CHAR_BIT); newcg->cg_nextfreeoff = newcg->cg_freeoff + blkmapsize; if (fs->fs_contigsumsize > 0) { newcg->cg_clustersumoff = newcg->cg_nextfreeoff - sizeof(u_int32_t); newcg->cg_clustersumoff = roundup(newcg->cg_clustersumoff, sizeof(u_int32_t)); newcg->cg_clusteroff = newcg->cg_clustersumoff + (fs->fs_contigsumsize + 1) * sizeof(u_int32_t); newcg->cg_nextfreeoff = newcg->cg_clusteroff + howmany(fragstoblks(fs, fs->fs_fpg), CHAR_BIT); } newcg->cg_magic = CG_MAGIC; mapsize = newcg->cg_nextfreeoff - newcg->cg_iusedoff; memset(&idesc[0], 0, sizeof idesc); for (i = 0; i < 3; i++) idesc[i].id_type = ADDR; memset(&cstotal, 0, sizeof(struct csum_total)); dmax = blknum(fs, fs->fs_size + fs->fs_frag - 1); for (d = fs->fs_size; d < dmax; d++) setbmap(d); for (c = 0; c < fs->fs_ncg; c++) { if (got_siginfo) { printf("%s: phase 5: cyl group %d of %d (%d%%)\n", cdevname, c, sblock.fs_ncg, c * 100 / sblock.fs_ncg); got_siginfo = 0; } if (got_sigalarm) { setproctitle("%s p5 %d%%", cdevname, c * 100 / sblock.fs_ncg); got_sigalarm = 0; } cgbp = cglookup(c); cg = cgbp->b_un.b_cg; if (!cg_chkmagic(cg)) pfatal("CG %d: BAD MAGIC NUMBER\n", c); /* * If we have a cylinder group check hash and are not adding * it for the first time, verify that it is good. */ if ((fs->fs_metackhash & CK_CYLGRP) != 0 && (ckhashadd & CK_CYLGRP) == 0) { uint32_t ckhash, thishash; ckhash = cg->cg_ckhash; cg->cg_ckhash = 0; thishash = calculate_crc32c(~0L, cg, fs->fs_cgsize); if (ckhash != thishash) pwarn("CG %d: BAD CHECK-HASH %#x vs %#x\n", c, ckhash, thishash); cg->cg_ckhash = ckhash; } newcg->cg_time = cg->cg_time; newcg->cg_old_time = cg->cg_old_time; newcg->cg_unrefs = cg->cg_unrefs; newcg->cg_cgx = c; dbase = cgbase(fs, c); dmax = dbase + fs->fs_fpg; if (dmax > fs->fs_size) dmax = fs->fs_size; newcg->cg_ndblk = dmax - dbase; if (fs->fs_magic == FS_UFS1_MAGIC) { if (c == fs->fs_ncg - 1) newcg->cg_old_ncyl = howmany(newcg->cg_ndblk, fs->fs_fpg / fs->fs_old_cpg); else newcg->cg_old_ncyl = fs->fs_old_cpg; newcg->cg_old_niblk = fs->fs_ipg; newcg->cg_niblk = 0; } if (fs->fs_contigsumsize > 0) newcg->cg_nclusterblks = newcg->cg_ndblk / fs->fs_frag; newcg->cg_cs.cs_ndir = 0; newcg->cg_cs.cs_nffree = 0; newcg->cg_cs.cs_nbfree = 0; newcg->cg_cs.cs_nifree = fs->fs_ipg; if (cg->cg_rotor >= 0 && cg->cg_rotor < newcg->cg_ndblk) newcg->cg_rotor = cg->cg_rotor; else newcg->cg_rotor = 0; if (cg->cg_frotor >= 0 && cg->cg_frotor < newcg->cg_ndblk) newcg->cg_frotor = cg->cg_frotor; else newcg->cg_frotor = 0; if (cg->cg_irotor >= 0 && cg->cg_irotor < fs->fs_ipg) newcg->cg_irotor = cg->cg_irotor; else newcg->cg_irotor = 0; if (fs->fs_magic == FS_UFS1_MAGIC) { newcg->cg_initediblk = 0; } else { if ((unsigned)cg->cg_initediblk > fs->fs_ipg) newcg->cg_initediblk = fs->fs_ipg; else newcg->cg_initediblk = cg->cg_initediblk; } memset(&newcg->cg_frsum[0], 0, sizeof newcg->cg_frsum); memset(cg_inosused(newcg), 0, (size_t)(mapsize)); j = fs->fs_ipg * c; for (i = 0; i < inostathead[c].il_numalloced; j++, i++) { switch (inoinfo(j)->ino_state) { case USTATE: break; case DSTATE: case DCLEAR: case DFOUND: case DZLINK: newcg->cg_cs.cs_ndir++; /* FALLTHROUGH */ case 
FSTATE: case FCLEAR: case FZLINK: newcg->cg_cs.cs_nifree--; setbit(cg_inosused(newcg), i); break; default: if (j < (int)UFS_ROOTINO) break; errx(EEXIT, "BAD STATE %d FOR INODE I=%d", inoinfo(j)->ino_state, j); } } if (c == 0) for (i = 0; i < (int)UFS_ROOTINO; i++) { setbit(cg_inosused(newcg), i); newcg->cg_cs.cs_nifree--; } start = -1; for (i = 0, d = dbase; d < dmax; d += fs->fs_frag, i += fs->fs_frag) { frags = 0; for (j = 0; j < fs->fs_frag; j++) { if (testbmap(d + j)) { if ((Eflag || Zflag) && start != -1) { clear_blocks(start, d + j - 1); start = -1; } continue; } if (start == -1) start = d + j; setbit(cg_blksfree(newcg), i + j); frags++; } if (frags == fs->fs_frag) { newcg->cg_cs.cs_nbfree++; if (fs->fs_contigsumsize > 0) setbit(cg_clustersfree(newcg), i / fs->fs_frag); } else if (frags > 0) { newcg->cg_cs.cs_nffree += frags; blk = blkmap(fs, cg_blksfree(newcg), i); ffs_fragacct(fs, blk, newcg->cg_frsum, 1); } } if ((Eflag || Zflag) && start != -1) clear_blocks(start, d - 1); if (fs->fs_contigsumsize > 0) { int32_t *sump = cg_clustersum(newcg); u_char *mapp = cg_clustersfree(newcg); int map = *mapp++; int bit = 1; int run = 0; for (i = 0; i < newcg->cg_nclusterblks; i++) { if ((map & bit) != 0) { run++; } else if (run != 0) { if (run > fs->fs_contigsumsize) run = fs->fs_contigsumsize; sump[run]++; run = 0; } if ((i & (CHAR_BIT - 1)) != (CHAR_BIT - 1)) { bit <<= 1; } else { map = *mapp++; bit = 1; } } if (run != 0) { if (run > fs->fs_contigsumsize) run = fs->fs_contigsumsize; sump[run]++; } } if ((fs->fs_metackhash & CK_CYLGRP) != 0) { newcg->cg_ckhash = 0; newcg->cg_ckhash = calculate_crc32c(~0L, (void *)newcg, fs->fs_cgsize); } if (bkgrdflag != 0) { cstotal.cs_nffree += cg->cg_cs.cs_nffree; cstotal.cs_nbfree += cg->cg_cs.cs_nbfree; cstotal.cs_nifree += cg->cg_cs.cs_nifree; cstotal.cs_ndir += cg->cg_cs.cs_ndir; } else { cstotal.cs_nffree += newcg->cg_cs.cs_nffree; cstotal.cs_nbfree += newcg->cg_cs.cs_nbfree; cstotal.cs_nifree += newcg->cg_cs.cs_nifree; cstotal.cs_ndir += newcg->cg_cs.cs_ndir; } cs = &fs->fs_cs(fs, c); if (cursnapshot == 0 && memcmp(&newcg->cg_cs, cs, sizeof *cs) != 0 && dofix(&idesc[0], "FREE BLK COUNT(S) WRONG IN SUPERBLK")) { memmove(cs, &newcg->cg_cs, sizeof *cs); sbdirty(); } if (rewritecg) { memmove(cg, newcg, (size_t)fs->fs_cgsize); dirty(cgbp); continue; } if (cursnapshot == 0 && memcmp(newcg, cg, basesize) != 0 && dofix(&idesc[2], "SUMMARY INFORMATION BAD")) { memmove(cg, newcg, (size_t)basesize); dirty(cgbp); } if (bkgrdflag != 0 || usedsoftdep || debug) update_maps(cg, newcg, bkgrdflag); if (cursnapshot == 0 && memcmp(cg_inosused(newcg), cg_inosused(cg), mapsize) != 0 && dofix(&idesc[1], "BLK(S) MISSING IN BIT MAPS")) { memmove(cg_inosused(cg), cg_inosused(newcg), (size_t)mapsize); dirty(cgbp); } } if (cursnapshot == 0 && memcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal) != 0 && dofix(&idesc[0], "SUMMARY BLK COUNT(S) WRONG IN SUPERBLK")) { memmove(&fs->fs_cstotal, &cstotal, sizeof cstotal); fs->fs_ronly = 0; fs->fs_fmod = 0; sbdirty(); } /* * When doing background fsck on a snapshot, figure out whether * the superblock summary is inaccurate and correct it when * necessary. 
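The comment above describes how background fsck corrects the superblock summary; the code that follows pushes each delta into the kernel through sysctl rather than writing the superblock directly. A minimal sketch of that pattern, assuming the struct fsck_cmd layout from <ufs/ffs/fs.h> and assuming the adjndir MIB used here resolves to vfs.ffs.adjndir (the name is not spelled out in this hunk):

#include <sys/param.h>
#include <sys/sysctl.h>
#include <string.h>
#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>

/*
 * Sketch only: ask the kernel to adjust the directory count of a
 * mounted file system by "delta"; "fd" is an open descriptor for the
 * file system, which is how cmd.handle is assumed to be used.
 */
static int
adjust_ndir(int fd, int64_t delta)
{
        int mib[CTL_MAXNAME];
        size_t miblen = nitems(mib);
        struct fsck_cmd cmd;

        if (sysctlnametomib("vfs.ffs.adjndir", mib, &miblen) == -1)
                return (-1);
        memset(&cmd, 0, sizeof(cmd));
        cmd.version = FFS_CMD_VERSION;
        cmd.handle = fd;
        cmd.value = delta;
        cmd.size = 1;
        return (sysctl(mib, (u_int)miblen, NULL, NULL, &cmd, sizeof(cmd)));
}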
*/ if (cursnapshot != 0) { cmd.size = 1; cmd.value = cstotal.cs_ndir - fs->fs_cstotal.cs_ndir; if (cmd.value != 0) { if (debug) printf("adjndir by %+" PRIi64 "\n", cmd.value); if (bkgrdsumadj == 0 || sysctl(adjndir, MIBSIZE, 0, 0, &cmd, sizeof cmd) == -1) rwerror("ADJUST NUMBER OF DIRECTORIES", cmd.value); } cmd.value = cstotal.cs_nbfree - fs->fs_cstotal.cs_nbfree; if (cmd.value != 0) { if (debug) printf("adjnbfree by %+" PRIi64 "\n", cmd.value); if (bkgrdsumadj == 0 || sysctl(adjnbfree, MIBSIZE, 0, 0, &cmd, sizeof cmd) == -1) rwerror("ADJUST NUMBER OF FREE BLOCKS", cmd.value); } cmd.value = cstotal.cs_nifree - fs->fs_cstotal.cs_nifree; if (cmd.value != 0) { if (debug) printf("adjnifree by %+" PRIi64 "\n", cmd.value); if (bkgrdsumadj == 0 || sysctl(adjnifree, MIBSIZE, 0, 0, &cmd, sizeof cmd) == -1) rwerror("ADJUST NUMBER OF FREE INODES", cmd.value); } cmd.value = cstotal.cs_nffree - fs->fs_cstotal.cs_nffree; if (cmd.value != 0) { if (debug) printf("adjnffree by %+" PRIi64 "\n", cmd.value); if (bkgrdsumadj == 0 || sysctl(adjnffree, MIBSIZE, 0, 0, &cmd, sizeof cmd) == -1) rwerror("ADJUST NUMBER OF FREE FRAGS", cmd.value); } cmd.value = cstotal.cs_numclusters - fs->fs_cstotal.cs_numclusters; if (cmd.value != 0) { if (debug) printf("adjnumclusters by %+" PRIi64 "\n", cmd.value); if (bkgrdsumadj == 0 || sysctl(adjnumclusters, MIBSIZE, 0, 0, &cmd, sizeof cmd) == -1) rwerror("ADJUST NUMBER OF FREE CLUSTERS", cmd.value); } } } /* * Compare the original cylinder group inode and block bitmaps with the * updated cylinder group inode and block bitmaps. Free inodes and blocks * that have been added. Complain if any previously freed inodes blocks * are now allocated. */ void update_maps( struct cg *oldcg, /* cylinder group of claimed allocations */ struct cg *newcg, /* cylinder group of determined allocations */ int usesysctl) /* 1 => use sysctl interface to update maps */ { int inomapsize, excessdirs; struct fs *fs = &sblock; inomapsize = howmany(fs->fs_ipg, CHAR_BIT); excessdirs = oldcg->cg_cs.cs_ndir - newcg->cg_cs.cs_ndir; if (excessdirs < 0) { pfatal("LOST %d DIRECTORIES\n", -excessdirs); excessdirs = 0; } if (excessdirs > 0) check_maps(cg_inosused(newcg), cg_inosused(oldcg), inomapsize, oldcg->cg_cgx * (ufs2_daddr_t)fs->fs_ipg, "DIR", freedirs, 0, excessdirs, usesysctl); check_maps(cg_inosused(newcg), cg_inosused(oldcg), inomapsize, oldcg->cg_cgx * (ufs2_daddr_t)fs->fs_ipg, "FILE", freefiles, excessdirs, fs->fs_ipg, usesysctl); check_maps(cg_blksfree(oldcg), cg_blksfree(newcg), howmany(fs->fs_fpg, CHAR_BIT), oldcg->cg_cgx * (ufs2_daddr_t)fs->fs_fpg, "FRAG", freeblks, 0, fs->fs_fpg, usesysctl); } static void check_maps( u_char *map1, /* map of claimed allocations */ u_char *map2, /* map of determined allocations */ int mapsize, /* size of above two maps */ ufs2_daddr_t startvalue, /* resource value for first element in map */ const char *name, /* name of resource found in maps */ int *opcode, /* sysctl opcode to free resource */ int skip, /* number of entries to skip before starting to free */ int limit, /* limit on number of entries to free */ int usesysctl) /* 1 => use sysctl interface to update maps */ { # define BUFSIZE 16 char buf[BUFSIZE]; long i, j, k, l, m, size; ufs2_daddr_t n, astart, aend, ustart, uend; void (*msg)(const char *fmt, ...); if (usesysctl) msg = pfatal; else msg = pwarn; astart = ustart = aend = uend = -1; for (i = 0; i < mapsize; i++) { j = *map1++; k = *map2++; if (j == k) continue; for (m = 0, l = 1; m < CHAR_BIT; m++, l <<= 1) { if ((j & l) == (k & l)) continue; n = 
startvalue + i * CHAR_BIT + m; if ((j & l) != 0) { if (astart == -1) { astart = aend = n; continue; } if (aend + 1 == n) { aend = n; continue; } if (astart == aend) (*msg)("ALLOCATED %s %" PRId64 " MARKED FREE\n", name, astart); else (*msg)("%s %sS %" PRId64 "-%" PRId64 " MARKED FREE\n", "ALLOCATED", name, astart, aend); astart = aend = n; } else { if (ustart == -1) { ustart = uend = n; continue; } if (uend + 1 == n) { uend = n; continue; } size = uend - ustart + 1; if (size <= skip) { skip -= size; ustart = uend = n; continue; } if (skip > 0) { ustart += skip; size -= skip; skip = 0; } if (size > limit) size = limit; if (debug && size == 1) pwarn("%s %s %" PRId64 " MARKED USED\n", "UNALLOCATED", name, ustart); else if (debug) pwarn("%s %sS %" PRId64 "-%" PRId64 " MARKED USED\n", "UNALLOCATED", name, ustart, ustart + size - 1); if (usesysctl != 0) { cmd.value = ustart; cmd.size = size; if (sysctl(opcode, MIBSIZE, 0, 0, &cmd, sizeof cmd) == -1) { snprintf(buf, BUFSIZE, "FREE %s", name); rwerror(buf, cmd.value); } } limit -= size; if (limit <= 0) return; ustart = uend = n; } } } if (astart != -1) { if (astart == aend) (*msg)("ALLOCATED %s %" PRId64 " MARKED FREE\n", name, astart); else (*msg)("ALLOCATED %sS %" PRId64 "-%" PRId64 " MARKED FREE\n", name, astart, aend); } if (ustart != -1) { size = uend - ustart + 1; if (size <= skip) return; if (skip > 0) { ustart += skip; size -= skip; } if (size > limit) size = limit; if (debug) { if (size == 1) pwarn("UNALLOCATED %s %" PRId64 " MARKED USED\n", name, ustart); else pwarn("UNALLOCATED %sS %" PRId64 "-%" PRId64 " MARKED USED\n", name, ustart, ustart + size - 1); } if (usesysctl != 0) { cmd.value = ustart; cmd.size = size; if (sysctl(opcode, MIBSIZE, 0, 0, &cmd, sizeof cmd) == -1) { snprintf(buf, BUFSIZE, "FREE %s", name); rwerror(buf, cmd.value); } } } } static void clear_blocks(ufs2_daddr_t start, ufs2_daddr_t end) { if (debug) printf("Zero frags %jd to %jd\n", start, end); if (Zflag) blzero(fswritefd, fsbtodb(&sblock, start), lfragtosize(&sblock, end - start + 1)); if (Eflag) blerase(fswritefd, fsbtodb(&sblock, start), lfragtosize(&sblock, end - start + 1)); } Index: projects/clang700-import/sbin/fsirand/fsirand.c =================================================================== --- projects/clang700-import/sbin/fsirand/fsirand.c (revision 340426) +++ projects/clang700-import/sbin/fsirand/fsirand.c (revision 340427) @@ -1,234 +1,235 @@ /* $OpenBSD: fsirand.c,v 1.9 1997/02/28 00:46:33 millert Exp $ */ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997 Todd C. Miller * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Todd C. Miller. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef lint static const char rcsid[] = "$FreeBSD$"; #endif /* not lint */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void usage(void) __dead2; int fsirand(char *); static int printonly = 0, force = 0, ignorelabel = 0; int main(int argc, char *argv[]) { int n, ex = 0; struct rlimit rl; while ((n = getopt(argc, argv, "bfp")) != -1) { switch (n) { case 'b': ignorelabel++; break; case 'p': printonly++; break; case 'f': force++; break; default: usage(); } } if (argc - optind < 1) usage(); srandomdev(); /* Increase our data size to the max */ if (getrlimit(RLIMIT_DATA, &rl) == 0) { rl.rlim_cur = rl.rlim_max; if (setrlimit(RLIMIT_DATA, &rl) < 0) warn("can't get resource limit to max data size"); } else warn("can't get resource limit for data size"); for (n = optind; n < argc; n++) { if (argc - optind != 1) (void)puts(argv[n]); ex += fsirand(argv[n]); if (n < argc - 1) putchar('\n'); } exit(ex); } int fsirand(char *device) { struct ufs1_dinode *dp1; struct ufs2_dinode *dp2; caddr_t inodebuf; ssize_t ibufsize; struct fs *sblock; ino_t inumber; ufs2_daddr_t dblk; int devfd, n, cg, ret; u_int32_t bsize = DEV_BSIZE; if ((devfd = open(device, printonly ? 
O_RDONLY : O_RDWR)) < 0) { warn("can't open %s", device); return (1); } dp1 = NULL; dp2 = NULL; /* Read in master superblock */ if ((ret = sbget(devfd, &sblock, -1)) != 0) { switch (ret) { case ENOENT: warn("Cannot find file system superblock"); return (1); default: warn("Unable to read file system superblock"); return (1); } } if (sblock->fs_magic == FS_UFS1_MAGIC && sblock->fs_old_inodefmt < FS_44INODEFMT) { warnx("file system format is too old, sorry"); return (1); } if (!force && !printonly && sblock->fs_clean != 1) { warnx("file system is not clean, fsck %s first", device); return (1); } /* XXX - should really cap buffer at 512kb or so */ if (sblock->fs_magic == FS_UFS1_MAGIC) ibufsize = sizeof(struct ufs1_dinode) * sblock->fs_ipg; else ibufsize = sizeof(struct ufs2_dinode) * sblock->fs_ipg; if ((inodebuf = malloc(ibufsize)) == NULL) errx(1, "can't allocate memory for inode buffer"); if (printonly && (sblock->fs_id[0] || sblock->fs_id[1])) { if (sblock->fs_id[0]) (void)printf("%s was randomized on %s", device, ctime((void *)&(sblock->fs_id[0]))); (void)printf("fsid: %x %x\n", sblock->fs_id[0], sblock->fs_id[1]); } /* Randomize fs_id unless old 4.2BSD file system */ if (!printonly) { /* Randomize fs_id and write out new sblock and backups */ sblock->fs_id[0] = (u_int32_t)time(NULL); sblock->fs_id[1] = random(); if (sbput(devfd, sblock, sblock->fs_ncg) != 0) { warn("could not write updated superblock"); return (1); } } /* For each cylinder group, randomize inodes and update backup sblock */ - for (cg = 0, inumber = 0; cg < (int)sblock->fs_ncg; cg++) { + for (cg = 0, inumber = UFS_ROOTINO; cg < (int)sblock->fs_ncg; cg++) { /* Read in inodes, then print or randomize generation nums */ dblk = fsbtodb(sblock, ino_to_fsba(sblock, inumber)); if (lseek(devfd, (off_t)dblk * bsize, SEEK_SET) < 0) { warn("can't seek to %jd", (intmax_t)dblk * bsize); return (1); } else if ((n = read(devfd, inodebuf, ibufsize)) != ibufsize) { warnx("can't read inodes: %s", (n < ibufsize) ? "short read" : strerror(errno)); return (1); } - for (n = 0; n < (int)sblock->fs_ipg; n++, inumber++) { - if (sblock->fs_magic == FS_UFS1_MAGIC) - dp1 = &((struct ufs1_dinode *)inodebuf)[n]; - else - dp2 = &((struct ufs2_dinode *)inodebuf)[n]; - if (inumber >= UFS_ROOTINO) { - if (printonly) - (void)printf("ino %ju gen %08x\n", - (uintmax_t)inumber, - sblock->fs_magic == FS_UFS1_MAGIC ? - dp1->di_gen : dp2->di_gen); - else if (sblock->fs_magic == FS_UFS1_MAGIC) - dp1->di_gen = random(); - else - dp2->di_gen = random(); + dp1 = (struct ufs1_dinode *)(void *)inodebuf; + dp2 = (struct ufs2_dinode *)(void *)inodebuf; + for (n = cg > 0 ? 0 : UFS_ROOTINO; + n < (int)sblock->fs_ipg; + n++, inumber++) { + if (printonly) { + (void)printf("ino %ju gen %08x\n", + (uintmax_t)inumber, + sblock->fs_magic == FS_UFS1_MAGIC ? + dp1->di_gen : dp2->di_gen); + } else if (sblock->fs_magic == FS_UFS1_MAGIC) { + dp1->di_gen = arc4random(); + dp1++; + } else { + dp2->di_gen = arc4random(); + dp2++; } } /* Write out modified inodes */ if (!printonly) { if (lseek(devfd, (off_t)dblk * bsize, SEEK_SET) < 0) { warn("can't seek to %jd", (intmax_t)dblk * bsize); return (1); } else if ((n = write(devfd, inodebuf, ibufsize)) != ibufsize) { warnx("can't write inodes: %s", (n != ibufsize) ? 
"short write" : strerror(errno)); return (1); } } } (void)close(devfd); return(0); } static void usage(void) { (void)fprintf(stderr, "usage: fsirand [-b] [-f] [-p] special [special ...]\n"); exit(1); } Index: projects/clang700-import/sbin/growfs/growfs.c =================================================================== --- projects/clang700-import/sbin/growfs/growfs.c (revision 340426) +++ projects/clang700-import/sbin/growfs/growfs.c (revision 340427) @@ -1,1737 +1,1755 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1980, 1989, 1993 The Regents of the University of California. * Copyright (c) 2000 Christoph Herrmann, Thomas-Henning von Kamptz * Copyright (c) 2012 The FreeBSD Foundation * All rights reserved. * * This code is derived from software contributed to Berkeley by * Christoph Herrmann and Thomas-Henning von Kamptz, Munich and Frankfurt. * * Portions of this software were developed by Edward Tomasz Napierala * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgment: * This product includes software developed by the University of * California, Berkeley and its contributors, as well as Christoph * Herrmann and Thomas-Henning von Kamptz. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $TSHeader: src/sbin/growfs/growfs.c,v 1.5 2000/12/12 19:31:00 tomsoft Exp $ * */ #ifndef lint static const char copyright[] = "@(#) Copyright (c) 2000 Christoph Herrmann, Thomas-Henning von Kamptz\n\ Copyright (c) 1980, 1989, 1993 The Regents of the University of California.\n\ All rights reserved.\n"; #endif /* not lint */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "debug.h" #ifdef FS_DEBUG int _dbg_lvl_ = (DL_INFO); /* DL_TRC */ #endif /* FS_DEBUG */ static union { struct fs fs; char pad[SBLOCKSIZE]; } fsun1, fsun2; #define sblock fsun1.fs /* the new superblock */ #define osblock fsun2.fs /* the old superblock */ static union { struct cg cg; char pad[MAXBSIZE]; } cgun1, cgun2; #define acg cgun1.cg /* a cylinder cgroup (new) */ #define aocg cgun2.cg /* an old cylinder group */ static struct csum *fscs; /* cylinder summary */ static void growfs(int, int, unsigned int); static void rdfs(ufs2_daddr_t, size_t, void *, int); static void wtfs(ufs2_daddr_t, size_t, void *, int, unsigned int); static int charsperline(void); static void usage(void); static int isblock(struct fs *, unsigned char *, int); static void clrblock(struct fs *, unsigned char *, int); static void setblock(struct fs *, unsigned char *, int); static void initcg(int, time_t, int, unsigned int); static void updjcg(int, time_t, int, int, unsigned int); static void updcsloc(time_t, int, int, unsigned int); static void frag_adjust(ufs2_daddr_t, int); static void updclst(int); static void mount_reload(const struct statfs *stfs); static void cgckhash(struct cg *); /* * Here we actually start growing the file system. We basically read the * cylinder summary from the first cylinder group as we want to update * this on the fly during our various operations. First we handle the * changes in the former last cylinder group. Afterwards we create all new * cylinder groups. Now we handle the cylinder group containing the * cylinder summary which might result in a relocation of the whole * structure. In the end we write back the updated cylinder summary, the * new superblock, and slightly patched versions of the super block * copies. */ static void growfs(int fsi, int fso, unsigned int Nflag) { DBG_FUNC("growfs") time_t modtime; uint cylno; int i, j, width; char tmpbuf[100]; DBG_ENTER; time(&modtime); /* * Get the cylinder summary into the memory. */ fscs = (struct csum *)calloc((size_t)1, (size_t)sblock.fs_cssize); if (fscs == NULL) errx(1, "calloc failed"); memcpy(fscs, osblock.fs_csp, osblock.fs_cssize); free(osblock.fs_csp); osblock.fs_csp = NULL; sblock.fs_csp = fscs; #ifdef FS_DEBUG { struct csum *dbg_csp; u_int32_t dbg_csc; char dbg_line[80]; dbg_csp = fscs; for (dbg_csc = 0; dbg_csc < osblock.fs_ncg; dbg_csc++) { snprintf(dbg_line, sizeof(dbg_line), "%d. old csum in old location", dbg_csc); DBG_DUMP_CSUM(&osblock, dbg_line, dbg_csp++); } } #endif /* FS_DEBUG */ DBG_PRINT0("fscs read\n"); /* * Do all needed changes in the former last cylinder group. */ updjcg(osblock.fs_ncg - 1, modtime, fsi, fso, Nflag); /* * Dump out summary information about file system. 
*/ #ifdef FS_DEBUG #define B2MBFACTOR (1 / (1024.0 * 1024.0)) printf("growfs: %.1fMB (%jd sectors) block size %d, fragment size %d\n", (float)sblock.fs_size * sblock.fs_fsize * B2MBFACTOR, (intmax_t)fsbtodb(&sblock, sblock.fs_size), sblock.fs_bsize, sblock.fs_fsize); printf("\tusing %d cylinder groups of %.2fMB, %d blks, %d inodes.\n", sblock.fs_ncg, (float)sblock.fs_fpg * sblock.fs_fsize * B2MBFACTOR, sblock.fs_fpg / sblock.fs_frag, sblock.fs_ipg); if (sblock.fs_flags & FS_DOSOFTDEP) printf("\twith soft updates\n"); #undef B2MBFACTOR #endif /* FS_DEBUG */ /* * Now build the cylinders group blocks and * then print out indices of cylinder groups. */ printf("super-block backups (for fsck_ffs -b #) at:\n"); i = 0; width = charsperline(); /* * Iterate for only the new cylinder groups. */ for (cylno = osblock.fs_ncg; cylno < sblock.fs_ncg; cylno++) { initcg(cylno, modtime, fso, Nflag); j = sprintf(tmpbuf, " %jd%s", (intmax_t)fsbtodb(&sblock, cgsblock(&sblock, cylno)), cylno < (sblock.fs_ncg - 1) ? "," : "" ); if (i + j >= width) { printf("\n"); i = 0; } i += j; printf("%s", tmpbuf); fflush(stdout); } printf("\n"); /* * Do all needed changes in the first cylinder group. * allocate blocks in new location */ updcsloc(modtime, fsi, fso, Nflag); /* * Clean up the dynamic fields in our superblock. * * XXX * The following fields are currently distributed from the superblock * to the copies: * fs_minfree * fs_rotdelay * fs_maxcontig * fs_maxbpg * fs_minfree, * fs_optim * fs_flags * * We probably should rather change the summary for the cylinder group * statistics here to the value of what would be in there, if the file * system were created initially with the new size. Therefor we still * need to find an easy way of calculating that. * Possibly we can try to read the first superblock copy and apply the * "diffed" stats between the old and new superblock by still copying * certain parameters onto that. */ sblock.fs_time = modtime; sblock.fs_fmod = 0; sblock.fs_clean = 1; sblock.fs_ronly = 0; sblock.fs_cgrotor = 0; sblock.fs_state = 0; memset((void *)&sblock.fs_fsmnt, 0, sizeof(sblock.fs_fsmnt)); /* * Now write the new superblock, its summary information, * and all the alternates back to disk. */ if (!Nflag && sbput(fso, &sblock, sblock.fs_ncg) != 0) errc(2, EIO, "could not write updated superblock"); DBG_PRINT0("fscs written\n"); #ifdef FS_DEBUG { struct csum *dbg_csp; u_int32_t dbg_csc; char dbg_line[80]; dbg_csp = fscs; for (dbg_csc = 0; dbg_csc < sblock.fs_ncg; dbg_csc++) { snprintf(dbg_line, sizeof(dbg_line), "%d. new csum in new location", dbg_csc); DBG_DUMP_CSUM(&sblock, dbg_line, dbg_csp++); } } #endif /* FS_DEBUG */ DBG_PRINT0("sblock written\n"); DBG_DUMP_FS(&sblock, "new initial sblock"); DBG_PRINT0("sblock copies written\n"); DBG_DUMP_FS(&sblock, "new other sblocks"); DBG_LEAVE; return; } /* * This creates a new cylinder group structure, for more details please see * the source of newfs(8), as this function is taken over almost unchanged. * As this is never called for the first cylinder group, the special * provisions for that case are removed here. 
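The loop above prints each new backup superblock location in the sector units that fsck_ffs -b expects. The same arithmetic in isolation, using only the cgsblock() and fsbtodb() macros from <ufs/ffs/fs.h> (hypothetical helper name):

#include <sys/param.h>
#include <stdint.h>
#include <stdio.h>
#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>

/* Sketch only: list the alternate superblock locations of "fs". */
static void
list_sb_backups(const struct fs *fs)
{
        u_int cg;

        for (cg = 0; cg < (u_int)fs->fs_ncg; cg++)
                printf("cg %u: sector %jd\n", cg,
                    (intmax_t)fsbtodb(fs, cgsblock(fs, cg)));
}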
*/ static void initcg(int cylno, time_t modtime, int fso, unsigned int Nflag) { DBG_FUNC("initcg") static caddr_t iobuf; + static long iobufsize; long blkno, start; ino_t ino; ufs2_daddr_t i, cbase, dmax; struct ufs1_dinode *dp1; + struct ufs2_dinode *dp2; struct csum *cs; uint j, d, dupper, dlower; - if (iobuf == NULL && (iobuf = malloc(sblock.fs_bsize * 3)) == NULL) - errx(37, "panic: cannot allocate I/O buffer"); - + if (iobuf == NULL) { + iobufsize = 2 * sblock.fs_bsize; + if ((iobuf = malloc(iobufsize)) == NULL) + errx(37, "panic: cannot allocate I/O buffer"); + memset(iobuf, '\0', iobufsize); + } /* * Determine block bounds for cylinder group. * Allow space for super block summary information in first * cylinder group. */ cbase = cgbase(&sblock, cylno); dmax = cbase + sblock.fs_fpg; if (dmax > sblock.fs_size) dmax = sblock.fs_size; dlower = cgsblock(&sblock, cylno) - cbase; dupper = cgdmin(&sblock, cylno) - cbase; if (cylno == 0) /* XXX fscs may be relocated */ dupper += howmany(sblock.fs_cssize, sblock.fs_fsize); cs = &fscs[cylno]; memset(&acg, 0, sblock.fs_cgsize); acg.cg_time = modtime; acg.cg_magic = CG_MAGIC; acg.cg_cgx = cylno; acg.cg_niblk = sblock.fs_ipg; acg.cg_initediblk = MIN(sblock.fs_ipg, 2 * INOPB(&sblock)); acg.cg_ndblk = dmax - cbase; if (sblock.fs_contigsumsize > 0) acg.cg_nclusterblks = acg.cg_ndblk / sblock.fs_frag; start = &acg.cg_space[0] - (u_char *)(&acg.cg_firstfield); if (sblock.fs_magic == FS_UFS2_MAGIC) { acg.cg_iusedoff = start; } else { acg.cg_old_ncyl = sblock.fs_old_cpg; acg.cg_old_time = acg.cg_time; acg.cg_time = 0; acg.cg_old_niblk = acg.cg_niblk; acg.cg_niblk = 0; acg.cg_initediblk = 0; acg.cg_old_btotoff = start; acg.cg_old_boff = acg.cg_old_btotoff + sblock.fs_old_cpg * sizeof(int32_t); acg.cg_iusedoff = acg.cg_old_boff + sblock.fs_old_cpg * sizeof(u_int16_t); } acg.cg_freeoff = acg.cg_iusedoff + howmany(sblock.fs_ipg, CHAR_BIT); acg.cg_nextfreeoff = acg.cg_freeoff + howmany(sblock.fs_fpg, CHAR_BIT); if (sblock.fs_contigsumsize > 0) { acg.cg_clustersumoff = roundup(acg.cg_nextfreeoff, sizeof(u_int32_t)); acg.cg_clustersumoff -= sizeof(u_int32_t); acg.cg_clusteroff = acg.cg_clustersumoff + (sblock.fs_contigsumsize + 1) * sizeof(u_int32_t); acg.cg_nextfreeoff = acg.cg_clusteroff + howmany(fragstoblks(&sblock, sblock.fs_fpg), CHAR_BIT); } if (acg.cg_nextfreeoff > (unsigned)sblock.fs_cgsize) { /* * This should never happen as we would have had that panic * already on file system creation */ errx(37, "panic: cylinder group too big"); } acg.cg_cs.cs_nifree += sblock.fs_ipg; if (cylno == 0) for (ino = 0; ino < UFS_ROOTINO; ino++) { setbit(cg_inosused(&acg), ino); acg.cg_cs.cs_nifree--; } /* + * Initialize the initial inode blocks. + */ + dp1 = (struct ufs1_dinode *)(void *)iobuf; + dp2 = (struct ufs2_dinode *)(void *)iobuf; + for (i = 0; i < acg.cg_initediblk; i++) { + if (sblock.fs_magic == FS_UFS1_MAGIC) { + dp1->di_gen = arc4random(); + dp1++; + } else { + dp2->di_gen = arc4random(); + dp2++; + } + } + wtfs(fsbtodb(&sblock, cgimin(&sblock, cylno)), iobufsize, iobuf, + fso, Nflag); + /* * For the old file system, we have to initialize all the inodes. 
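The new initcg() code above writes freshly randomized generation numbers into the initial inode blocks of every added cylinder group instead of leaving them zeroed. The seeding step on its own, for one buffer of UFS2 inodes (UFS1 is analogous with struct ufs1_dinode):

#include <sys/param.h>
#include <stdlib.h>
#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>

/* Sketch only: randomize di_gen for one block's worth of UFS2 inodes. */
static void
seed_inode_gens(const struct fs *fs, void *buf)
{
        struct ufs2_dinode *dp = buf;
        u_int i;

        for (i = 0; i < INOPB(fs); i++, dp++)
                dp->di_gen = arc4random();
}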
*/ - if (sblock.fs_magic == FS_UFS1_MAGIC) { - bzero(iobuf, sblock.fs_bsize); - for (i = 0; i < sblock.fs_ipg / INOPF(&sblock); - i += sblock.fs_frag) { + if (sblock.fs_magic == FS_UFS1_MAGIC && + sblock.fs_ipg > 2 * INOPB(&sblock)) { + for (i = 2 * sblock.fs_frag; + i < sblock.fs_ipg / INOPF(&sblock); + i += sblock.fs_frag) { dp1 = (struct ufs1_dinode *)(void *)iobuf; for (j = 0; j < INOPB(&sblock); j++) { dp1->di_gen = arc4random(); dp1++; } wtfs(fsbtodb(&sblock, cgimin(&sblock, cylno) + i), sblock.fs_bsize, iobuf, fso, Nflag); } } if (cylno > 0) { /* * In cylno 0, beginning space is reserved * for boot and super blocks. */ for (d = 0; d < dlower; d += sblock.fs_frag) { blkno = d / sblock.fs_frag; setblock(&sblock, cg_blksfree(&acg), blkno); if (sblock.fs_contigsumsize > 0) setbit(cg_clustersfree(&acg), blkno); acg.cg_cs.cs_nbfree++; } sblock.fs_dsize += dlower; } sblock.fs_dsize += acg.cg_ndblk - dupper; if ((i = dupper % sblock.fs_frag)) { acg.cg_frsum[sblock.fs_frag - i]++; for (d = dupper + sblock.fs_frag - i; dupper < d; dupper++) { setbit(cg_blksfree(&acg), dupper); acg.cg_cs.cs_nffree++; } } for (d = dupper; d + sblock.fs_frag <= acg.cg_ndblk; d += sblock.fs_frag) { blkno = d / sblock.fs_frag; setblock(&sblock, cg_blksfree(&acg), blkno); if (sblock.fs_contigsumsize > 0) setbit(cg_clustersfree(&acg), blkno); acg.cg_cs.cs_nbfree++; } if (d < acg.cg_ndblk) { acg.cg_frsum[acg.cg_ndblk - d]++; for (; d < acg.cg_ndblk; d++) { setbit(cg_blksfree(&acg), d); acg.cg_cs.cs_nffree++; } } if (sblock.fs_contigsumsize > 0) { int32_t *sump = cg_clustersum(&acg); u_char *mapp = cg_clustersfree(&acg); int map = *mapp++; int bit = 1; int run = 0; for (i = 0; i < acg.cg_nclusterblks; i++) { if ((map & bit) != 0) run++; else if (run != 0) { if (run > sblock.fs_contigsumsize) run = sblock.fs_contigsumsize; sump[run]++; run = 0; } if ((i & (CHAR_BIT - 1)) != CHAR_BIT - 1) bit <<= 1; else { map = *mapp++; bit = 1; } } if (run != 0) { if (run > sblock.fs_contigsumsize) run = sblock.fs_contigsumsize; sump[run]++; } } sblock.fs_cstotal.cs_ndir += acg.cg_cs.cs_ndir; sblock.fs_cstotal.cs_nffree += acg.cg_cs.cs_nffree; sblock.fs_cstotal.cs_nbfree += acg.cg_cs.cs_nbfree; sblock.fs_cstotal.cs_nifree += acg.cg_cs.cs_nifree; *cs = acg.cg_cs; cgckhash(&acg); - memcpy(iobuf, &acg, sblock.fs_cgsize); - memset(iobuf + sblock.fs_cgsize, '\0', - sblock.fs_bsize * 3 - sblock.fs_cgsize); - - wtfs(fsbtodb(&sblock, cgtod(&sblock, cylno)), - sblock.fs_bsize * 3, iobuf, fso, Nflag); + wtfs(fsbtodb(&sblock, cgtod(&sblock, cylno)), sblock.fs_cgsize, &acg, + fso, Nflag); DBG_DUMP_CG(&sblock, "new cg", &acg); DBG_LEAVE; return; } /* * Here we add or subtract (sign +1/-1) the available fragments in a given * block to or from the fragment statistics. By subtracting before and adding * after an operation on the free frag map we can easy update the fragment * statistic, which seems to be otherwise a rather complex operation. */ static void frag_adjust(ufs2_daddr_t frag, int sign) { DBG_FUNC("frag_adjust") int fragsize; int f; DBG_ENTER; fragsize = 0; /* * Here frag only needs to point to any fragment in the block we want * to examine. */ for (f = rounddown(frag, sblock.fs_frag); f < roundup(frag + 1, sblock.fs_frag); f++) { /* * Count contiguous free fragments. */ if (isset(cg_blksfree(&acg), f)) { fragsize++; } else { if (fragsize && fragsize < sblock.fs_frag) { /* * We found something in between. 
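frag_adjust() here scans one block of the free-fragment map and moves the size of every partial free run into or out of cg_frsum[]. The counting idea in isolation (hypothetical helper; isset() is the bitmap macro from <sys/param.h>):

#include <sys/param.h>
#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>

/*
 * Sketch only: add (sign = 1) or subtract (sign = -1) the partial
 * free-fragment runs found in one block of a free map to/from frsum[].
 * Runs covering the whole block are not fragments and are skipped.
 */
static void
count_frag_runs(const struct fs *fs, u_char *freemap, ufs2_daddr_t blkstart,
    int32_t *frsum, int sign)
{
        int f, run;

        run = 0;
        for (f = 0; f < fs->fs_frag; f++) {
                if (isset(freemap, blkstart + f)) {
                        run++;
                        continue;
                }
                if (run > 0 && run < fs->fs_frag)
                        frsum[run] += sign;
                run = 0;
        }
        if (run > 0 && run < fs->fs_frag)
                frsum[run] += sign;
}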
*/ acg.cg_frsum[fragsize] += sign; DBG_PRINT2("frag_adjust [%d]+=%d\n", fragsize, sign); } fragsize = 0; } } if (fragsize && fragsize < sblock.fs_frag) { /* * We found something. */ acg.cg_frsum[fragsize] += sign; DBG_PRINT2("frag_adjust [%d]+=%d\n", fragsize, sign); } DBG_PRINT2("frag_adjust [[%d]]+=%d\n", fragsize, sign); DBG_LEAVE; return; } /* * Here we do all needed work for the former last cylinder group. It has to be * changed in any case, even if the file system ended exactly on the end of * this group, as there is some slightly inconsistent handling of the number * of cylinders in the cylinder group. We start again by reading the cylinder * group from disk. If the last block was not fully available, we first handle * the missing fragments, then we handle all new full blocks in that file * system and finally we handle the new last fragmented block in the file * system. We again have to handle the fragment statistics rotational layout * tables and cluster summary during all those operations. */ static void updjcg(int cylno, time_t modtime, int fsi, int fso, unsigned int Nflag) { DBG_FUNC("updjcg") ufs2_daddr_t cbase, dmax, dupper; struct csum *cs; int i, k; int j = 0; DBG_ENTER; /* * Read the former last (joining) cylinder group from disk, and make * a copy. */ rdfs(fsbtodb(&osblock, cgtod(&osblock, cylno)), (size_t)osblock.fs_cgsize, (void *)&aocg, fsi); DBG_PRINT0("jcg read\n"); DBG_DUMP_CG(&sblock, "old joining cg", &aocg); memcpy((void *)&cgun1, (void *)&cgun2, sizeof(cgun2)); /* * If the cylinder group had already its new final size almost * nothing is to be done ... except: * For some reason the value of cg_ncyl in the last cylinder group has * to be zero instead of fs_cpg. As this is now no longer the last * cylinder group we have to change that value now to fs_cpg. */ if (cgbase(&osblock, cylno + 1) == osblock.fs_size) { if (sblock.fs_magic == FS_UFS1_MAGIC) acg.cg_old_ncyl = sblock.fs_old_cpg; wtfs(fsbtodb(&sblock, cgtod(&sblock, cylno)), (size_t)sblock.fs_cgsize, (void *)&acg, fso, Nflag); DBG_PRINT0("jcg written\n"); DBG_DUMP_CG(&sblock, "new joining cg", &acg); DBG_LEAVE; return; } /* * Set up some variables needed later. */ cbase = cgbase(&sblock, cylno); dmax = cbase + sblock.fs_fpg; if (dmax > sblock.fs_size) dmax = sblock.fs_size; dupper = cgdmin(&sblock, cylno) - cbase; if (cylno == 0) /* XXX fscs may be relocated */ dupper += howmany(sblock.fs_cssize, sblock.fs_fsize); /* * Set pointer to the cylinder summary for our cylinder group. */ cs = fscs + cylno; /* * Touch the cylinder group, update all fields in the cylinder group as * needed, update the free space in the superblock. */ acg.cg_time = modtime; if ((unsigned)cylno == sblock.fs_ncg - 1) { /* * This is still the last cylinder group. */ if (sblock.fs_magic == FS_UFS1_MAGIC) acg.cg_old_ncyl = sblock.fs_old_ncyl % sblock.fs_old_cpg; } else { acg.cg_old_ncyl = sblock.fs_old_cpg; } DBG_PRINT2("jcg dbg: %d %u", cylno, sblock.fs_ncg); #ifdef FS_DEBUG if (sblock.fs_magic == FS_UFS1_MAGIC) DBG_PRINT2("%d %u", acg.cg_old_ncyl, sblock.fs_old_cpg); #endif DBG_PRINT0("\n"); acg.cg_ndblk = dmax - cbase; sblock.fs_dsize += acg.cg_ndblk - aocg.cg_ndblk; if (sblock.fs_contigsumsize > 0) acg.cg_nclusterblks = acg.cg_ndblk / sblock.fs_frag; /* * Now we have to update the free fragment bitmap for our new free * space. There again we have to handle the fragmentation and also * the rotational layout tables and the cluster summary. 
This is * also done per fragment for the first new block if the old file * system end was not on a block boundary, per fragment for the new * last block if the new file system end is not on a block boundary, * and per block for all space in between. * * Handle the first new block here if it was partially available * before. */ if (osblock.fs_size % sblock.fs_frag) { if (roundup(osblock.fs_size, sblock.fs_frag) <= sblock.fs_size) { /* * The new space is enough to fill at least this * block */ j = 0; for (i = roundup(osblock.fs_size - cbase, sblock.fs_frag) - 1; i >= osblock.fs_size - cbase; i--) { setbit(cg_blksfree(&acg), i); acg.cg_cs.cs_nffree++; j++; } /* * Check if the fragment just created could join an * already existing fragment at the former end of the * file system. */ if (isblock(&sblock, cg_blksfree(&acg), ((osblock.fs_size - cgbase(&sblock, cylno)) / sblock.fs_frag))) { /* * The block is now completely available. */ DBG_PRINT0("block was\n"); acg.cg_frsum[osblock.fs_size % sblock.fs_frag]--; acg.cg_cs.cs_nbfree++; acg.cg_cs.cs_nffree -= sblock.fs_frag; k = rounddown(osblock.fs_size - cbase, sblock.fs_frag); updclst((osblock.fs_size - cbase) / sblock.fs_frag); } else { /* * Lets rejoin a possible partially growed * fragment. */ k = 0; while (isset(cg_blksfree(&acg), i) && (i >= rounddown(osblock.fs_size - cbase, sblock.fs_frag))) { i--; k++; } if (k) acg.cg_frsum[k]--; acg.cg_frsum[k + j]++; } } else { /* * We only grow by some fragments within this last * block. */ for (i = sblock.fs_size - cbase - 1; i >= osblock.fs_size - cbase; i--) { setbit(cg_blksfree(&acg), i); acg.cg_cs.cs_nffree++; j++; } /* * Lets rejoin a possible partially growed fragment. */ k = 0; while (isset(cg_blksfree(&acg), i) && (i >= rounddown(osblock.fs_size - cbase, sblock.fs_frag))) { i--; k++; } if (k) acg.cg_frsum[k]--; acg.cg_frsum[k + j]++; } } /* * Handle all new complete blocks here. */ for (i = roundup(osblock.fs_size - cbase, sblock.fs_frag); i + sblock.fs_frag <= dmax - cbase; /* XXX <= or only < ? */ i += sblock.fs_frag) { j = i / sblock.fs_frag; setblock(&sblock, cg_blksfree(&acg), j); updclst(j); acg.cg_cs.cs_nbfree++; } /* * Handle the last new block if there are stll some new fragments left. * Here we don't have to bother about the cluster summary or the even * the rotational layout table. */ if (i < (dmax - cbase)) { acg.cg_frsum[dmax - cbase - i]++; for (; i < dmax - cbase; i++) { setbit(cg_blksfree(&acg), i); acg.cg_cs.cs_nffree++; } } sblock.fs_cstotal.cs_nffree += (acg.cg_cs.cs_nffree - aocg.cg_cs.cs_nffree); sblock.fs_cstotal.cs_nbfree += (acg.cg_cs.cs_nbfree - aocg.cg_cs.cs_nbfree); /* * The following statistics are not changed here: * sblock.fs_cstotal.cs_ndir * sblock.fs_cstotal.cs_nifree * As the statistics for this cylinder group are ready, copy it to * the summary information array. */ *cs = acg.cg_cs; /* * Write the updated "joining" cylinder group back to disk. */ cgckhash(&acg); wtfs(fsbtodb(&sblock, cgtod(&sblock, cylno)), (size_t)sblock.fs_cgsize, (void *)&acg, fso, Nflag); DBG_PRINT0("jcg written\n"); DBG_DUMP_CG(&sblock, "new joining cg", &acg); DBG_LEAVE; return; } /* * Here we update the location of the cylinder summary. We have two possible * ways of growing the cylinder summary: * (1) We can try to grow the summary in the current location, and relocate * possibly used blocks within the current cylinder group. * (2) Alternatively we can relocate the whole cylinder summary to the first * new completely empty cylinder group. 
Once the cylinder summary is no * longer in the beginning of the first cylinder group you should never * use a version of fsck which is not aware of the possibility to have * this structure in a non standard place. * Option (2) is considered to be less intrusive to the structure of the file- * system, so that's the one being used. */ static void updcsloc(time_t modtime, int fsi, int fso, unsigned int Nflag) { DBG_FUNC("updcsloc") struct csum *cs; int ocscg, ncscg; ufs2_daddr_t d; int lcs = 0; int block; DBG_ENTER; if (howmany(sblock.fs_cssize, sblock.fs_fsize) == howmany(osblock.fs_cssize, osblock.fs_fsize)) { /* * No new fragment needed. */ DBG_LEAVE; return; } ocscg = dtog(&osblock, osblock.fs_csaddr); cs = fscs + ocscg; /* * Read original cylinder group from disk, and make a copy. * XXX If Nflag is set in some very rare cases we now miss * some changes done in updjcg by reading the unmodified * block from disk. */ rdfs(fsbtodb(&osblock, cgtod(&osblock, ocscg)), (size_t)osblock.fs_cgsize, (void *)&aocg, fsi); DBG_PRINT0("oscg read\n"); DBG_DUMP_CG(&sblock, "old summary cg", &aocg); memcpy((void *)&cgun1, (void *)&cgun2, sizeof(cgun2)); /* * Touch the cylinder group, set up local variables needed later * and update the superblock. */ acg.cg_time = modtime; /* * XXX In the case of having active snapshots we may need much more * blocks for the copy on write. We need each block twice, and * also up to 8*3 blocks for indirect blocks for all possible * references. */ /* * There is not enough space in the old cylinder group to * relocate all blocks as needed, so we relocate the whole * cylinder group summary to a new group. We try to use the * first complete new cylinder group just created. Within the * cylinder group we align the area immediately after the * cylinder group information location in order to be as * close as possible to the original implementation of ffs. * * First we have to make sure we'll find enough space in the * new cylinder group. If not, then we currently give up. * We start with freeing everything which was used by the * fragments of the old cylinder summary in the current group. * Now we write back the group meta data, read in the needed * meta data from the new cylinder group, and start allocating * within that group. Here we can assume, the group to be * completely empty. Which makes the handling of fragments and * clusters a lot easier. */ DBG_TRC; if (sblock.fs_ncg - osblock.fs_ncg < 2) errx(2, "panic: not enough space"); /* * Point "d" to the first fragment not used by the cylinder * summary. */ d = osblock.fs_csaddr + (osblock.fs_cssize / osblock.fs_fsize); /* * Set up last cluster size ("lcs") already here. Calculate * the size for the trailing cluster just behind where "d" * points to. */ if (sblock.fs_contigsumsize > 0) { for (block = howmany(d % sblock.fs_fpg, sblock.fs_frag), lcs = 0; lcs < sblock.fs_contigsumsize; block++, lcs++) { if (isclr(cg_clustersfree(&acg), block)) break; } } /* * Point "d" to the last frag used by the cylinder summary. */ d--; DBG_PRINT1("d=%jd\n", (intmax_t)d); if ((d + 1) % sblock.fs_frag) { /* * The end of the cylinder summary is not a complete * block. */ DBG_TRC; frag_adjust(d % sblock.fs_fpg, -1); for (; (d + 1) % sblock.fs_frag; d--) { DBG_PRINT1("d=%jd\n", (intmax_t)d); setbit(cg_blksfree(&acg), d % sblock.fs_fpg); acg.cg_cs.cs_nffree++; sblock.fs_cstotal.cs_nffree++; } /* * Point "d" to the last fragment of the last * (incomplete) block of the cylinder summary. 
*/ d++; frag_adjust(d % sblock.fs_fpg, 1); if (isblock(&sblock, cg_blksfree(&acg), (d % sblock.fs_fpg) / sblock.fs_frag)) { DBG_PRINT1("d=%jd\n", (intmax_t)d); acg.cg_cs.cs_nffree -= sblock.fs_frag; acg.cg_cs.cs_nbfree++; sblock.fs_cstotal.cs_nffree -= sblock.fs_frag; sblock.fs_cstotal.cs_nbfree++; if (sblock.fs_contigsumsize > 0) { setbit(cg_clustersfree(&acg), (d % sblock.fs_fpg) / sblock.fs_frag); if (lcs < sblock.fs_contigsumsize) { if (lcs) cg_clustersum(&acg)[lcs]--; lcs++; cg_clustersum(&acg)[lcs]++; } } } /* * Point "d" to the first fragment of the block before * the last incomplete block. */ d--; } DBG_PRINT1("d=%jd\n", (intmax_t)d); for (d = rounddown(d, sblock.fs_frag); d >= osblock.fs_csaddr; d -= sblock.fs_frag) { DBG_TRC; DBG_PRINT1("d=%jd\n", (intmax_t)d); setblock(&sblock, cg_blksfree(&acg), (d % sblock.fs_fpg) / sblock.fs_frag); acg.cg_cs.cs_nbfree++; sblock.fs_cstotal.cs_nbfree++; if (sblock.fs_contigsumsize > 0) { setbit(cg_clustersfree(&acg), (d % sblock.fs_fpg) / sblock.fs_frag); /* * The last cluster size is already set up. */ if (lcs < sblock.fs_contigsumsize) { if (lcs) cg_clustersum(&acg)[lcs]--; lcs++; cg_clustersum(&acg)[lcs]++; } } } *cs = acg.cg_cs; /* * Now write the former cylinder group containing the cylinder * summary back to disk. */ wtfs(fsbtodb(&sblock, cgtod(&sblock, ocscg)), (size_t)sblock.fs_cgsize, (void *)&acg, fso, Nflag); DBG_PRINT0("oscg written\n"); DBG_DUMP_CG(&sblock, "old summary cg", &acg); /* * Find the beginning of the new cylinder group containing the * cylinder summary. */ sblock.fs_csaddr = cgdmin(&sblock, osblock.fs_ncg); ncscg = dtog(&sblock, sblock.fs_csaddr); cs = fscs + ncscg; /* * If Nflag is specified, we would now read random data instead * of an empty cg structure from disk. So we can't simulate that * part for now. */ if (Nflag) { DBG_PRINT0("nscg update skipped\n"); DBG_LEAVE; return; } /* * Read the future cylinder group containing the cylinder * summary from disk, and make a copy. */ rdfs(fsbtodb(&sblock, cgtod(&sblock, ncscg)), (size_t)sblock.fs_cgsize, (void *)&aocg, fsi); DBG_PRINT0("nscg read\n"); DBG_DUMP_CG(&sblock, "new summary cg", &aocg); memcpy((void *)&cgun1, (void *)&cgun2, sizeof(cgun2)); /* * Allocate all complete blocks used by the new cylinder * summary. */ for (d = sblock.fs_csaddr; d + sblock.fs_frag <= sblock.fs_csaddr + (sblock.fs_cssize / sblock.fs_fsize); d += sblock.fs_frag) { clrblock(&sblock, cg_blksfree(&acg), (d % sblock.fs_fpg) / sblock.fs_frag); acg.cg_cs.cs_nbfree--; sblock.fs_cstotal.cs_nbfree--; if (sblock.fs_contigsumsize > 0) { clrbit(cg_clustersfree(&acg), (d % sblock.fs_fpg) / sblock.fs_frag); } } /* * Allocate all fragments used by the cylinder summary in the * last block. */ if (d < sblock.fs_csaddr + (sblock.fs_cssize / sblock.fs_fsize)) { for (; d - sblock.fs_csaddr < sblock.fs_cssize/sblock.fs_fsize; d++) { clrbit(cg_blksfree(&acg), d % sblock.fs_fpg); acg.cg_cs.cs_nffree--; sblock.fs_cstotal.cs_nffree--; } acg.cg_cs.cs_nbfree--; acg.cg_cs.cs_nffree += sblock.fs_frag; sblock.fs_cstotal.cs_nbfree--; sblock.fs_cstotal.cs_nffree += sblock.fs_frag; if (sblock.fs_contigsumsize > 0) clrbit(cg_clustersfree(&acg), (d % sblock.fs_fpg) / sblock.fs_frag); frag_adjust(d % sblock.fs_fpg, 1); } /* * XXX Handle the cluster statistics here in the case this * cylinder group is now almost full, and the remaining * space is less then the maximum cluster size. 
This is * probably not needed, as you would hardly find a file * system which has only MAXCSBUFS+FS_MAXCONTIG of free * space right behind the cylinder group information in * any new cylinder group. */ /* * Update our statistics in the cylinder summary. */ *cs = acg.cg_cs; /* * Write the new cylinder group containing the cylinder summary * back to disk. */ wtfs(fsbtodb(&sblock, cgtod(&sblock, ncscg)), (size_t)sblock.fs_cgsize, (void *)&acg, fso, Nflag); DBG_PRINT0("nscg written\n"); DBG_DUMP_CG(&sblock, "new summary cg", &acg); DBG_LEAVE; return; } /* * Here we read some block(s) from disk. */ static void rdfs(ufs2_daddr_t bno, size_t size, void *bf, int fsi) { DBG_FUNC("rdfs") ssize_t n; DBG_ENTER; if (bno < 0) err(32, "rdfs: attempting to read negative block number"); if (lseek(fsi, (off_t)bno * DEV_BSIZE, 0) < 0) err(33, "rdfs: seek error: %jd", (intmax_t)bno); n = read(fsi, bf, size); if (n != (ssize_t)size) err(34, "rdfs: read error: %jd", (intmax_t)bno); DBG_LEAVE; return; } /* * Here we write some block(s) to disk. */ static void wtfs(ufs2_daddr_t bno, size_t size, void *bf, int fso, unsigned int Nflag) { DBG_FUNC("wtfs") ssize_t n; DBG_ENTER; if (Nflag) { DBG_LEAVE; return; } if (lseek(fso, (off_t)bno * DEV_BSIZE, SEEK_SET) < 0) err(35, "wtfs: seek error: %ld", (long)bno); n = write(fso, bf, size); if (n != (ssize_t)size) err(36, "wtfs: write error: %ld", (long)bno); DBG_LEAVE; return; } /* * Here we check if all frags of a block are free. For more details again * please see the source of newfs(8), as this function is taken over almost * unchanged. */ static int isblock(struct fs *fs, unsigned char *cp, int h) { DBG_FUNC("isblock") unsigned char mask; DBG_ENTER; switch (fs->fs_frag) { case 8: DBG_LEAVE; return (cp[h] == 0xff); case 4: mask = 0x0f << ((h & 0x1) << 2); DBG_LEAVE; return ((cp[h >> 1] & mask) == mask); case 2: mask = 0x03 << ((h & 0x3) << 1); DBG_LEAVE; return ((cp[h >> 2] & mask) == mask); case 1: mask = 0x01 << (h & 0x7); DBG_LEAVE; return ((cp[h >> 3] & mask) == mask); default: fprintf(stderr, "isblock bad fs_frag %d\n", fs->fs_frag); DBG_LEAVE; return (0); } } /* * Here we allocate a complete block in the block map. For more details again * please see the source of newfs(8), as this function is taken over almost * unchanged. */ static void clrblock(struct fs *fs, unsigned char *cp, int h) { DBG_FUNC("clrblock") DBG_ENTER; switch ((fs)->fs_frag) { case 8: cp[h] = 0; break; case 4: cp[h >> 1] &= ~(0x0f << ((h & 0x1) << 2)); break; case 2: cp[h >> 2] &= ~(0x03 << ((h & 0x3) << 1)); break; case 1: cp[h >> 3] &= ~(0x01 << (h & 0x7)); break; default: warnx("clrblock bad fs_frag %d", fs->fs_frag); break; } DBG_LEAVE; return; } /* * Here we free a complete block in the free block map. For more details again * please see the source of newfs(8), as this function is taken over almost * unchanged. */ static void setblock(struct fs *fs, unsigned char *cp, int h) { DBG_FUNC("setblock") DBG_ENTER; switch (fs->fs_frag) { case 8: cp[h] = 0xff; break; case 4: cp[h >> 1] |= (0x0f << ((h & 0x1) << 2)); break; case 2: cp[h >> 2] |= (0x03 << ((h & 0x3) << 1)); break; case 1: cp[h >> 3] |= (0x01 << (h & 0x7)); break; default: warnx("setblock bad fs_frag %d", fs->fs_frag); break; } DBG_LEAVE; return; } /* * Figure out how many lines our current terminal has. For more details again * please see the source of newfs(8), as this function is taken over almost * unchanged. 
*/ static int charsperline(void) { DBG_FUNC("charsperline") int columns; char *cp; struct winsize ws; DBG_ENTER; columns = 0; if (ioctl(0, TIOCGWINSZ, &ws) != -1) columns = ws.ws_col; if (columns == 0 && (cp = getenv("COLUMNS"))) columns = atoi(cp); if (columns == 0) columns = 80; /* last resort */ DBG_LEAVE; return (columns); } static int is_dev(const char *name) { struct stat devstat; if (stat(name, &devstat) != 0) return (0); if (!S_ISCHR(devstat.st_mode)) return (0); return (1); } /* * Return mountpoint on which the device is currently mounted. */ static const struct statfs * dev_to_statfs(const char *dev) { struct stat devstat, mntdevstat; struct statfs *mntbuf, *statfsp; char device[MAXPATHLEN]; char *mntdevname; int i, mntsize; /* * First check the mounted filesystems. */ if (stat(dev, &devstat) != 0) return (NULL); if (!S_ISCHR(devstat.st_mode) && !S_ISBLK(devstat.st_mode)) return (NULL); mntsize = getmntinfo(&mntbuf, MNT_NOWAIT); for (i = 0; i < mntsize; i++) { statfsp = &mntbuf[i]; mntdevname = statfsp->f_mntfromname; if (*mntdevname != '/') { strcpy(device, _PATH_DEV); strcat(device, mntdevname); mntdevname = device; } if (stat(mntdevname, &mntdevstat) == 0 && mntdevstat.st_rdev == devstat.st_rdev) return (statfsp); } return (NULL); } static const char * mountpoint_to_dev(const char *mountpoint) { struct statfs *mntbuf, *statfsp; struct fstab *fs; int i, mntsize; /* * First check the mounted filesystems. */ mntsize = getmntinfo(&mntbuf, MNT_NOWAIT); for (i = 0; i < mntsize; i++) { statfsp = &mntbuf[i]; if (strcmp(statfsp->f_mntonname, mountpoint) == 0) return (statfsp->f_mntfromname); } /* * Check the fstab. */ fs = getfsfile(mountpoint); if (fs != NULL) return (fs->fs_spec); return (NULL); } static const char * getdev(const char *name) { static char device[MAXPATHLEN]; const char *cp, *dev; if (is_dev(name)) return (name); cp = strrchr(name, '/'); if (cp == NULL) { snprintf(device, sizeof(device), "%s%s", _PATH_DEV, name); if (is_dev(device)) return (device); } dev = mountpoint_to_dev(name); if (dev != NULL && is_dev(dev)) return (dev); return (NULL); } /* * growfs(8) is a utility which allows to increase the size of an existing * ufs file system. Currently this can only be done on unmounted file system. * It recognizes some command line options to specify the new desired size, * and it does some basic checkings. The old file system size is determined * and after some more checks like we can really access the new last block * on the disk etc. we calculate the new parameters for the superblock. After * having done this we just call growfs() which will do the work. * We still have to provide support for snapshots. Therefore we first have to * understand what data structures are always replicated in the snapshot on * creation, for all other blocks we touch during our procedure, we have to * keep the old blocks unchanged somewhere available for the snapshots. If we * are lucky, then we only have to handle our blocks to be relocated in that * way. * Also we have to consider in what order we actually update the critical * data structures of the file system to make sure, that in case of a disaster * fsck(8) is still able to restore any lost data. * The foreseen last step then will be to provide for growing even mounted * file systems. There we have to extend the mount() system call to provide * userland access to the file system locking facility. 
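main() below accepts -s sizes with b/k/m/g/t suffixes and treats a bare number as DEV_BSIZE sectors. The same conversion as a standalone helper (hypothetical name; 0 is returned for an unknown suffix):

#include <sys/param.h>
#include <inttypes.h>

/* Sketch only: convert a growfs-style size argument to bytes. */
static uint64_t
parse_size(const char *arg)
{
        char *p;
        uint64_t size;

        size = strtoumax(arg, &p, 0);
        switch (*p) {
        case '\0':              return (size * DEV_BSIZE);      /* sectors */
        case 'b': case 'B':     return (size);
        case 'k': case 'K':     return (size << 10);
        case 'm': case 'M':     return (size << 20);
        case 'g': case 'G':     return (size << 30);
        case 't': case 'T':     return (size << 40);
        default:                return (0);
        }
}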
*/ int main(int argc, char **argv) { DBG_FUNC("main") struct fs *fs; const char *device; const struct statfs *statfsp; uint64_t size = 0; off_t mediasize; int error, j, fsi, fso, ch, ret, Nflag = 0, yflag = 0; char *p, reply[5], oldsizebuf[6], newsizebuf[6]; void *testbuf; DBG_ENTER; while ((ch = getopt(argc, argv, "Ns:vy")) != -1) { switch(ch) { case 'N': Nflag = 1; break; case 's': size = (off_t)strtoumax(optarg, &p, 0); if (p == NULL || *p == '\0') size *= DEV_BSIZE; else if (*p == 'b' || *p == 'B') ; /* do nothing */ else if (*p == 'k' || *p == 'K') size <<= 10; else if (*p == 'm' || *p == 'M') size <<= 20; else if (*p == 'g' || *p == 'G') size <<= 30; else if (*p == 't' || *p == 'T') { size <<= 30; size <<= 10; } else errx(1, "unknown suffix on -s argument"); break; case 'v': /* for compatibility to newfs */ break; case 'y': yflag = 1; break; case '?': /* FALLTHROUGH */ default: usage(); } } argc -= optind; argv += optind; if (argc != 1) usage(); /* * Now try to guess the device name. */ device = getdev(*argv); if (device == NULL) errx(1, "cannot find special device for %s", *argv); statfsp = dev_to_statfs(device); fsi = open(device, O_RDONLY); if (fsi < 0) err(1, "%s", device); /* * Try to guess the slice size if not specified. */ if (ioctl(fsi, DIOCGMEDIASIZE, &mediasize) == -1) err(1,"DIOCGMEDIASIZE"); /* * Check if that partition is suitable for growing a file system. */ if (mediasize < 1) errx(1, "partition is unavailable"); /* * Read the current superblock, and take a backup. */ if ((ret = sbget(fsi, &fs, -1)) != 0) { switch (ret) { case ENOENT: errx(1, "superblock not recognized"); default: errc(1, ret, "unable to read superblock"); } } memcpy(&osblock, fs, fs->fs_sbsize); free(fs); memcpy((void *)&fsun1, (void *)&fsun2, osblock.fs_sbsize); DBG_OPEN("/tmp/growfs.debug"); /* already here we need a superblock */ DBG_DUMP_FS(&sblock, "old sblock"); /* * Determine size to grow to. Default to the device size. */ if (size == 0) size = mediasize; else { if (size > (uint64_t)mediasize) { humanize_number(oldsizebuf, sizeof(oldsizebuf), size, "B", HN_AUTOSCALE, HN_B | HN_NOSPACE | HN_DECIMAL); humanize_number(newsizebuf, sizeof(newsizebuf), mediasize, "B", HN_AUTOSCALE, HN_B | HN_NOSPACE | HN_DECIMAL); errx(1, "requested size %s is larger " "than the available %s", oldsizebuf, newsizebuf); } } /* * Make sure the new size is a multiple of fs_fsize; /dev/ufssuspend * only supports fragment-aligned IO requests. */ size -= size % osblock.fs_fsize; if (size <= (uint64_t)(osblock.fs_size * osblock.fs_fsize)) { humanize_number(oldsizebuf, sizeof(oldsizebuf), osblock.fs_size * osblock.fs_fsize, "B", HN_AUTOSCALE, HN_B | HN_NOSPACE | HN_DECIMAL); humanize_number(newsizebuf, sizeof(newsizebuf), size, "B", HN_AUTOSCALE, HN_B | HN_NOSPACE | HN_DECIMAL); errx(1, "requested size %s is not larger than the current " "filesystem size %s", newsizebuf, oldsizebuf); } sblock.fs_size = dbtofsb(&osblock, size / DEV_BSIZE); sblock.fs_providersize = dbtofsb(&osblock, mediasize / DEV_BSIZE); /* * Are we really growing? */ if (osblock.fs_size >= sblock.fs_size) { errx(1, "we are not growing (%jd->%jd)", (intmax_t)osblock.fs_size, (intmax_t)sblock.fs_size); } /* * Check if we find an active snapshot. 
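Both growfs and fsirand in this change read the superblock with sbget(3) from libufs and treat ENOENT as "no superblock found". A minimal sketch of that pattern (link with -lufs; the -1 location argument asks the library to search the standard superblock offsets, mirroring the calls in this file):

#include <sys/param.h>
#include <sys/mount.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libufs.h>
#include <unistd.h>

/* Sketch only: read a UFS superblock; the caller free()s the result. */
static struct fs *
read_superblock(const char *dev)
{
        struct fs *fs;
        int fd, ret;

        if ((fd = open(dev, O_RDONLY)) < 0)
                err(1, "%s", dev);
        if ((ret = sbget(fd, &fs, -1)) != 0) {
                if (ret == ENOENT)
                        errx(1, "superblock not recognized");
                errc(1, ret, "unable to read superblock");
        }
        close(fd);
        return (fs);
}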
*/ if (yflag == 0) { for (j = 0; j < FSMAXSNAP; j++) { if (sblock.fs_snapinum[j]) { errx(1, "active snapshot found in file system; " "please remove all snapshots before " "using growfs"); } if (!sblock.fs_snapinum[j]) /* list is dense */ break; } } if (yflag == 0 && Nflag == 0) { if (statfsp != NULL && (statfsp->f_flags & MNT_RDONLY) == 0) printf("Device is mounted read-write; resizing will " "result in temporary write suspension for %s.\n", statfsp->f_mntonname); printf("It's strongly recommended to make a backup " "before growing the file system.\n" "OK to grow filesystem on %s", device); if (statfsp != NULL) printf(", mounted on %s,", statfsp->f_mntonname); humanize_number(oldsizebuf, sizeof(oldsizebuf), osblock.fs_size * osblock.fs_fsize, "B", HN_AUTOSCALE, HN_B | HN_NOSPACE | HN_DECIMAL); humanize_number(newsizebuf, sizeof(newsizebuf), sblock.fs_size * sblock.fs_fsize, "B", HN_AUTOSCALE, HN_B | HN_NOSPACE | HN_DECIMAL); printf(" from %s to %s? [yes/no] ", oldsizebuf, newsizebuf); fflush(stdout); fgets(reply, (int)sizeof(reply), stdin); if (strcasecmp(reply, "yes\n")){ printf("Response other than \"yes\"; aborting\n"); exit(0); } } /* * Try to access our device for writing. If it's not mounted, * or mounted read-only, simply open it; otherwise, use UFS * suspension mechanism. */ if (Nflag) { fso = -1; } else { if (statfsp != NULL && (statfsp->f_flags & MNT_RDONLY) == 0) { fso = open(_PATH_UFSSUSPEND, O_RDWR); if (fso == -1) err(1, "unable to open %s", _PATH_UFSSUSPEND); error = ioctl(fso, UFSSUSPEND, &statfsp->f_fsid); if (error != 0) err(1, "UFSSUSPEND"); } else { fso = open(device, O_WRONLY); if (fso < 0) err(1, "%s", device); } } /* * Try to access our new last block in the file system. */ testbuf = malloc(sblock.fs_fsize); if (testbuf == NULL) err(1, "malloc"); rdfs((ufs2_daddr_t)((size - sblock.fs_fsize) / DEV_BSIZE), sblock.fs_fsize, testbuf, fsi); wtfs((ufs2_daddr_t)((size - sblock.fs_fsize) / DEV_BSIZE), sblock.fs_fsize, testbuf, fso, Nflag); free(testbuf); /* * Now calculate new superblock values and check for reasonable * bound for new file system size: * fs_size: is derived from user input * fs_dsize: should get updated in the routines creating or * updating the cylinder groups on the fly * fs_cstotal: should get updated in the routines creating or * updating the cylinder groups */ /* * Update the number of cylinders and cylinder groups in the file system. */ if (sblock.fs_magic == FS_UFS1_MAGIC) { sblock.fs_old_ncyl = sblock.fs_size * sblock.fs_old_nspf / sblock.fs_old_spc; if (sblock.fs_size * sblock.fs_old_nspf > sblock.fs_old_ncyl * sblock.fs_old_spc) sblock.fs_old_ncyl++; } sblock.fs_ncg = howmany(sblock.fs_size, sblock.fs_fpg); /* * Allocate last cylinder group only if there is enough room * for at least one data block. */ if (sblock.fs_size % sblock.fs_fpg != 0 && sblock.fs_size <= cgdmin(&sblock, sblock.fs_ncg - 1)) { humanize_number(oldsizebuf, sizeof(oldsizebuf), (sblock.fs_size % sblock.fs_fpg) * sblock.fs_fsize, "B", HN_AUTOSCALE, HN_B | HN_NOSPACE | HN_DECIMAL); warnx("no room to allocate last cylinder group; " "leaving %s unused", oldsizebuf); sblock.fs_ncg--; if (sblock.fs_magic == FS_UFS1_MAGIC) sblock.fs_old_ncyl = sblock.fs_ncg * sblock.fs_old_cpg; sblock.fs_size = sblock.fs_ncg * sblock.fs_fpg; } /* * Update the space for the cylinder group summary information in the * respective cylinder group data area. 
*/ sblock.fs_cssize = fragroundup(&sblock, sblock.fs_ncg * sizeof(struct csum)); if (osblock.fs_size >= sblock.fs_size) errx(1, "not enough new space"); DBG_PRINT0("sblock calculated\n"); /* * Ok, everything prepared, so now let's do the tricks. */ growfs(fsi, fso, Nflag); close(fsi); if (fso > -1) { if (statfsp != NULL && (statfsp->f_flags & MNT_RDONLY) == 0) { error = ioctl(fso, UFSRESUME); if (error != 0) err(1, "UFSRESUME"); } error = close(fso); if (error != 0) err(1, "close"); if (statfsp != NULL && (statfsp->f_flags & MNT_RDONLY) != 0) mount_reload(statfsp); } DBG_CLOSE; DBG_LEAVE; return (0); } /* * Dump a line of usage. */ static void usage(void) { DBG_FUNC("usage") DBG_ENTER; fprintf(stderr, "usage: growfs [-Ny] [-s size] special | filesystem\n"); DBG_LEAVE; exit(1); } /* * This updates most parameters and the bitmap related to cluster. We have to * assume that sblock, osblock, acg are set up. */ static void updclst(int block) { DBG_FUNC("updclst") static int lcs = 0; DBG_ENTER; if (sblock.fs_contigsumsize < 1) /* no clustering */ return; /* * update cluster allocation map */ setbit(cg_clustersfree(&acg), block); /* * update cluster summary table */ if (!lcs) { /* * calculate size for the trailing cluster */ for (block--; lcs < sblock.fs_contigsumsize; block--, lcs++ ) { if (isclr(cg_clustersfree(&acg), block)) break; } } if (lcs < sblock.fs_contigsumsize) { if (lcs) cg_clustersum(&acg)[lcs]--; lcs++; cg_clustersum(&acg)[lcs]++; } DBG_LEAVE; return; } static void mount_reload(const struct statfs *stfs) { char errmsg[255]; struct iovec *iov; int iovlen; iov = NULL; iovlen = 0; *errmsg = '\0'; build_iovec(&iov, &iovlen, "fstype", __DECONST(char *, "ffs"), 4); build_iovec(&iov, &iovlen, "fspath", __DECONST(char *, stfs->f_mntonname), (size_t)-1); build_iovec(&iov, &iovlen, "errmsg", errmsg, sizeof(errmsg)); build_iovec(&iov, &iovlen, "update", NULL, 0); build_iovec(&iov, &iovlen, "reload", NULL, 0); if (nmount(iov, iovlen, stfs->f_flags) < 0) { errmsg[sizeof(errmsg) - 1] = '\0'; err(9, "%s: cannot reload filesystem%s%s", stfs->f_mntonname, *errmsg != '\0' ? ": " : "", errmsg); } } /* * Calculate the check-hash of the cylinder group. */ static void cgckhash(struct cg *cgp) { if ((sblock.fs_metackhash & CK_CYLGRP) == 0) return; cgp->cg_ckhash = 0; cgp->cg_ckhash = calculate_crc32c(~0L, (void *)cgp, sblock.fs_cgsize); } Index: projects/clang700-import/sbin/ipfw/ipfw.8 =================================================================== --- projects/clang700-import/sbin/ipfw/ipfw.8 (revision 340426) +++ projects/clang700-import/sbin/ipfw/ipfw.8 (revision 340427) @@ -1,4668 +1,4668 @@ .\" .\" $FreeBSD$ .\" -.Dd November 12, 2018 +.Dd November 13, 2018 .Dt IPFW 8 .Os .Sh NAME .Nm ipfw .Nd User interface for firewall, traffic shaper, packet scheduler, in-kernel NAT. .Sh SYNOPSIS .Ss FIREWALL CONFIGURATION .Nm .Op Fl cq .Cm add .Ar rule .Nm .Op Fl acdefnNStT .Op Cm set Ar N .Brq Cm list | show .Op Ar rule | first-last ... .Nm .Op Fl f | q .Op Cm set Ar N .Cm flush .Nm .Op Fl q .Op Cm set Ar N .Brq Cm delete | zero | resetlog .Op Ar number ... .Pp .Nm .Cm set Oo Cm disable Ar number ... Oc Op Cm enable Ar number ... 
.Nm .Cm set move .Op Cm rule .Ar number Cm to Ar number .Nm .Cm set swap Ar number number .Nm .Cm set show .Ss SYSCTL SHORTCUTS .Nm .Cm enable .Brq Cm firewall | altq | one_pass | debug | verbose | dyn_keepalive .Nm .Cm disable .Brq Cm firewall | altq | one_pass | debug | verbose | dyn_keepalive .Ss LOOKUP TABLES .Nm .Oo Cm set Ar N Oc Cm table Ar name Cm create Ar create-options .Nm .Oo Cm set Ar N Oc Cm table .Brq Ar name | all .Cm destroy .Nm .Oo Cm set Ar N Oc Cm table Ar name Cm modify Ar modify-options .Nm .Oo Cm set Ar N Oc Cm table Ar name Cm swap Ar name .Nm .Oo Cm set Ar N Oc Cm table Ar name Cm add Ar table-key Op Ar value .Nm .Oo Cm set Ar N Oc Cm table Ar name Cm add Op Ar table-key Ar value ... .Nm .Oo Cm set Ar N Oc Cm table Ar name Cm atomic add Op Ar table-key Ar value ... .Nm .Oo Cm set Ar N Oc Cm table Ar name Cm delete Op Ar table-key ... .Nm .Oo Cm set Ar N Oc Cm table Ar name Cm lookup Ar addr .Nm .Oo Cm set Ar N Oc Cm table Ar name Cm lock .Nm .Oo Cm set Ar N Oc Cm table Ar name Cm unlock .Nm .Oo Cm set Ar N Oc Cm table .Brq Ar name | all .Cm list .Nm .Oo Cm set Ar N Oc Cm table .Brq Ar name | all .Cm info .Nm .Oo Cm set Ar N Oc Cm table .Brq Ar name | all .Cm detail .Nm .Oo Cm set Ar N Oc Cm table .Brq Ar name | all .Cm flush .Ss DUMMYNET CONFIGURATION (TRAFFIC SHAPER AND PACKET SCHEDULER) .Nm .Brq Cm pipe | queue | sched .Ar number .Cm config .Ar config-options .Nm .Op Fl s Op Ar field .Brq Cm pipe | queue | sched .Brq Cm delete | list | show .Op Ar number ... .Ss IN-KERNEL NAT .Nm .Op Fl q .Cm nat .Ar number .Cm config .Ar config-options -.Pp -.Nm -.Op Fl cfnNqS -.Oo -.Fl p Ar preproc -.Oo -.Ar preproc-flags -.Oc -.Oc -.Ar pathname .Ss STATEFUL IPv6/IPv4 NETWORK ADDRESS AND PROTOCOL TRANSLATION .Nm .Oo Cm set Ar N Oc Cm nat64lsn Ar name Cm create Ar create-options .Nm .Oo Cm set Ar N Oc Cm nat64lsn Ar name Cm config Ar config-options .Nm .Oo Cm set Ar N Oc Cm nat64lsn .Brq Ar name | all .Brq Cm list | show .Op Cm states .Nm .Oo Cm set Ar N Oc Cm nat64lsn .Brq Ar name | all .Cm destroy .Nm .Oo Cm set Ar N Oc Cm nat64lsn Ar name Cm stats Op Cm reset .Ss STATELESS IPv6/IPv4 NETWORK ADDRESS AND PROTOCOL TRANSLATION .Nm .Oo Cm set Ar N Oc Cm nat64stl Ar name Cm create Ar create-options .Nm .Oo Cm set Ar N Oc Cm nat64stl Ar name Cm config Ar config-options .Nm .Oo Cm set Ar N Oc Cm nat64stl .Brq Ar name | all .Brq Cm list | show .Nm .Oo Cm set Ar N Oc Cm nat64stl .Brq Ar name | all .Cm destroy .Nm .Oo Cm set Ar N Oc Cm nat64stl Ar name Cm stats Op Cm reset .Ss IPv6-to-IPv6 NETWORK PREFIX TRANSLATION .Nm .Oo Cm set Ar N Oc Cm nptv6 Ar name Cm create Ar create-options .Nm .Oo Cm set Ar N Oc Cm nptv6 .Brq Ar name | all .Brq Cm list | show .Nm .Oo Cm set Ar N Oc Cm nptv6 .Brq Ar name | all .Cm destroy .Nm .Oo Cm set Ar N Oc Cm nptv6 Ar name Cm stats Op Cm reset .Ss INTERNAL DIAGNOSTICS .Nm .Cm internal iflist .Nm .Cm internal talist .Nm .Cm internal vlist +.Ss LIST OF RULES AND PREPROCESSING +.Nm +.Op Fl cfnNqS +.Oo +.Fl p Ar preproc +.Oo +.Ar preproc-flags +.Oc +.Oc +.Ar pathname .Sh DESCRIPTION The .Nm utility is the user interface for controlling the .Xr ipfw 4 firewall, the .Xr dummynet 4 traffic shaper/packet scheduler, and the in-kernel NAT services. .Pp A firewall configuration, or .Em ruleset , is made of a list of .Em rules numbered from 1 to 65535. 
Packets are passed to the firewall from a number of different places in the protocol stack (depending on the source and destination of the packet, it is possible for the firewall to be invoked multiple times on the same packet). The packet passed to the firewall is compared against each of the rules in the .Em ruleset , in rule-number order (multiple rules with the same number are permitted, in which case they are processed in order of insertion). When a match is found, the action corresponding to the matching rule is performed. .Pp Depending on the action and certain system settings, packets can be reinjected into the firewall at some rule after the matching one for further processing. .Pp A ruleset always includes a .Em default rule (numbered 65535) which cannot be modified or deleted, and matches all packets. The action associated with the .Em default rule can be either .Cm deny or .Cm allow depending on how the kernel is configured. .Pp If the ruleset includes one or more rules with the .Cm keep-state , .Cm record-state , .Cm limit or .Cm set-limit option, the firewall will have a .Em stateful behaviour, i.e., upon a match it will create .Em dynamic rules , i.e., rules that match packets with the same 5-tuple (protocol, source and destination addresses and ports) as the packet which caused their creation. Dynamic rules, which have a limited lifetime, are checked at the first occurrence of a .Cm check-state , .Cm keep-state or .Cm limit rule, and are typically used to open the firewall on-demand to legitimate traffic only. Please note that .Cm keep-state and .Cm limit imply an implicit .Cm check-state for all packets (not only those matched by the rule) but .Cm record-state and .Cm set-limit have no implicit .Cm check-state . See the .Sx STATEFUL FIREWALL and .Sx EXAMPLES Sections below for more information on the stateful behaviour of .Nm . .Pp All rules (including dynamic ones) have a few associated counters: a packet count, a byte count, a log count and a timestamp indicating the time of the last match. Counters can be displayed or reset with .Nm commands. .Pp Each rule belongs to one of 32 different .Em sets , and there are .Nm commands to atomically manipulate sets, such as enable, disable, swap sets, move all rules in a set to another one, delete all rules in a set. These can be useful to install temporary configurations, or to test them. See Section .Sx SETS OF RULES for more information on .Em sets . .Pp Rules can be added with the .Cm add command; deleted individually or in groups with the .Cm delete command, and globally (except those in set 31) with the .Cm flush command; displayed, optionally with the content of the counters, using the .Cm show and .Cm list commands. Finally, counters can be reset with the .Cm zero and .Cm resetlog commands. .Pp .Ss COMMAND OPTIONS The following general options are available when invoking .Nm : .Bl -tag -width indent .It Fl a Show counter values when listing rules. The .Cm show command implies this option. .It Fl b Only show the action and the comment, not the body of a rule. Implies .Fl c . .It Fl c When entering or showing rules, print them in compact form, i.e., omitting the "ip from any to any" string when this does not carry any additional information. .It Fl d When listing, show dynamic rules in addition to static ones. .It Fl e When listing and .Fl d is specified, also show expired dynamic rules. .It Fl f Run without prompting for confirmation for commands that can cause problems if misused, i.e., .Cm flush .
If there is no tty associated with the process, this is implied. The .Cm delete command with this flag ignores possible errors, i.e., nonexistent rule number. And for batched commands execution continues with the next command. .It Fl i When listing a table (see the .Sx LOOKUP TABLES section below for more information on lookup tables), format values as IP addresses. By default, values are shown as integers. .It Fl n Only check syntax of the command strings, without actually passing them to the kernel. .It Fl N Try to resolve addresses and service names in output. .It Fl q Be quiet when executing the .Cm add , .Cm nat , .Cm zero , .Cm resetlog or .Cm flush commands; (implies .Fl f ) . This is useful when updating rulesets by executing multiple .Nm commands in a script (e.g., .Ql sh\ /etc/rc.firewall ) , or by processing a file with many .Nm rules across a remote login session. It also stops a table add or delete from failing if the entry already exists or is not present. .Pp The reason why this option may be important is that for some of these actions, .Nm may print a message; if the action results in blocking the traffic to the remote client, the remote login session will be closed and the rest of the ruleset will not be processed. Access to the console would then be required to recover. .It Fl S When listing rules, show the .Em set each rule belongs to. If this flag is not specified, disabled rules will not be listed. .It Fl s Op Ar field When listing pipes, sort according to one of the four counters (total or current packets or bytes). .It Fl t When listing, show last match timestamp converted with ctime(). .It Fl T When listing, show last match timestamp as seconds from the epoch. This form can be more convenient for postprocessing by scripts. .El .Ss LIST OF RULES AND PREPROCESSING To ease configuration, rules can be put into a file which is processed using .Nm as shown in the last synopsis line. An absolute .Ar pathname must be used. The file will be read line by line and applied as arguments to the .Nm utility. .Pp Optionally, a preprocessor can be specified using .Fl p Ar preproc where .Ar pathname is to be piped through. Useful preprocessors include .Xr cpp 1 and .Xr m4 1 . If .Ar preproc does not start with a slash .Pq Ql / as its first character, the usual .Ev PATH name search is performed. Care should be taken with this in environments where not all file systems are mounted (yet) by the time .Nm is being run (e.g.\& when they are mounted over NFS). Once .Fl p has been specified, any additional arguments are passed on to the preprocessor for interpretation. This allows for flexible configuration files (like conditionalizing them on the local hostname) and the use of macros to centralize frequently required arguments like IP addresses. .Ss TRAFFIC SHAPER CONFIGURATION The .Nm .Cm pipe , queue and .Cm sched commands are used to configure the traffic shaper and packet scheduler. See the .Sx TRAFFIC SHAPER (DUMMYNET) CONFIGURATION Section below for details. .Pp If the world and the kernel get out of sync the .Nm ABI may break, preventing you from being able to add any rules. This can adversely affect the booting process. You can use .Nm .Cm disable .Cm firewall to temporarily disable the firewall to regain access to the network, allowing you to fix the problem. .Sh PACKET FLOW A packet is checked against the active ruleset in multiple places in the protocol stack, under control of several sysctl variables. 
These places and variables are shown below, and it is important to have this picture in mind in order to design a correct ruleset. .Bd -literal -offset indent ^ to upper layers V | | +----------->-----------+ ^ V [ip(6)_input] [ip(6)_output] net.inet(6).ip(6).fw.enable=1 | | ^ V [ether_demux] [ether_output_frame] net.link.ether.ipfw=1 | | +-->--[bdg_forward]-->--+ net.link.bridge.ipfw=1 ^ V | to devices | .Ed .Pp The number of times the same packet goes through the firewall can vary between 0 and 4 depending on packet source and destination, and system configuration. .Pp Note that as packets flow through the stack, headers can be stripped or added to it, and so they may or may not be available for inspection. E.g., incoming packets will include the MAC header when .Nm is invoked from .Cm ether_demux() , but the same packets will have the MAC header stripped off when .Nm is invoked from .Cm ip_input() or .Cm ip6_input() . .Pp Also note that each packet is always checked against the complete ruleset, irrespective of the place where the check occurs, or the source of the packet. If a rule contains some match patterns or actions which are not valid for the place of invocation (e.g.\& trying to match a MAC header within .Cm ip_input or .Cm ip6_input ), the match pattern will not match, but a .Cm not operator in front of such patterns .Em will cause the pattern to .Em always match on those packets. It is thus the responsibility of the programmer, if necessary, to write a suitable ruleset to differentiate among the possible places. .Cm skipto rules can be useful here, as an example: .Bd -literal -offset indent # packets from ether_demux or bdg_forward ipfw add 10 skipto 1000 all from any to any layer2 in # packets from ip_input ipfw add 10 skipto 2000 all from any to any not layer2 in # packets from ip_output ipfw add 10 skipto 3000 all from any to any not layer2 out # packets from ether_output_frame ipfw add 10 skipto 4000 all from any to any layer2 out .Ed .Pp (yes, at the moment there is no way to differentiate between ether_demux and bdg_forward). .Pp Also note that only actions .Cm allow, .Cm deny, .Cm netgraph, .Cm ngtee and related to .Cm dummynet are processed for .Cm layer2 frames and all other actions act as if they were .Cm allow for such frames. Full set of actions is supported for IP packets without .Cm layer2 headers only. For example, .Cm divert action does not divert .Cm layer2 frames. .Sh SYNTAX In general, each keyword or argument must be provided as a separate command line argument, with no leading or trailing spaces. Keywords are case-sensitive, whereas arguments may or may not be case-sensitive depending on their nature (e.g.\& uid's are, hostnames are not). .Pp Some arguments (e.g., port or address lists) are comma-separated lists of values. In this case, spaces after commas ',' are allowed to make the line more readable. You can also put the entire command (including flags) into a single argument. 
E.g., the following forms are equivalent: .Bd -literal -offset indent ipfw -q add deny src-ip 10.0.0.0/24,127.0.0.1/8 ipfw -q add deny src-ip 10.0.0.0/24, 127.0.0.1/8 ipfw "-q add deny src-ip 10.0.0.0/24, 127.0.0.1/8" .Ed .Sh RULE FORMAT The format of firewall rules is the following: .Bd -ragged -offset indent .Bk -words .Op Ar rule_number .Op Cm set Ar set_number .Op Cm prob Ar match_probability .Ar action .Op Cm log Op Cm logamount Ar number .Op Cm altq Ar queue .Oo .Bro Cm tag | untag .Brc Ar number .Oc .Ar body .Ek .Ed .Pp where the body of the rule specifies which information is used for filtering packets, among the following: .Pp .Bl -tag -width "Source and dest. addresses and ports" -offset XXX -compact .It Layer-2 header fields When available .It IPv4 and IPv6 Protocol SCTP, TCP, UDP, ICMP, etc. .It Source and dest. addresses and ports .It Direction See Section .Sx PACKET FLOW .It Transmit and receive interface By name or address .It Misc. IP header fields Version, type of service, datagram length, identification, fragment flag (non-zero IP offset), Time To Live .It IP options .It IPv6 Extension headers Fragmentation, Hop-by-Hop options, Routing Headers, Source routing rthdr0, Mobile IPv6 rthdr2, IPSec options. .It IPv6 Flow-ID .It Misc. TCP header fields TCP flags (SYN, FIN, ACK, RST, etc.), sequence number, acknowledgment number, window .It TCP options .It ICMP types for ICMP packets .It ICMP6 types for ICMP6 packets .It User/group ID When the packet can be associated with a local socket. .It Divert status Whether a packet came from a divert socket (e.g., .Xr natd 8 ) . .It Fib annotation state Whether a packet has been tagged for using a specific FIB (routing table) in future forwarding decisions. .El .Pp Note that some of the above information, e.g.\& source MAC or IP addresses and TCP/UDP ports, can be easily spoofed, so filtering on those fields alone might not guarantee the desired results. .Bl -tag -width indent .It Ar rule_number Each rule is associated with a .Ar rule_number in the range 1..65535, with the latter reserved for the .Em default rule. Rules are checked sequentially by rule number. Multiple rules can have the same number, in which case they are checked (and listed) according to the order in which they have been added. If a rule is entered without specifying a number, the kernel will assign one in such a way that the rule becomes the last one before the .Em default rule. Automatic rule numbers are assigned by incrementing the last non-default rule number by the value of the sysctl variable .Ar net.inet.ip.fw.autoinc_step which defaults to 100. If this is not possible (e.g.\& because we would go beyond the maximum allowed rule number), the number of the last non-default value is used instead. .It Cm set Ar set_number Each rule is associated with a .Ar set_number in the range 0..31. Sets can be individually disabled and enabled, so this parameter is of fundamental importance for atomic ruleset manipulation. It can be also used to simplify deletion of groups of rules. If a rule is entered without specifying a set number, set 0 will be used. .br Set 31 is special in that it cannot be disabled, and rules in set 31 are not deleted by the .Nm ipfw flush command (but you can delete them with the .Nm ipfw delete set 31 command). Set 31 is also used for the .Em default rule. .It Cm prob Ar match_probability A match is only declared with the specified probability (floating point number between 0 and 1). 
This can be useful for a number of applications such as random packet drop or (in conjunction with .Nm dummynet ) to simulate the effect of multiple paths leading to out-of-order packet delivery. .Pp Note: this condition is checked before any other condition, including ones such as .Cm keep-state or .Cm check-state which might have side effects. .It Cm log Op Cm logamount Ar number Packets matching a rule with the .Cm log keyword will be made available for logging in two ways: if the sysctl variable .Va net.inet.ip.fw.verbose is set to 0 (default), one can use .Xr bpf 4 attached to the .Li ipfw0 pseudo interface. This pseudo interface can be created after a boot manually by using the following command: .Bd -literal -offset indent # ifconfig ipfw0 create .Ed .Pp Or, automatically at boot time by adding the following line to the .Xr rc.conf 5 file: .Bd -literal -offset indent firewall_logif="YES" .Ed .Pp There is no overhead if no .Xr bpf 4 is attached to the pseudo interface. .Pp If .Va net.inet.ip.fw.verbose is set to 1, packets will be logged to .Xr syslogd 8 with a .Dv LOG_SECURITY facility up to a maximum of .Cm logamount packets. If no .Cm logamount is specified, the limit is taken from the sysctl variable .Va net.inet.ip.fw.verbose_limit . In both cases, a value of 0 means unlimited logging. .Pp Once the limit is reached, logging can be re-enabled by clearing the logging counter or the packet counter for that entry, see the .Cm resetlog command. .Pp Note: logging is done after all other packet matching conditions have been successfully verified, and before performing the final action (accept, deny, etc.) on the packet. .It Cm tag Ar number When a packet matches a rule with the .Cm tag keyword, the numeric tag for the given .Ar number in the range 1..65534 will be attached to the packet. The tag acts as an internal marker (it is not sent out over the wire) that can be used to identify these packets later on. This can be used, for example, to provide trust between interfaces and to start doing policy-based filtering. A packet can have multiple tags at the same time. Tags are "sticky", meaning once a tag is applied to a packet by a matching rule it exists until explicit removal. Tags are kept with the packet everywhere within the kernel, but are lost when packet leaves the kernel, for example, on transmitting packet out to the network or sending packet to a .Xr divert 4 socket. .Pp To check for previously applied tags, use the .Cm tagged rule option. To delete previously applied tag, use the .Cm untag keyword. .Pp Note: since tags are kept with the packet everywhere in kernelspace, they can be set and unset anywhere in the kernel network subsystem (using the .Xr mbuf_tags 9 facility), not only by means of the .Xr ipfw 4 .Cm tag and .Cm untag keywords. For example, there can be a specialized .Xr netgraph 4 node doing traffic analyzing and tagging for later inspecting in firewall. .It Cm untag Ar number When a packet matches a rule with the .Cm untag keyword, the tag with the number .Ar number is searched among the tags attached to this packet and, if found, removed from it. Other tags bound to packet, if present, are left untouched. .It Cm altq Ar queue When a packet matches a rule with the .Cm altq keyword, the ALTQ identifier for the given .Ar queue (see .Xr altq 4 ) will be attached. Note that this ALTQ tag is only meaningful for packets going "out" of IPFW, and not being rejected or going to divert sockets. 
Note that if there is insufficient memory at the time the packet is processed, it will not be tagged, so it is wise to make your ALTQ "default" queue policy account for this. If multiple .Cm altq rules match a single packet, only the first one adds the ALTQ classification tag. In doing so, traffic may be shaped by using .Cm count Cm altq Ar queue rules for classification early in the ruleset, then later applying the filtering decision. For example, .Cm check-state and .Cm keep-state rules may come later and provide the actual filtering decisions in addition to the fallback ALTQ tag. .Pp You must run .Xr pfctl 8 to set up the queues before IPFW will be able to look them up by name, and if the ALTQ disciplines are rearranged, the rules in containing the queue identifiers in the kernel will likely have gone stale and need to be reloaded. Stale queue identifiers will probably result in misclassification. .Pp All system ALTQ processing can be turned on or off via .Nm .Cm enable Ar altq and .Nm .Cm disable Ar altq . The usage of .Va net.inet.ip.fw.one_pass is irrelevant to ALTQ traffic shaping, as the actual rule action is followed always after adding an ALTQ tag. .El .Ss RULE ACTIONS A rule can be associated with one of the following actions, which will be executed when the packet matches the body of the rule. .Bl -tag -width indent .It Cm allow | accept | pass | permit Allow packets that match rule. The search terminates. .It Cm check-state Op Ar :flowname | Cm :any Checks the packet against the dynamic ruleset. If a match is found, execute the action associated with the rule which generated this dynamic rule, otherwise move to the next rule. .br .Cm Check-state rules do not have a body. If no .Cm check-state rule is found, the dynamic ruleset is checked at the first .Cm keep-state or .Cm limit rule. The .Ar :flowname is symbolic name assigned to dynamic rule by .Cm keep-state opcode. The special flowname .Cm :any can be used to ignore states flowname when matching. The .Cm :default keyword is special name used for compatibility with old rulesets. .It Cm count Update counters for all packets that match rule. The search continues with the next rule. .It Cm deny | drop Discard packets that match this rule. The search terminates. .It Cm divert Ar port Divert packets that match this rule to the .Xr divert 4 socket bound to port .Ar port . The search terminates. .It Cm fwd | forward Ar ipaddr | tablearg Ns Op , Ns Ar port Change the next-hop on matching packets to .Ar ipaddr , which can be an IP address or a host name. The next hop can also be supplied by the last table looked up for the packet by using the .Cm tablearg keyword instead of an explicit address. The search terminates if this rule matches. .Pp If .Ar ipaddr is a local address, then matching packets will be forwarded to .Ar port (or the port number in the packet if one is not specified in the rule) on the local machine. .br If .Ar ipaddr is not a local address, then the port number (if specified) is ignored, and the packet will be forwarded to the remote address, using the route as found in the local routing table for that IP. .br A .Ar fwd rule will not match layer-2 packets (those received on ether_input, ether_output, or bridged). .br The .Cm fwd action does not change the contents of the packet at all. In particular, the destination address remains unmodified, so packets forwarded to another system will usually be rejected by that system unless there is a matching rule on that system to capture them. 
For packets forwarded locally, the local address of the socket will be set to the original destination address of the packet. This makes the .Xr netstat 1 entry look rather weird but is intended for use with transparent proxy servers. .It Cm nat Ar nat_nr | tablearg Pass packet to a nat instance (for network address translation, address redirect, etc.): see the .Sx NETWORK ADDRESS TRANSLATION (NAT) Section for further information. .It Cm nat64lsn Ar name Pass packet to a stateful NAT64 instance (for IPv6/IPv4 network address and protocol translation): see the .Sx IPv6/IPv4 NETWORK ADDRESS AND PROTOCOL TRANSLATION Section for further information. .It Cm nat64stl Ar name Pass packet to a stateless NAT64 instance (for IPv6/IPv4 network address and protocol translation): see the .Sx IPv6/IPv4 NETWORK ADDRESS AND PROTOCOL TRANSLATION Section for further information. .It Cm nptv6 Ar name Pass packet to a NPTv6 instance (for IPv6-to-IPv6 network prefix translation): see the .Sx IPv6-to-IPv6 NETWORK PREFIX TRANSLATION (NPTv6) Section for further information. .It Cm pipe Ar pipe_nr Pass packet to a .Nm dummynet .Dq pipe (for bandwidth limitation, delay, etc.). See the .Sx TRAFFIC SHAPER (DUMMYNET) CONFIGURATION Section for further information. The search terminates; however, on exit from the pipe and if the .Xr sysctl 8 variable .Va net.inet.ip.fw.one_pass is not set, the packet is passed again to the firewall code starting from the next rule. .It Cm queue Ar queue_nr Pass packet to a .Nm dummynet .Dq queue (for bandwidth limitation using WF2Q+). .It Cm reject (Deprecated). Synonym for .Cm unreach host . .It Cm reset Discard packets that match this rule, and if the packet is a TCP packet, try to send a TCP reset (RST) notice. The search terminates. .It Cm reset6 Discard packets that match this rule, and if the packet is a TCP packet, try to send a TCP reset (RST) notice. The search terminates. .It Cm skipto Ar number | tablearg Skip all subsequent rules numbered less than .Ar number . The search continues with the first rule numbered .Ar number or higher. It is possible to use the .Cm tablearg keyword with a skipto for a .Em computed skipto. Skipto may work either in O(log(N)) or in O(1) depending on amount of memory and/or sysctl variables. See the .Sx SYSCTL VARIABLES section for more details. .It Cm call Ar number | tablearg The current rule number is saved in the internal stack and ruleset processing continues with the first rule numbered .Ar number or higher. If later a rule with the .Cm return action is encountered, the processing returns to the first rule with number of this .Cm call rule plus one or higher (the same behaviour as with packets returning from .Xr divert 4 socket after a .Cm divert action). This could be used to make somewhat like an assembly language .Dq subroutine calls to rules with common checks for different interfaces, etc. .Pp Rule with any number could be called, not just forward jumps as with .Cm skipto . So, to prevent endless loops in case of mistakes, both .Cm call and .Cm return actions don't do any jumps and simply go to the next rule if memory cannot be allocated or stack overflowed/underflowed. .Pp Internally stack for rule numbers is implemented using .Xr mbuf_tags 9 facility and currently has size of 16 entries. As mbuf tags are lost when packet leaves the kernel, .Cm divert should not be used in subroutines to avoid endless loops and other undesired effects. 
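.Pp
A minimal sketch only (the rule numbers and the em0 interface name are
placeholders, not part of any standard configuration): a shared set of checks
kept at rules 10000-10999 could be called for every packet arriving on em0 as
follows:
.Bd -literal -offset indent
# jump into the shared checks for packets received on em0
ipfw add 100 call 10000 ip from any to any in recv em0
# the shared checks themselves
ipfw add 10000 deny ip from 10.0.0.0/8 to any
# return to the rule following the matching call rule
ipfw add 10999 return via any
.Ed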
.It Cm return Takes rule number saved to internal stack by the last .Cm call action and returns ruleset processing to the first rule with number greater than number of corresponding .Cm call rule. See description of the .Cm call action for more details. .Pp Note that .Cm return rules usually end a .Dq subroutine and thus are unconditional, but .Nm command-line utility currently requires every action except .Cm check-state to have body. While it is sometimes useful to return only on some packets, usually you want to print just .Dq return for readability. A workaround for this is to use new syntax and .Fl c switch: .Bd -literal -offset indent # Add a rule without actual body ipfw add 2999 return via any # List rules without "from any to any" part ipfw -c list .Ed .Pp This cosmetic annoyance may be fixed in future releases. .It Cm tee Ar port Send a copy of packets matching this rule to the .Xr divert 4 socket bound to port .Ar port . The search continues with the next rule. .It Cm unreach Ar code Discard packets that match this rule, and try to send an ICMP unreachable notice with code .Ar code , where .Ar code is a number from 0 to 255, or one of these aliases: .Cm net , host , protocol , port , .Cm needfrag , srcfail , net-unknown , host-unknown , .Cm isolated , net-prohib , host-prohib , tosnet , .Cm toshost , filter-prohib , host-precedence or .Cm precedence-cutoff . The search terminates. .It Cm unreach6 Ar code Discard packets that match this rule, and try to send an ICMPv6 unreachable notice with code .Ar code , where .Ar code is a number from 0, 1, 3 or 4, or one of these aliases: .Cm no-route, admin-prohib, address or .Cm port . The search terminates. .It Cm netgraph Ar cookie Divert packet into netgraph with given .Ar cookie . The search terminates. If packet is later returned from netgraph it is either accepted or continues with the next rule, depending on .Va net.inet.ip.fw.one_pass sysctl variable. .It Cm ngtee Ar cookie A copy of packet is diverted into netgraph, original packet continues with the next rule. See .Xr ng_ipfw 4 for more information on .Cm netgraph and .Cm ngtee actions. .It Cm setfib Ar fibnum | tablearg The packet is tagged so as to use the FIB (routing table) .Ar fibnum in any subsequent forwarding decisions. In the current implementation, this is limited to the values 0 through 15, see .Xr setfib 2 . Processing continues at the next rule. It is possible to use the .Cm tablearg keyword with setfib. If the tablearg value is not within the compiled range of fibs, the packet's fib is set to 0. .It Cm setdscp Ar DSCP | number | tablearg Set specified DiffServ codepoint for an IPv4/IPv6 packet. Processing continues at the next rule. Supported values are: .Pp .Cm cs0 .Pq Dv 000000 , .Cm cs1 .Pq Dv 001000 , .Cm cs2 .Pq Dv 010000 , .Cm cs3 .Pq Dv 011000 , .Cm cs4 .Pq Dv 100000 , .Cm cs5 .Pq Dv 101000 , .Cm cs6 .Pq Dv 110000 , .Cm cs7 .Pq Dv 111000 , .Cm af11 .Pq Dv 001010 , .Cm af12 .Pq Dv 001100 , .Cm af13 .Pq Dv 001110 , .Cm af21 .Pq Dv 010010 , .Cm af22 .Pq Dv 010100 , .Cm af23 .Pq Dv 010110 , .Cm af31 .Pq Dv 011010 , .Cm af32 .Pq Dv 011100 , .Cm af33 .Pq Dv 011110 , .Cm af41 .Pq Dv 100010 , .Cm af42 .Pq Dv 100100 , .Cm af43 .Pq Dv 100110 , .Cm ef .Pq Dv 101110 , .Cm be .Pq Dv 000000 . Additionally, DSCP value can be specified by number (0..64). It is also possible to use the .Cm tablearg keyword with setdscp. If the tablearg value is not within the 0..64 range, lower 6 bits of supplied value are used. 
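.Pp
As an illustration only (the rule number, network and port shown are
placeholders), outgoing SIP traffic could be remarked with the Expedited
Forwarding codepoint:
.Bd -literal -offset indent
ipfw add 300 setdscp ef udp from 192.0.2.0/24 to any 5060 out
.Ed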
.It Cm tcp-setmss Ar mss Set the Maximum Segment Size (MSS) in the TCP segment to value .Ar mss . The kernel module .Cm ipfw_pmod should be loaded, or the kernel should have .Cm options IPFIREWALL_PMOD to be able to use this action. This command does not change a packet if the original MSS value is lower than the specified value. Both TCP over IPv4 and over IPv6 are supported. Regardless of whether a packet matched the .Cm tcp-setmss rule or not, the search continues with the next rule. .It Cm reass Queue and reassemble IPv4 fragments. If the packet is not fragmented, counters are updated and processing continues with the next rule. If the packet is the last logical fragment, the packet is reassembled and, if .Va net.inet.ip.fw.one_pass is set to 0, processing continues with the next rule. Otherwise, the packet is allowed to pass and the search terminates. If the packet is a fragment in the middle of a logical group of fragments, it is consumed and processing stops immediately. .Pp Fragment handling can be tuned via .Va net.inet.ip.maxfragpackets and .Va net.inet.ip.maxfragsperpacket which limit, respectively, the maximum number of processable fragments (default: 800) and the maximum number of fragments per packet (default: 16). .Pp NOTA BENE: since fragments do not contain port numbers, they should be avoided with the .Nm reass rule. Alternatively, direction-based (like .Nm in / .Nm out ) and source-based (like .Nm via ) match patterns can be used to select fragments. .Pp Usually a simple rule like: .Bd -literal -offset indent # reassemble incoming fragments ipfw add reass all from any to any in .Ed .Pp is all you need at the beginning of your ruleset. .It Cm abort Discard packets that match this rule, and if the packet is an SCTP packet, try to send an SCTP packet containing an ABORT chunk. The search terminates. .It Cm abort6 Discard packets that match this rule, and if the packet is an SCTP packet, try to send an SCTP packet containing an ABORT chunk. The search terminates. .El .Ss RULE BODY The body of a rule contains zero or more patterns (such as specific source and destination addresses or ports, protocol options, incoming or outgoing interfaces, etc.) that the packet must match in order to be recognised. In general, the patterns are connected by (implicit) .Cm and operators -- i.e., all must match in order for the rule to match. Individual patterns can be prefixed by the .Cm not operator to reverse the result of the match, as in .Pp .Dl "ipfw add 100 allow ip from not 1.2.3.4 to any" .Pp Additionally, sets of alternative match patterns .Pq Em or-blocks can be constructed by putting the patterns in lists enclosed between parentheses ( ) or braces { }, and using the .Cm or operator as follows: .Pp .Dl "ipfw add 100 allow ip from { x or not y or z } to any" .Pp Only one level of parentheses is allowed. Beware that most shells have special meanings for parentheses or braces, so it is advisable to put a backslash \\ in front of them to prevent such interpretations. .Pp The body of a rule must in general include a source and destination address specifier. The keyword .Ar any can be used in various places to specify that the content of a required field is irrelevant. .Pp The rule body has the following format: .Bd -ragged -offset indent .Op Ar proto Cm from Ar src Cm to Ar dst .Op Ar options .Ed .Pp The first part (proto from src to dst) is for backward compatibility with earlier versions of .Fx .
In modern .Fx any match pattern (including MAC headers, IP protocols, addresses and ports) can be specified in the .Ar options section. .Pp Rule fields have the following meaning: .Bl -tag -width indent .It Ar proto : protocol | Cm { Ar protocol Cm or ... } .It Ar protocol : Oo Cm not Oc Ar protocol-name | protocol-number An IP protocol specified by number or name (for a complete list see .Pa /etc/protocols ) , or one of the following keywords: .Bl -tag -width indent .It Cm ip4 | ipv4 Matches IPv4 packets. .It Cm ip6 | ipv6 Matches IPv6 packets. .It Cm ip | all Matches any packet. .El .Pp The .Cm ipv6 in .Cm proto option will be treated as inner protocol. And, the .Cm ipv4 is not available in .Cm proto option. .Pp The .Cm { Ar protocol Cm or ... } format (an .Em or-block ) is provided for convenience only but its use is deprecated. .It Ar src No and Ar dst : Bro Cm addr | Cm { Ar addr Cm or ... } Brc Op Oo Cm not Oc Ar ports An address (or a list, see below) optionally followed by .Ar ports specifiers. .Pp The second format .Em ( or-block with multiple addresses) is provided for convenience only and its use is discouraged. .It Ar addr : Oo Cm not Oc Bro .Cm any | me | me6 | .Cm table Ns Pq Ar name Ns Op , Ns Ar value .Ar | addr-list | addr-set .Brc .Bl -tag -width indent .It Cm any matches any IP address. .It Cm me matches any IP address configured on an interface in the system. .It Cm me6 matches any IPv6 address configured on an interface in the system. The address list is evaluated at the time the packet is analysed. .It Cm table Ns Pq Ar name Ns Op , Ns Ar value Matches any IPv4 or IPv6 address for which an entry exists in the lookup table .Ar number . If an optional 32-bit unsigned .Ar value is also specified, an entry will match only if it has this value. See the .Sx LOOKUP TABLES section below for more information on lookup tables. .El .It Ar addr-list : ip-addr Ns Op Ns , Ns Ar addr-list .It Ar ip-addr : A host or subnet address specified in one of the following ways: .Bl -tag -width indent .It Ar numeric-ip | hostname Matches a single IPv4 address, specified as dotted-quad or a hostname. Hostnames are resolved at the time the rule is added to the firewall list. .It Ar addr Ns / Ns Ar masklen Matches all addresses with base .Ar addr (specified as an IP address, a network number, or a hostname) and mask width of .Cm masklen bits. As an example, 1.2.3.4/25 or 1.2.3.0/25 will match all IP numbers from 1.2.3.0 to 1.2.3.127 . .It Ar addr Ns : Ns Ar mask Matches all addresses with base .Ar addr (specified as an IP address, a network number, or a hostname) and the mask of .Ar mask , specified as a dotted quad. As an example, 1.2.3.4:255.0.255.0 or 1.0.3.0:255.0.255.0 will match 1.*.3.*. This form is advised only for non-contiguous masks. It is better to resort to the .Ar addr Ns / Ns Ar masklen format for contiguous masks, which is more compact and less error-prone. .El .It Ar addr-set : addr Ns Oo Ns / Ns Ar masklen Oc Ns Cm { Ns Ar list Ns Cm } .It Ar list : Bro Ar num | num-num Brc Ns Op Ns , Ns Ar list Matches all addresses with base address .Ar addr (specified as an IP address, a network number, or a hostname) and whose last byte is in the list between braces { } . Note that there must be no spaces between braces and numbers (spaces after commas are allowed). Elements of the list can be specified as single entries or ranges. The .Ar masklen field is used to limit the size of the set of addresses, and can have any value between 24 and 32. If not specified, it will be assumed as 24. 
.br This format is particularly useful to handle sparse address sets within a single rule. Because the matching occurs using a bitmask, it takes constant time and dramatically reduces the complexity of rulesets. .br As an example, an address specified as 1.2.3.4/24{128,35-55,89} or 1.2.3.0/24{128,35-55,89} will match the following IP addresses: .br 1.2.3.128, 1.2.3.35 to 1.2.3.55, 1.2.3.89 . .It Ar addr6-list : ip6-addr Ns Op Ns , Ns Ar addr6-list .It Ar ip6-addr : A host or subnet specified one of the following ways: .Bl -tag -width indent .It Ar numeric-ip | hostname Matches a single IPv6 address as allowed by .Xr inet_pton 3 or a hostname. Hostnames are resolved at the time the rule is added to the firewall list. .It Ar addr Ns / Ns Ar masklen Matches all IPv6 addresses with base .Ar addr (specified as allowed by .Xr inet_pton or a hostname) and mask width of .Cm masklen bits. .It Ar addr Ns / Ns Ar mask Matches all IPv6 addresses with base .Ar addr (specified as allowed by .Xr inet_pton or a hostname) and the mask of .Ar mask , specified as allowed by .Xr inet_pton. As an example, fe::640:0:0/ffff::ffff:ffff:0:0 will match fe:*:*:*:0:640:*:*. This form is advised only for non-contiguous masks. It is better to resort to the .Ar addr Ns / Ns Ar masklen format for contiguous masks, which is more compact and less error-prone. .El .Pp No support for sets of IPv6 addresses is provided because IPv6 addresses are typically random past the initial prefix. .It Ar ports : Bro Ar port | port Ns \&- Ns Ar port Ns Brc Ns Op , Ns Ar ports For protocols which support port numbers (such as SCTP, TCP and UDP), optional .Cm ports may be specified as one or more ports or port ranges, separated by commas but no spaces, and an optional .Cm not operator. The .Ql \&- notation specifies a range of ports (including boundaries). .Pp Service names (from .Pa /etc/services ) may be used instead of numeric port values. The length of the port list is limited to 30 ports or ranges, though one can specify larger ranges by using an .Em or-block in the .Cm options section of the rule. .Pp A backslash .Pq Ql \e can be used to escape the dash .Pq Ql - character in a service name (from a shell, the backslash must be typed twice to avoid the shell itself interpreting it as an escape character). .Pp .Dl "ipfw add count tcp from any ftp\e\e-data-ftp to any" .Pp Fragmented packets which have a non-zero offset (i.e., not the first fragment) will never match a rule which has one or more port specifications. See the .Cm frag option for details on matching fragmented packets. .El .Ss RULE OPTIONS (MATCH PATTERNS) Additional match patterns can be used within rules. Zero or more of these so-called .Em options can be present in a rule, optionally prefixed by the .Cm not operand, and possibly grouped into .Em or-blocks . .Pp The following match patterns can be used (listed in alphabetical order): .Bl -tag -width indent .It Cm // this is a comment. Inserts the specified text as a comment in the rule. Everything following // is considered as a comment and stored in the rule. You can have comment-only rules, which are listed as having a .Cm count action followed by the comment. .It Cm bridged Alias for .Cm layer2 . .It Cm defer-immediate-action | defer-action A rule with this option will not perform normal action upon a match. This option is intended to be used with .Cm record-state or .Cm keep-state as the dynamic rule, created but ignored on match, will work as intended. 
Rules with both .Cm record-state and .Cm defer-immediate-action create a dynamic rule and continue with the next rule without actually performing the action part of this rule. When the rule is later activated via the state table, the action is performed as usual. .It Cm diverted Matches only packets generated by a divert socket. .It Cm diverted-loopback Matches only packets coming from a divert socket back into the IP stack input for delivery. .It Cm diverted-output Matches only packets going from a divert socket back outward to the IP stack output for delivery. .It Cm dst-ip Ar ip-address Matches IPv4 packets whose destination IP is one of the address(es) specified as argument. .It Bro Cm dst-ip6 | dst-ipv6 Brc Ar ip6-address Matches IPv6 packets whose destination IP is one of the address(es) specified as argument. .It Cm dst-port Ar ports Matches IP packets whose destination port is one of the port(s) specified as argument. .It Cm established Matches TCP packets that have the RST or ACK bits set. .It Cm ext6hdr Ar header Matches IPv6 packets containing the extended header given by .Ar header . Supported headers are: .Pp Fragment, .Pq Cm frag , Hop-to-hop options .Pq Cm hopopt , any type of Routing Header .Pq Cm route , Source routing Routing Header Type 0 .Pq Cm rthdr0 , Mobile IPv6 Routing Header Type 2 .Pq Cm rthdr2 , Destination options .Pq Cm dstopt , IPSec authentication headers .Pq Cm ah , and IPsec encapsulated security payload headers .Pq Cm esp . .It Cm fib Ar fibnum Matches a packet that has been tagged to use the given FIB (routing table) number. .It Cm flow Ar table Ns Pq Ar name Ns Op , Ns Ar value Search for the flow entry in lookup table .Ar name . If not found, the match fails. Otherwise, the match succeeds and .Cm tablearg is set to the value extracted from the table. .Pp This option can be useful to quickly dispatch traffic based on certain packet fields. See the .Sx LOOKUP TABLES section below for more information on lookup tables. .It Cm flow-id Ar labels Matches IPv6 packets containing any of the flow labels given in .Ar labels . .Ar labels is a comma separated list of numeric flow labels. .It Cm frag Matches packets that are fragments and not the first fragment of an IP datagram. Note that these packets will not have the next protocol header (e.g.\& TCP, UDP) so options that look into these headers cannot match. .It Cm gid Ar group Matches all TCP or UDP packets sent by or received for a .Ar group . A .Ar group may be specified by name or number. .It Cm jail Ar jail Matches all TCP or UDP packets sent by or received for the jail whose ID or name is .Ar jail . .It Cm icmptypes Ar types Matches ICMP packets whose ICMP type is in the list .Ar types . The list may be specified as any combination of individual types (numeric) separated by commas. .Em Ranges are not allowed . The supported ICMP types are: .Pp echo reply .Pq Cm 0 , destination unreachable .Pq Cm 3 , source quench .Pq Cm 4 , redirect .Pq Cm 5 , echo request .Pq Cm 8 , router advertisement .Pq Cm 9 , router solicitation .Pq Cm 10 , time-to-live exceeded .Pq Cm 11 , IP header bad .Pq Cm 12 , timestamp request .Pq Cm 13 , timestamp reply .Pq Cm 14 , information request .Pq Cm 15 , information reply .Pq Cm 16 , address mask request .Pq Cm 17 and address mask reply .Pq Cm 18 . .It Cm icmp6types Ar types Matches ICMP6 packets whose ICMP6 type is in the list of .Ar types . The list may be specified as any combination of individual types (numeric) separated by commas. .Em Ranges are not allowed . 
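.Pp
As an illustration only (the rule number is a placeholder), an otherwise
restrictive ruleset could let IPv6 neighbour discovery through; types 135 and
136 are neighbour solicitation and advertisement:
.Bd -literal -offset indent
ipfw add 400 allow ipv6-icmp from any to any icmp6types 135,136
.Ed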
.It Cm in | out Matches incoming or outgoing packets, respectively. .Cm in and .Cm out are mutually exclusive (in fact, .Cm out is implemented as .Cm not in Ns No ). .It Cm ipid Ar id-list Matches IPv4 packets whose .Cm ip_id field has a value included in .Ar id-list , which is either a single value or a list of values or ranges specified in the same way as .Ar ports . .It Cm iplen Ar len-list Matches IP packets whose total length, including header and data, is in the set .Ar len-list , which is either a single value or a list of values or ranges specified in the same way as .Ar ports . .It Cm ipoptions Ar spec Matches packets whose IPv4 header contains the comma separated list of options specified in .Ar spec . The supported IP options are: .Pp .Cm ssrr (strict source route), .Cm lsrr (loose source route), .Cm rr (record packet route) and .Cm ts (timestamp). The absence of a particular option may be denoted with a .Ql \&! . .It Cm ipprecedence Ar precedence Matches IPv4 packets whose precedence field is equal to .Ar precedence . .It Cm ipsec Matches packets that have IPSEC history associated with them (i.e., the packet comes encapsulated in IPSEC, the kernel has IPSEC support, and can correctly decapsulate it). .Pp Note that specifying .Cm ipsec is different from specifying .Cm proto Ar ipsec as the latter will only look at the specific IP protocol field, irrespective of IPSEC kernel support and the validity of the IPSEC data. .Pp Further note that this flag is silently ignored in kernels without IPSEC support. It does not affect rule processing when given and the rules are handled as if with no .Cm ipsec flag. .It Cm iptos Ar spec Matches IPv4 packets whose .Cm tos field contains the comma separated list of service types specified in .Ar spec . The supported IP types of service are: .Pp .Cm lowdelay .Pq Dv IPTOS_LOWDELAY , .Cm throughput .Pq Dv IPTOS_THROUGHPUT , .Cm reliability .Pq Dv IPTOS_RELIABILITY , .Cm mincost .Pq Dv IPTOS_MINCOST , .Cm congestion .Pq Dv IPTOS_ECN_CE . The absence of a particular type may be denoted with a .Ql \&! . .It Cm dscp spec Ns Op , Ns Ar spec Matches IPv4/IPv6 packets whose .Cm DS field value is contained in .Ar spec mask. Multiple values can be specified via the comma separated list. The value can be one of the keywords used in the .Cm setdscp action or an exact number. .It Cm ipttl Ar ttl-list Matches IPv4 packets whose time to live is included in .Ar ttl-list , which is either a single value or a list of values or ranges specified in the same way as .Ar ports . .It Cm ipversion Ar ver Matches IP packets whose IP version field is .Ar ver . .It Cm keep-state Op Ar :flowname Upon a match, the firewall will create a dynamic rule, whose default behaviour is to match bidirectional traffic between source and destination IP/port using the same protocol. The rule has a limited lifetime (controlled by a set of .Xr sysctl 8 variables), and the lifetime is refreshed every time a matching packet is found. The .Ar :flowname is used to assign an additional parameter, besides addresses, ports and protocol, to the dynamic rule. It can be used for more accurate matching by a .Cm check-state rule. The .Cm :default keyword is a special name used for compatibility with old rulesets. .It Cm layer2 Matches only layer2 packets, i.e., those passed to .Nm from ether_demux() and ether_output_frame(). .It Cm limit Bro Cm src-addr | src-port | dst-addr | dst-port Brc Ar N Op Ar :flowname The firewall will only allow .Ar N connections with the same set of parameters as specified in the rule.
One or more of source and destination addresses and ports can be specified. .It Cm lookup Bro Cm dst-ip | dst-port | src-ip | src-port | uid | jail Brc Ar name Search an entry in lookup table .Ar name that matches the field specified as argument. If not found, the match fails. Otherwise, the match succeeds and .Cm tablearg is set to the value extracted from the table. .Pp This option can be useful to quickly dispatch traffic based on certain packet fields. See the .Sx LOOKUP TABLES section below for more information on lookup tables. .It Cm { MAC | mac } Ar dst-mac src-mac Match packets with a given .Ar dst-mac and .Ar src-mac addresses, specified as the .Cm any keyword (matching any MAC address), or six groups of hex digits separated by colons, and optionally followed by a mask indicating the significant bits. The mask may be specified using either of the following methods: .Bl -enum -width indent .It A slash .Pq / followed by the number of significant bits. For example, an address with 33 significant bits could be specified as: .Pp .Dl "MAC 10:20:30:40:50:60/33 any" .It An ampersand .Pq & followed by a bitmask specified as six groups of hex digits separated by colons. For example, an address in which the last 16 bits are significant could be specified as: .Pp .Dl "MAC 10:20:30:40:50:60&00:00:00:00:ff:ff any" .Pp Note that the ampersand character has a special meaning in many shells and should generally be escaped. .El Note that the order of MAC addresses (destination first, source second) is the same as on the wire, but the opposite of the one used for IP addresses. .It Cm mac-type Ar mac-type Matches packets whose Ethernet Type field corresponds to one of those specified as argument. .Ar mac-type is specified in the same way as .Cm port numbers (i.e., one or more comma-separated single values or ranges). You can use symbolic names for known values such as .Em vlan , ipv4, ipv6 . Values can be entered as decimal or hexadecimal (if prefixed by 0x), and they are always printed as hexadecimal (unless the .Cm -N option is used, in which case symbolic resolution will be attempted). .It Cm proto Ar protocol Matches packets with the corresponding IP protocol. .It Cm record-state Upon a match, the firewall will create a dynamic rule as if .Cm keep-state was specified. However, this option doesn't imply an implicit .Cm check-state in contrast to .Cm keep-state . .It Cm recv | xmit | via Brq Ar ifX | Ar if Ns Cm * | Ar table Ns Po Ar name Ns Oo , Ns Ar value Oc Pc | Ar ipno | Ar any Matches packets received, transmitted or going through, respectively, the interface specified by exact name .Po Ar ifX Pc , by device name .Po Ar if* Pc , by IP address, or through some interface. Table .Ar name may be used to match interface by its kernel ifindex. See the .Sx LOOKUP TABLES section below for more information on lookup tables. .Pp The .Cm via keyword causes the interface to always be checked. If .Cm recv or .Cm xmit is used instead of .Cm via , then only the receive or transmit interface (respectively) is checked. By specifying both, it is possible to match packets based on both receive and transmit interface, e.g.: .Pp .Dl "ipfw add deny ip from any to any out recv ed0 xmit ed1" .Pp The .Cm recv interface can be tested on either incoming or outgoing packets, while the .Cm xmit interface can only be tested on outgoing packets. So .Cm out is required (and .Cm in is invalid) whenever .Cm xmit is used. 
.Pp A packet might not have a receive or transmit interface: packets originating from the local host have no receive interface, while packets destined for the local host have no transmit interface. .It Cm set-limit Bro Cm src-addr | src-port | dst-addr | dst-port Brc Ar N Works like .Cm limit but does not have an implicit .Cm check-state attached to it. .It Cm setup Matches TCP packets that have the SYN bit set but no ACK bit. This is the short form of .Dq Li tcpflags\ syn,!ack . .It Cm sockarg Matches packets that are associated to a local socket and for which the SO_USER_COOKIE socket option has been set to a non-zero value. As a side effect, the value of the option is made available as .Cm tablearg value, which in turn can be used as .Cm skipto or .Cm pipe number. .It Cm src-ip Ar ip-address Matches IPv4 packets whose source IP is one of the address(es) specified as an argument. .It Cm src-ip6 Ar ip6-address Matches IPv6 packets whose source IP is one of the address(es) specified as an argument. .It Cm src-port Ar ports Matches IP packets whose source port is one of the port(s) specified as argument. .It Cm tagged Ar tag-list Matches packets whose tags are included in .Ar tag-list , which is either a single value or a list of values or ranges specified in the same way as .Ar ports . Tags can be applied to the packet using .Cm tag rule action parameter (see it's description for details on tags). .It Cm tcpack Ar ack TCP packets only. Match if the TCP header acknowledgment number field is set to .Ar ack . .It Cm tcpdatalen Ar tcpdatalen-list Matches TCP packets whose length of TCP data is .Ar tcpdatalen-list , which is either a single value or a list of values or ranges specified in the same way as .Ar ports . .It Cm tcpflags Ar spec TCP packets only. Match if the TCP header contains the comma separated list of flags specified in .Ar spec . The supported TCP flags are: .Pp .Cm fin , .Cm syn , .Cm rst , .Cm psh , .Cm ack and .Cm urg . The absence of a particular flag may be denoted with a .Ql \&! . A rule which contains a .Cm tcpflags specification can never match a fragmented packet which has a non-zero offset. See the .Cm frag option for details on matching fragmented packets. .It Cm tcpseq Ar seq TCP packets only. Match if the TCP header sequence number field is set to .Ar seq . .It Cm tcpwin Ar tcpwin-list Matches TCP packets whose header window field is set to .Ar tcpwin-list , which is either a single value or a list of values or ranges specified in the same way as .Ar ports . .It Cm tcpoptions Ar spec TCP packets only. Match if the TCP header contains the comma separated list of options specified in .Ar spec . The supported TCP options are: .Pp .Cm mss (maximum segment size), .Cm window (tcp window advertisement), .Cm sack (selective ack), .Cm ts (rfc1323 timestamp) and .Cm cc (rfc1644 t/tcp connection count). The absence of a particular option may be denoted with a .Ql \&! . .It Cm uid Ar user Match all TCP or UDP packets sent by or received for a .Ar user . A .Ar user may be matched by name or identification number. .It Cm verrevpath For incoming packets, a routing table lookup is done on the packet's source address. If the interface on which the packet entered the system matches the outgoing interface for the route, the packet matches. If the interfaces do not match up, the packet does not match. All outgoing packets or packets with no incoming interface match. 
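.Pp
A typical anti-spoofing rule built on this option might look like the following (illustrative):
.Pp
.Dl "ipfw add deny ip from any to any not verrevpath in"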
.Pp The name and functionality of the option is intentionally similar to the Cisco IOS command: .Pp .Dl ip verify unicast reverse-path .Pp This option can be used to make anti-spoofing rules to reject all packets with source addresses not from this interface. See also the option .Cm antispoof . .It Cm versrcreach For incoming packets, a routing table lookup is done on the packet's source address. If a route to the source address exists, but not the default route or a blackhole/reject route, the packet matches. Otherwise, the packet does not match. All outgoing packets match. .Pp The name and functionality of the option is intentionally similar to the Cisco IOS command: .Pp .Dl ip verify unicast source reachable-via any .Pp This option can be used to make anti-spoofing rules to reject all packets whose source address is unreachable. .It Cm antispoof For incoming packets, the packet's source address is checked if it belongs to a directly connected network. If the network is directly connected, then the interface the packet came on in is compared to the interface the network is connected to. When incoming interface and directly connected interface are not the same, the packet does not match. Otherwise, the packet does match. All outgoing packets match. .Pp This option can be used to make anti-spoofing rules to reject all packets that pretend to be from a directly connected network but do not come in through that interface. This option is similar to but more restricted than .Cm verrevpath because it engages only on packets with source addresses of directly connected networks instead of all source addresses. .El .Sh LOOKUP TABLES Lookup tables are useful to handle large sparse sets of addresses or other search keys (e.g., ports, jail IDs, interface names). In the rest of this section we will use the term ``key''. Table name needs to match the following spec: .Ar table-name . Tables with the same name can be created in different .Ar sets . However, rule links to the tables in .Ar set 0 by default. This behavior can be controlled by .Va net.inet.ip.fw.tables_sets variable. See the .Sx SETS OF RULES section for more information. There may be up to 65535 different lookup tables. .Pp The following table types are supported: .Bl -tag -width indent .It Ar table-type : Ar addr | iface | number | flow .It Ar table-key : Ar addr Ns Oo / Ns Ar masklen Oc | iface-name | number | flow-spec .It Ar flow-spec : Ar flow-field Ns Op , Ns Ar flow-spec .It Ar flow-field : src-ip | proto | src-port | dst-ip | dst-port .It Cm addr matches IPv4 or IPv6 address. Each entry is represented by an .Ar addr Ns Op / Ns Ar masklen and will match all addresses with base .Ar addr (specified as an IPv4/IPv6 address, or a hostname) and mask width of .Ar masklen bits. If .Ar masklen is not specified, it defaults to 32 for IPv4 and 128 for IPv6. When looking up an IP address in a table, the most specific entry will match. .It Cm iface matches interface names. Each entry is represented by string treated as interface name. Wildcards are not supported. .It Cm number maches protocol ports, uids/gids or jail IDs. Each entry is represented by 32-bit unsigned integer. Ranges are not supported. .It Cm flow Matches packet fields specified by .Ar flow type suboptions with table entries. .El .Pp Tables require explicit creation via .Cm create before use. 
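.Pp
For example, an address table could be created, populated and referenced from a rule as follows (the table name and addresses are illustrative):
.Bd -literal -offset indent
ipfw table badhosts create type addr
ipfw table badhosts add 192.0.2.0/24 198.51.100.7
ipfw add deny ip from 'table(badhosts)' to any
.Ed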
.Pp The following creation options are supported: .Bl -tag -width indent .It Ar create-options : Ar create-option | create-options .It Ar create-option : Cm type Ar table-type | Cm valtype Ar value-mask | Cm algo Ar algo-desc | .Cm limit Ar number | Cm locked .It Cm type Table key type. .It Cm valtype Table value mask. .It Cm algo Table algorithm to use (see below). .It Cm limit Maximum number of items that may be inserted into table. .It Cm locked Restrict any table modifications. .El .Pp Some of these options may be modified later via .Cm modify keyword. The following options can be changed: .Bl -tag -width indent .It Ar modify-options : Ar modify-option | modify-options .It Ar modify-option : Cm limit Ar number .It Cm limit Alter maximum number of items that may be inserted into table. .El .Pp Additionally, table can be locked or unlocked using .Cm lock or .Cm unlock commands. .Pp Tables of the same .Ar type can be swapped with each other using .Cm swap Ar name command. Swap may fail if tables limits are set and data exchange would result in limits hit. Operation is performed atomically. .Pp One or more entries can be added to a table at once using .Cm add command. Addition of all items are performed atomically. By default, error in addition of one entry does not influence addition of other entries. However, non-zero error code is returned in that case. Special .Cm atomic keyword may be specified before .Cm add to indicate all-or-none add request. .Pp One or more entries can be removed from a table at once using .Cm delete command. By default, error in removal of one entry does not influence removing of other entries. However, non-zero error code is returned in that case. .Pp It may be possible to check what entry will be found on particular .Ar table-key using .Cm lookup .Ar table-key command. This functionality is optional and may be unsupported in some algorithms. .Pp The following operations can be performed on .Ar one or .Cm all tables: .Bl -tag -width indent .It Cm list List all entries. .It Cm flush Removes all entries. .It Cm info Shows generic table information. .It Cm detail Shows generic table information and algo-specific data. .El .Pp The following lookup algorithms are supported: .Bl -tag -width indent .It Ar algo-desc : algo-name | "algo-name algo-data" .It Ar algo-name: Ar addr:radix | addr:hash | iface:array | number:array | flow:hash .It Cm addr:radix Separate Radix trees for IPv4 and IPv6, the same way as the routing table (see .Xr route 4 ) . Default choice for .Ar addr type. .It Cm addr:hash Separate auto-growing hashes for IPv4 and IPv6. Accepts entries with the same mask length specified initially via .Cm "addr:hash masks=/v4,/v6" algorithm creation options. Assume /32 and /128 masks by default. Search removes host bits (according to mask) from supplied address and checks resulting key in appropriate hash. Mostly optimized for /64 and byte-ranged IPv6 masks. .It Cm iface:array Array storing sorted indexes for entries which are presented in the system. Optimized for very fast lookup. .It Cm number:array Array storing sorted u32 numbers. .It Cm flow:hash Auto-growing hash storing flow entries. Search calculates hash on required packet fields and searches for matching entries in selected bucket. .El .Pp The .Cm tablearg feature provides the ability to use a value, looked up in the table, as the argument for a rule action, action parameter or rule option. This can significantly reduce number of rules in some configurations. 
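.Pp
For instance, per-subnet traffic shaping could be dispatched through a single rule, using the value stored in the table as the pipe number (the table name, subnets and pipe numbers are illustrative):
.Bd -literal -offset indent
ipfw table shaper create type addr valtype pipe
ipfw table shaper add 192.0.2.0/24 10
ipfw table shaper add 198.51.100.0/24 20
ipfw add pipe tablearg ip from 'table(shaper)' to any
.Ed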
If two tables are used in a rule, the result of the second (destination) is used. .Pp Each record may hold one or more values according to .Ar value-mask . This mask is set on table creation via .Cm valtype option. The following value types are supported: .Bl -tag -width indent .It Ar value-mask : Ar value-type Ns Op , Ns Ar value-mask .It Ar value-type : Ar skipto | pipe | fib | nat | dscp | tag | divert | .Ar netgraph | limit | ipv4 .It Cm skipto rule number to jump to. .It Cm pipe Pipe number to use. .It Cm fib fib number to match/set. .It Cm nat nat number to jump to. .It Cm dscp dscp value to match/set. .It Cm tag tag number to match/set. .It Cm divert port number to divert traffic to. .It Cm netgraph hook number to move packet to. .It Cm limit maximum number of connections. .It Cm ipv4 IPv4 nexthop to fwd packets to. .It Cm ipv6 IPv6 nexthop to fwd packets to. .El .Pp The .Cm tablearg argument can be used with the following actions: .Cm nat, pipe , queue, divert, tee, netgraph, ngtee, fwd, skipto, setfib, action parameters: .Cm tag, untag, rule options: .Cm limit, tagged. .Pp When used with the .Cm skipto action, the user should be aware that the code will walk the ruleset up to a rule equal to, or past, the given number. .Pp See the .Sx EXAMPLES Section for example usage of tables and the tablearg keyword. .Sh SETS OF RULES Each rule or table belongs to one of 32 different .Em sets , numbered 0 to 31. Set 31 is reserved for the default rule. .Pp By default, rules or tables are put in set 0, unless you use the .Cm set N attribute when adding a new rule or table. Sets can be individually and atomically enabled or disabled, so this mechanism permits an easy way to store multiple configurations of the firewall and quickly (and atomically) switch between them. .Pp By default, tables from set 0 are referenced when adding rule with table opcodes regardless of rule set. This behavior can be changed by setting .Va net.inet.ip.fw.tables_sets variable to 1. Rule's set will then be used for table references. .Pp The command to enable/disable sets is .Bd -ragged -offset indent .Nm .Cm set Oo Cm disable Ar number ... Oc Op Cm enable Ar number ... .Ed .Pp where multiple .Cm enable or .Cm disable sections can be specified. Command execution is atomic on all the sets specified in the command. By default, all sets are enabled. .Pp When you disable a set, its rules behave as if they do not exist in the firewall configuration, with only one exception: .Bd -ragged -offset indent dynamic rules created from a rule before it had been disabled will still be active until they expire. In order to delete dynamic rules you have to explicitly delete the parent rule which generated them. .Ed .Pp The set number of rules can be changed with the command .Bd -ragged -offset indent .Nm .Cm set move .Brq Cm rule Ar rule-number | old-set .Cm to Ar new-set .Ed .Pp Also, you can atomically swap two rulesets with the command .Bd -ragged -offset indent .Nm .Cm set swap Ar first-set second-set .Ed .Pp See the .Sx EXAMPLES Section on some possible uses of sets of rules. .Sh STATEFUL FIREWALL Stateful operation is a way for the firewall to dynamically create rules for specific flows when packets that match a given pattern are detected. Support for stateful operation comes through the .Cm check-state , keep-state , record-state , limit and .Cm set-limit options of .Nm rules . 
.Pp Dynamic rules are created when a packet matches a .Cm keep-state , .Cm record-state , .Cm limit or .Cm set-limit rule, causing the creation of a .Em dynamic rule which will match all and only packets with a given .Em protocol between a .Em src-ip/src-port dst-ip/dst-port pair of addresses .Em ( src and .Em dst are used here only to denote the initial match addresses, but they are completely equivalent afterwards). Rules created by .Cm keep-state option also have a .Ar :flowname taken from it. This name is used in matching together with addresses, ports and protocol. Dynamic rules will be checked at the first .Cm check-state, keep-state or .Cm limit occurrence, and the action performed upon a match will be the same as in the parent rule. .Pp Note that no additional attributes other than protocol and IP addresses and ports and :flowname are checked on dynamic rules. .Pp The typical use of dynamic rules is to keep a closed firewall configuration, but let the first TCP SYN packet from the inside network install a dynamic rule for the flow so that packets belonging to that session will be allowed through the firewall: .Pp .Dl "ipfw add check-state :OUTBOUND" .Dl "ipfw add allow tcp from my-subnet to any setup keep-state :OUTBOUND" .Dl "ipfw add deny tcp from any to any" .Pp A similar approach can be used for UDP, where an UDP packet coming from the inside will install a dynamic rule to let the response through the firewall: .Pp .Dl "ipfw add check-state :OUTBOUND" .Dl "ipfw add allow udp from my-subnet to any keep-state :OUTBOUND" .Dl "ipfw add deny udp from any to any" .Pp Dynamic rules expire after some time, which depends on the status of the flow and the setting of some .Cm sysctl variables. See Section .Sx SYSCTL VARIABLES for more details. For TCP sessions, dynamic rules can be instructed to periodically send keepalive packets to refresh the state of the rule when it is about to expire. .Pp See Section .Sx EXAMPLES for more examples on how to use dynamic rules. .Sh TRAFFIC SHAPER (DUMMYNET) CONFIGURATION .Nm is also the user interface for the .Nm dummynet traffic shaper, packet scheduler and network emulator, a subsystem that can artificially queue, delay or drop packets emulating the behaviour of certain network links or queueing systems. .Pp .Nm dummynet operates by first using the firewall to select packets using any match pattern that can be used in .Nm rules. Matching packets are then passed to either of two different objects, which implement the traffic regulation: .Bl -hang -offset XXXX .It Em pipe A .Em pipe emulates a .Em link with given bandwidth and propagation delay, driven by a FIFO scheduler and a single queue with programmable queue size and packet loss rate. Packets are appended to the queue as they come out from .Nm ipfw , and then transferred in FIFO order to the link at the desired rate. .It Em queue A .Em queue is an abstraction used to implement packet scheduling using one of several packet scheduling algorithms. Packets sent to a .Em queue are first grouped into flows according to a mask on the 5-tuple. Flows are then passed to the scheduler associated to the .Em queue , and each flow uses scheduling parameters (weight and others) as configured in the .Em queue itself. A scheduler in turn is connected to an emulated link, and arbitrates the link's bandwidth among backlogged flows according to weights and to the features of the scheduling algorithm in use. 
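.Pp
A minimal sketch of this binding, assuming two classes of source addresses sharing one emulated link (all numbers, weights and addresses are illustrative):
.Bd -literal -offset indent
ipfw pipe 1 config bw 1Mbit/s
ipfw queue 1 config pipe 1 weight 10 mask src-ip 0xffffffff
ipfw queue 2 config pipe 1 weight 1 mask src-ip 0xffffffff
ipfw add queue 1 ip from 192.0.2.0/24 to any out
ipfw add queue 2 ip from any to any out
.Ed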
.El .Pp In practice, .Em pipes can be used to set hard limits to the bandwidth that a flow can use, whereas .Em queues can be used to determine how different flows share the available bandwidth. .Pp A graphical representation of the binding of queues, flows, schedulers and links is below. .Bd -literal -offset indent (flow_mask|sched_mask) sched_mask +---------+ weight Wx +-------------+ | |->-[flow]-->--| |-+ -->--| QUEUE x | ... | | | | |->-[flow]-->--| SCHEDuler N | | +---------+ | | | ... | +--[LINK N]-->-- +---------+ weight Wy | | +--[LINK N]-->-- | |->-[flow]-->--| | | -->--| QUEUE y | ... | | | | |->-[flow]-->--| | | +---------+ +-------------+ | +-------------+ .Ed It is important to understand the role of the SCHED_MASK and FLOW_MASK, which are configured through the commands .Dl "ipfw sched N config mask SCHED_MASK ..." and .Dl "ipfw queue X config mask FLOW_MASK ..." . .Pp The SCHED_MASK is used to assign flows to one or more scheduler instances, one for each value of the packet's 5-tuple after applying SCHED_MASK. As an example, using ``src-ip 0xffffff00'' creates one instance for each /24 destination subnet. .Pp The FLOW_MASK, together with the SCHED_MASK, is used to split packets into flows. As an example, using ``src-ip 0x000000ff'' together with the previous SCHED_MASK makes a flow for each individual source address. In turn, flows for each /24 subnet will be sent to the same scheduler instance. .Pp The above diagram holds even for the .Em pipe case, with the only restriction that a .Em pipe only supports a SCHED_MASK, and forces the use of a FIFO scheduler (these are for backward compatibility reasons; in fact, internally, a .Nm dummynet's pipe is implemented exactly as above). .Pp There are two modes of .Nm dummynet operation: .Dq normal and .Dq fast . The .Dq normal mode tries to emulate a real link: the .Nm dummynet scheduler ensures that the packet will not leave the pipe faster than it would on the real link with a given bandwidth. The .Dq fast mode allows certain packets to bypass the .Nm dummynet scheduler (if packet flow does not exceed pipe's bandwidth). This is the reason why the .Dq fast mode requires less CPU cycles per packet (on average) and packet latency can be significantly lower in comparison to a real link with the same bandwidth. The default mode is .Dq normal . The .Dq fast mode can be enabled by setting the .Va net.inet.ip.dummynet.io_fast .Xr sysctl 8 variable to a non-zero value. .Pp .Ss PIPE, QUEUE AND SCHEDULER CONFIGURATION The .Em pipe , .Em queue and .Em scheduler configuration commands are the following: .Bd -ragged -offset indent .Cm pipe Ar number Cm config Ar pipe-configuration .Pp .Cm queue Ar number Cm config Ar queue-configuration .Pp .Cm sched Ar number Cm config Ar sched-configuration .Ed .Pp The following parameters can be configured for a pipe: .Pp .Bl -tag -width indent -compact .It Cm bw Ar bandwidth | device Bandwidth, measured in .Sm off .Op Cm K | M | G .Brq Cm bit/s | Byte/s . .Sm on .Pp A value of 0 (default) means unlimited bandwidth. The unit must immediately follow the number, as in .Pp .Dl "ipfw pipe 1 config bw 300Kbit/s" .Pp If a device name is specified instead of a numeric value, as in .Pp .Dl "ipfw pipe 1 config bw tun0" .Pp then the transmit clock is supplied by the specified device. At the moment only the .Xr tun 4 device supports this functionality, for use in conjunction with .Xr ppp 8 . .Pp .It Cm delay Ar ms-delay Propagation delay, measured in milliseconds. 
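.Pp
For example, bandwidth and delay are commonly combined to emulate a slow link (values are illustrative):
.Pp
.Dl "ipfw pipe 1 config bw 256Kbit/s delay 40"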
The value is rounded to the next multiple of the clock tick (typically 10ms, but it is a good practice to run kernels with .Dq "options HZ=1000" to reduce the granularity to 1ms or less). The default value is 0, meaning no delay. .Pp .It Cm burst Ar size If the data to be sent exceeds the pipe's bandwidth limit (and the pipe was previously idle), up to .Ar size bytes of data are allowed to bypass the .Nm dummynet scheduler, and will be sent as fast as the physical link allows. Any additional data will be transmitted at the rate specified by the .Nm pipe bandwidth. The burst size depends on how long the pipe has been idle; the effective burst size is calculated as follows: MAX( .Ar size , .Nm bw * pipe_idle_time). .Pp .It Cm profile Ar filename A file specifying the additional overhead incurred in the transmission of a packet on the link. .Pp Some link types introduce extra delays in the transmission of a packet, e.g., because of MAC level framing, contention on the use of the channel, MAC level retransmissions and so on. From our point of view, the channel is effectively unavailable for this extra time, which is constant or variable depending on the link type. Additionally, packets may be dropped after this time (e.g., on a wireless link after too many retransmissions). We can model the additional delay with an empirical curve that represents its distribution. .Bd -literal -offset indent cumulative probability 1.0 ^ | L +-- loss-level x | ****** | * | ***** | * | ** | * +-------*-------------------> delay .Ed The empirical curve may have both vertical and horizontal lines. Vertical lines represent constant delay for a range of probabilities. Horizontal lines correspond to a discontinuity in the delay distribution: the pipe will use the largest delay for a given probability. .Pp The file format is the following, with whitespace acting as a separator and '#' indicating the beginning a comment: .Bl -tag -width indent .It Cm name Ar identifier optional name (listed by "ipfw pipe show") to identify the delay distribution; .It Cm bw Ar value the bandwidth used for the pipe. If not specified here, it must be present explicitly as a configuration parameter for the pipe; .It Cm loss-level Ar L the probability above which packets are lost. (0.0 <= L <= 1.0, default 1.0 i.e., no loss); .It Cm samples Ar N the number of samples used in the internal representation of the curve (2..1024; default 100); .It Cm "delay prob" | "prob delay" One of these two lines is mandatory and defines the format of the following lines with data points. .It Ar XXX Ar YYY 2 or more lines representing points in the curve, with either delay or probability first, according to the chosen format. The unit for delay is milliseconds. Data points do not need to be sorted. Also, the number of actual lines can be different from the value of the "samples" parameter: .Nm utility will sort and interpolate the curve as needed. .El .Pp Example of a profile file: .Bd -literal -offset indent name bla_bla_bla samples 100 loss-level 0.86 prob delay 0 200 # minimum overhead is 200ms 0.5 200 0.5 300 0.8 1000 0.9 1300 1 1300 #configuration file end .Ed .El .Pp The following parameters can be configured for a queue: .Pp .Bl -tag -width indent -compact .It Cm pipe Ar pipe_nr Connects a queue to the specified pipe. Multiple queues (with the same or different weights) can be connected to the same pipe, which specifies the aggregate rate for the set of queues. .Pp .It Cm weight Ar weight Specifies the weight to be used for flows matching this queue. 
The weight must be in the range 1..100, and defaults to 1. .El .Pp The following case-insensitive parameters can be configured for a scheduler: .Pp .Bl -tag -width indent -compact .It Cm type Ar {fifo | wf2q+ | rr | qfq | fq_codel | fq_pie} specifies the scheduling algorithm to use. .Bl -tag -width indent -compact .It Cm fifo is just a FIFO scheduler (which means that all packets are stored in the same queue as they arrive to the scheduler). FIFO has O(1) per-packet time complexity, with very low constants (estimate 60-80ns on a 2GHz desktop machine) but gives no service guarantees. .It Cm wf2q+ implements the WF2Q+ algorithm, which is a Weighted Fair Queueing algorithm which permits flows to share bandwidth according to their weights. Note that weights are not priorities; even a flow with a minuscule weight will never starve. WF2Q+ has O(log N) per-packet processing cost, where N is the number of flows, and is the default algorithm used by previous versions dummynet's queues. .It Cm rr implements the Deficit Round Robin algorithm, which has O(1) processing costs (roughly, 100-150ns per packet) and permits bandwidth allocation according to weights, but with poor service guarantees. .It Cm qfq implements the QFQ algorithm, which is a very fast variant of WF2Q+, with similar service guarantees and O(1) processing costs (roughly, 200-250ns per packet). .It Cm fq_codel implements the FQ-CoDel (FlowQueue-CoDel) scheduler/AQM algorithm, which uses a modified Deficit Round Robin scheduler to manage two lists of sub-queues (old sub-queues and new sub-queues) for providing brief periods of priority to lightweight or short burst flows. By default, the total number of sub-queues is 1024. FQ-CoDel's internal, dynamically created sub-queues are controlled by separate instances of CoDel AQM. .It Cm fq_pie implements the FQ-PIE (FlowQueue-PIE) scheduler/AQM algorithm, which similar to .Cm fq_codel but uses per sub-queue PIE AQM instance to control the queue delay. .El .Pp .Cm fq_codel inherits AQM parameters and options from .Cm codel (see below), and .Cm fq_pie inherits AQM parameters and options from .Cm pie (see below). Additionally, both of .Cm fq_codel and .Cm fq_pie have shared scheduler parameters which are: .Bl -tag -width indent .It Cm quantum .Ar m specifies the quantum (credit) of the scheduler. .Ar m is the number of bytes a queue can serve before being moved to the tail of old queues list. The default is 1514 bytes, and the maximum accepable value is 9000 bytes. .It Cm limit .Ar m specifies the hard size limit (in unit of packets) of all queues managed by an instance of the scheduler. The default value of .Ar m is 10240 packets, and the maximum accepable value is 20480 packets. .It Cm flows .Ar m specifies the total number of flow queues (sub-queues) that fq_* creates and manages. By default, 1024 sub-queues are created when an instance of the fq_{codel/pie} scheduler is created. The maximum accepable value is 65536. .El .Pp Note that any token after .Cm fq_codel or .Cm fq_pie is considered a parameter for fq_{codel/pie}. So, ensure all scheduler configuration options not related to fq_{codel/pie} are written before .Cm fq_codel/fq_pie tokens. .El .Pp In addition to the type, all parameters allowed for a pipe can also be specified for a scheduler. .Pp Finally, the following parameters can be configured for both pipes and queues: .Pp .Bl -tag -width XXXX -compact .It Cm buckets Ar hash-table-size Specifies the size of the hash table used for storing the various queues. 
Default value is 64 controlled by the .Xr sysctl 8 variable .Va net.inet.ip.dummynet.hash_size , allowed range is 16 to 65536. .Pp .It Cm mask Ar mask-specifier Packets sent to a given pipe or queue by an .Nm rule can be further classified into multiple flows, each of which is then sent to a different .Em dynamic pipe or queue. A flow identifier is constructed by masking the IP addresses, ports and protocol types as specified with the .Cm mask options in the configuration of the pipe or queue. For each different flow identifier, a new pipe or queue is created with the same parameters as the original object, and matching packets are sent to it. .Pp Thus, when .Em dynamic pipes are used, each flow will get the same bandwidth as defined by the pipe, whereas when .Em dynamic queues are used, each flow will share the parent's pipe bandwidth evenly with other flows generated by the same queue (note that other queues with different weights might be connected to the same pipe). .br Available mask specifiers are a combination of one or more of the following: .Pp .Cm dst-ip Ar mask , .Cm dst-ip6 Ar mask , .Cm src-ip Ar mask , .Cm src-ip6 Ar mask , .Cm dst-port Ar mask , .Cm src-port Ar mask , .Cm flow-id Ar mask , .Cm proto Ar mask or .Cm all , .Pp where the latter means all bits in all fields are significant. .Pp .It Cm noerror When a packet is dropped by a .Nm dummynet queue or pipe, the error is normally reported to the caller routine in the kernel, in the same way as it happens when a device queue fills up. Setting this option reports the packet as successfully delivered, which can be needed for some experimental setups where you want to simulate loss or congestion at a remote router. .Pp .It Cm plr Ar packet-loss-rate Packet loss rate. Argument .Ar packet-loss-rate is a floating-point number between 0 and 1, with 0 meaning no loss, 1 meaning 100% loss. The loss rate is internally represented on 31 bits. .Pp .It Cm queue Brq Ar slots | size Ns Cm Kbytes Queue size, in .Ar slots or .Cm KBytes . Default value is 50 slots, which is the typical queue size for Ethernet devices. Note that for slow speed links you should keep the queue size short or your traffic might be affected by a significant queueing delay. E.g., 50 max-sized ethernet packets (1500 bytes) mean 600Kbit or 20s of queue on a 30Kbit/s pipe. Even worse effects can result if you get packets from an interface with a much larger MTU, e.g.\& the loopback interface with its 16KB packets. The .Xr sysctl 8 variables .Em net.inet.ip.dummynet.pipe_byte_limit and .Em net.inet.ip.dummynet.pipe_slot_limit control the maximum lengths that can be specified. .Pp .It Cm red | gred Ar w_q Ns / Ns Ar min_th Ns / Ns Ar max_th Ns / Ns Ar max_p [ecn] Make use of the RED (Random Early Detection) queue management algorithm. .Ar w_q and .Ar max_p are floating point numbers between 0 and 1 (inclusive), while .Ar min_th and .Ar max_th are integer numbers specifying thresholds for queue management (thresholds are computed in bytes if the queue has been defined in bytes, in slots otherwise). The two parameters can also be of the same value if needed. The .Nm dummynet also supports the gentle RED variant (gred) and ECN (Explicit Congestion Notification) as optional. 
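.Pp
For example, RED could be enabled on a pipe as follows (all parameters are illustrative):
.Pp
.Dl "ipfw pipe 1 config bw 1Mbit/s queue 100 red 0.002/10/30/0.1"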
Three .Xr sysctl 8 variables can be used to control the RED behaviour: .Bl -tag -width indent .It Va net.inet.ip.dummynet.red_lookup_depth specifies the accuracy in computing the average queue when the link is idle (defaults to 256, must be greater than zero) .It Va net.inet.ip.dummynet.red_avg_pkt_size specifies the expected average packet size (defaults to 512, must be greater than zero) .It Va net.inet.ip.dummynet.red_max_pkt_size specifies the expected maximum packet size, only used when queue thresholds are in bytes (defaults to 1500, must be greater than zero). .El .Pp .It Cm codel Oo Cm target Ar time Oc Oo Cm interval Ar time Oc Oo Cm ecn | .Cm noecn Oc Make use of the CoDel (Controlled-Delay) queue management algorithm. .Ar time is interpreted as milliseconds by default but seconds (s), milliseconds (ms) or microseconds (us) can be specified instead. CoDel drops or marks (ECN) packets depending on packet sojourn time in the queue. .Cm target .Ar time (5ms by default) is the minimum acceptable persistent queue delay that CoDel allows. CoDel does not drop packets directly after packets sojourn time becomes higher than .Cm target .Ar time but waits for .Cm interval .Ar time (100ms default) before dropping. .Cm interval .Ar time should be set to maximum RTT for all expected connections. .Cm ecn enables (disabled by default) packet marking (instead of dropping) for ECN-enabled TCP flows when queue delay becomes high. .Pp Note that any token after .Cm codel is considered a parameter for CoDel. So, ensure all pipe/queue configuration options are written before .Cm codel token. .Pp The .Xr sysctl 8 variables .Va net.inet.ip.dummynet.codel.target and .Va net.inet.ip.dummynet.codel.interval can be used to set CoDel default parameters. .Pp .It Cm pie Oo Cm target Ar time Oc Oo Cm tupdate Ar time Oc Oo .Cm alpha Ar n Oc Oo Cm beta Ar n Oc Oo Cm max_burst Ar time Oc Oo .Cm max_ecnth Ar n Oc Oo Cm ecn | Cm noecn Oc Oo Cm capdrop | .Cm nocapdrop Oc Oo Cm drand | Cm nodrand Oc Oo Cm onoff .Oc Oo Cm dre | Cm ts Oc Make use of the PIE (Proportional Integral controller Enhanced) queue management algorithm. PIE drops or marks packets depending on a calculated drop probability during en-queue process, with the aim of achieving high throughput while keeping queue delay low. At regular time intervals of .Cm tupdate .Ar time (15ms by default) a background process (re)calculates the probability based on queue delay deviations from .Cm target .Ar time (15ms by default) and queue delay trends. PIE approximates current queue delay by using a departure rate estimation method, or (optionally) by using a packet timestamp method similar to CoDel. .Ar time is interpreted as milliseconds by default but seconds (s), milliseconds (ms) or microseconds (us) can be specified instead. The other PIE parameters and options are as follows: .Bl -tag -width indent .It Cm alpha Ar n .Ar n is a floating point number between 0 and 7 which specifies the weight of queue delay deviations that is used in drop probability calculation. 0.125 is the default. .It Cm beta Ar n .Ar n is a floating point number between 0 and 7 which specifies is the weight of queue delay trend that is used in drop probability calculation. 1.25 is the default. .It Cm max_burst Ar time The maximum period of time that PIE does not drop/mark packets. 150ms is the default and 10s is the maximum value. 
.It Cm max_ecnth Ar n Even when ECN is enabled, PIE drops packets instead of marking them when drop probability becomes higher than ECN probability threshold .Cm max_ecnth Ar n , the default is 0.1 (i.e 10%) and 1 is the maximum value. .It Cm ecn | noecn enable or disable ECN marking for ECN-enabled TCP flows. Disabled by default. .It Cm capdrop | nocapdrop enable or disable cap drop adjustment. Cap drop adjustment is enabled by default. .It Cm drand | nodrand enable or disable drop probability de-randomisation. De-randomisation eliminates the problem of dropping packets too close or too far. De-randomisation is enabled by default. .It Cm onoff enable turning PIE on and off depending on queue load. If this option is enabled, PIE turnes on when over 1/3 of queue becomes full. This option is disabled by default. .It Cm dre | ts Calculate queue delay using departure rate estimation .Cm dre or timestamps .Cm ts . .Cm dre is used by default. .El .Pp Note that any token after .Cm pie is considered a parameter for PIE. So ensure all pipe/queue the configuration options are written before .Cm pie token. .Xr sysctl 8 variables can be used to control the .Cm pie default parameters. See the .Sx SYSCTL VARIABLES section for more details. .El .Pp When used with IPv6 data, .Nm dummynet currently has several limitations. Information necessary to route link-local packets to an interface is not available after processing by .Nm dummynet so those packets are dropped in the output path. Care should be taken to ensure that link-local packets are not passed to .Nm dummynet . .Sh CHECKLIST Here are some important points to consider when designing your rules: .Bl -bullet .It Remember that you filter both packets going .Cm in and .Cm out . Most connections need packets going in both directions. .It Remember to test very carefully. It is a good idea to be near the console when doing this. If you cannot be near the console, use an auto-recovery script such as the one in .Pa /usr/share/examples/ipfw/change_rules.sh . .It Do not forget the loopback interface. .El .Sh FINE POINTS .Bl -bullet .It There are circumstances where fragmented datagrams are unconditionally dropped. TCP packets are dropped if they do not contain at least 20 bytes of TCP header, UDP packets are dropped if they do not contain a full 8 byte UDP header, and ICMP packets are dropped if they do not contain 4 bytes of ICMP header, enough to specify the ICMP type, code, and checksum. These packets are simply logged as .Dq pullup failed since there may not be enough good data in the packet to produce a meaningful log entry. .It Another type of packet is unconditionally dropped, a TCP packet with a fragment offset of one. This is a valid packet, but it only has one use, to try to circumvent firewalls. When logging is enabled, these packets are reported as being dropped by rule -1. .It If you are logged in over a network, loading the .Xr kld 4 version of .Nm is probably not as straightforward as you would think. The following command line is recommended: .Bd -literal -offset indent kldload ipfw && \e ipfw add 32000 allow ip from any to any .Ed .Pp Along the same lines, doing an .Bd -literal -offset indent ipfw flush .Ed .Pp in similar surroundings is also a bad idea. .It The .Nm filter list may not be modified if the system security level is set to 3 or higher (see .Xr init 8 for information on system security levels). .El .Sh PACKET DIVERSION A .Xr divert 4 socket bound to the specified port will receive all packets diverted to that port. 
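.Pp
For example, the following rule (illustrative) diverts all traffic crossing the ed0 interface to a daemon listening on divert port 8668, the default port of
.Xr natd 8 :
.Pp
.Dl "ipfw add divert 8668 ip from any to any via ed0"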
If no socket is bound to the destination port, or if the divert module is not loaded, or if the kernel was not compiled with divert socket support, the packets are dropped. .Sh NETWORK ADDRESS TRANSLATION (NAT) .Nm supports in-kernel NAT using the kernel version of .Xr libalias 3 . The kernel module .Cm ipfw_nat should be loaded, or the kernel should have .Cm options IPFIREWALL_NAT , to be able to use NAT. .Pp The nat configuration command is the following: .Bd -ragged -offset indent .Bk -words .Cm nat .Ar nat_number .Cm config .Ar nat-configuration .Ek .Ed .Pp The following parameters can be configured: .Bl -tag -width indent .It Cm ip Ar ip_address Define an IP address to use for aliasing. .It Cm if Ar nic Use the IP address of NIC for aliasing, dynamically changing it if the NIC's IP address changes. .It Cm log Enable logging on this nat instance. .It Cm deny_in Deny any incoming connections from the outside world. .It Cm same_ports Try to leave the alias port numbers unchanged from the actual local port numbers. .It Cm unreg_only Traffic on the local network not originating from unregistered address spaces will be ignored. .It Cm reset Reset the table of the packet aliasing engine on address change. .It Cm reverse Reverse the way libalias handles aliasing. .It Cm proxy_only Obey transparent proxy rules only; packet aliasing is not performed. .It Cm skip_global Skip this instance in case of global state lookup (see below). .El .Pp Some special values can be supplied instead of .Va nat_number : .Bl -tag -width indent .It Cm global Looks up the translation state in all configured nat instances. If an entry is found, the packet is aliased according to that entry. If no entry is found in any of the instances, the packet is passed unchanged, and no new entry will be created. See section .Sx MULTIPLE INSTANCES in .Xr natd 8 for more information. .It Cm tablearg Uses the argument supplied in a lookup table. See the .Sx LOOKUP TABLES section below for more information on lookup tables. .El .Pp To let the packet continue after being (de)aliased, set the sysctl variable .Va net.inet.ip.fw.one_pass to 0. For more information about aliasing modes, refer to .Xr libalias 3 . See Section .Sx EXAMPLES for some examples of nat usage. .Ss REDIRECT AND LSNAT SUPPORT IN IPFW Redirect and LSNAT support closely follow the syntax used in .Xr natd 8 . See Section .Sx EXAMPLES for some examples on how to do redirect and lsnat. .Ss SCTP NAT SUPPORT SCTP nat can be configured in a similar manner to TCP through the .Nm command line tool. The main difference is that .Nm sctp nat does not do port translation. Since the local and global side ports will be the same, there is no need to specify both. Ports are redirected as follows: .Bd -ragged -offset indent .Bk -words .Cm nat .Ar nat_number .Cm config if .Ar nic .Cm redirect_port sctp .Ar ip_address [,addr_list] {[port | port-port] [,ports]} .Ek .Ed .Pp Most .Nm sctp nat configuration can be done in real-time through the .Xr sysctl 8 interface. All may be changed dynamically, though the hash_table size will only change for new .Nm nat instances. See .Sx SYSCTL VARIABLES for more information. .Sh IPv6/IPv4 NETWORK ADDRESS AND PROTOCOL TRANSLATION .Nm supports in-kernel IPv6/IPv4 network address and protocol translation. Stateful NAT64 translation allows IPv6-only clients to contact IPv4 servers using unicast TCP, UDP or ICMP protocols. One or more IPv4 addresses assigned to a stateful NAT64 translator are shared among several IPv6-only clients.
When stateful NAT64 is used in conjunction with DNS64, no changes are usually required in the IPv6 client or the IPv4 server. The kernel module .Cm ipfw_nat64 should be loaded, or the kernel should have .Cm options IPFIREWALL_NAT64 , to be able to use the stateful NAT64 translator. .Pp Stateful NAT64 uses memory for several types of objects. When an IPv6 client initiates a connection, the NAT64 translator creates a host entry in the states table. Each host entry has a number of ports group entries allocated on demand. Ports group entries contain connection state entries. There are several options to control limits and lifetimes for these objects. .Pp The NAT64 translator follows RFC7915 when doing ICMPv6/ICMP translation; unsupported message types will be silently dropped. IPv6 needs several ICMPv6 message types to be explicitly allowed for correct operation. Make sure that ND6 neighbor solicitation (ICMPv6 type 135) and neighbor advertisement (ICMPv6 type 136) messages will not be handled by translation rules. .Pp After translation, the NAT64 translator by default sends packets through the corresponding netisr queue. Thus the translator host should be configured as an IPv4 and IPv6 router. This also means that a packet is handled by the firewall twice. The first time the original packet is handled and consumed by the translator, and then it is handled again as a translated packet. This behavior can be changed by the sysctl variable .Va net.inet.ip.fw.nat64_direct_output . .Pp The stateful NAT64 configuration command is the following: .Bd -ragged -offset indent .Bk -words .Cm nat64lsn .Ar name .Cm create .Ar create-options .Ek .Ed .Pp The following parameters can be configured: .Bl -tag -width indent .It Cm prefix4 Ar ipv4_prefix/plen The IPv4 prefix with mask defines the pool of IPv4 addresses used as source addresses after translation. The stateful NAT64 module translates the IPv6 source address of a client to one IPv4 address from this pool. Note that incoming IPv4 packets that do not have a corresponding state entry in the states table will be dropped by the translator. Make sure that translation rules handle packets destined to the configured prefix. .It Cm prefix6 Ar ipv6_prefix/length The IPv6 prefix defines the IPv4-embedded IPv6 addresses used by the translator to represent IPv4 addresses. This IPv6 prefix should be configured in DNS64. The translator implementation follows RFC6052, which restricts the length of prefixes to one of the following: 32, 40, 48, 56, 64, or 96. The Well-Known IPv6 Prefix 64:ff9b:: must be 96 bits long. .It Cm max_ports Ar number Maximum number of ports reserved for upper level protocols for one IPv6 client. All reserved ports are divided into chunks between supported protocols. The number of connections from one IPv6 client is limited by this option. Note that closed TCP connections still remain in the list of connections until the .Cm tcp_close_age interval expires. Default value is .Ar 2048 . .It Cm host_del_age Ar seconds The number of seconds until the host entry for an IPv6 client will be deleted and all its resources will be released due to inactivity. Default value is .Ar 3600 . .It Cm pg_del_age Ar seconds The number of seconds until a ports group with unused state entries will be released. Default value is .Ar 900 . .It Cm tcp_syn_age Ar seconds The number of seconds a state entry for a TCP connection with only a SYN sent will be kept. If the TCP connection establishment is not finished, the state entry will be deleted. Default value is .Ar 10 .
.It Cm tcp_est_age Ar seconds The number of seconds a state entry for an established TCP connection will be kept. Default value is .Ar 7200 . .It Cm tcp_close_age Ar seconds The number of seconds a state entry for a closed TCP connection will be kept. Keeping state entries for closed connections is needed, because IPv4 servers typically keep closed connections in a TIME_WAIT state for several minutes. Since the translator's IPv4 addresses are shared among all IPv6 clients, new connections from the same addresses and ports may be rejected by the server, because these connections are still in the TIME_WAIT state. Keeping them in the translator's state table protects against such rejections. Default value is .Ar 180 . .It Cm udp_age Ar seconds The number of seconds the translator keeps a state entry while waiting for a reply to a sent UDP datagram. Default value is .Ar 120 . .It Cm icmp_age Ar seconds The number of seconds the translator keeps a state entry while waiting for a reply to a sent ICMP message. Default value is .Ar 60 . .It Cm log Turn on logging of all handled packets via BPF through the .Ar ipfwlog0 interface. .Ar ipfwlog0 is a pseudo interface and can be created manually after boot with the .Cm ifconfig command. Note that it has a different purpose than the .Ar ipfw0 interface. The translator sends additional information to BPF with each packet. With .Cm tcpdump you are able to see each handled packet before and after translation. .It Cm -log Turn off logging of all handled packets via BPF. .El .Pp To inspect the states table of stateful NAT64, the following command can be used: .Bd -ragged -offset indent .Bk -words .Cm nat64lsn .Ar name .Cm show Cm states .Ek .Ed .Pp The stateless NAT64 translator does not use a states table for translation and converts IPv4 addresses to IPv6 and vice versa solely based on the mappings taken from configured lookup tables. Since a states table is not used by the stateless translator, it can be configured to pass IPv4 clients to IPv6-only servers. .Pp The stateless NAT64 configuration command is the following: .Bd -ragged -offset indent .Bk -words .Cm nat64stl .Ar name .Cm create .Ar create-options .Ek .Ed .Pp The following parameters can be configured: .Bl -tag -width indent .It Cm prefix6 Ar ipv6_prefix/length The IPv6 prefix defines the IPv4-embedded IPv6 addresses used by the translator to represent IPv4 addresses. This IPv6 prefix should be configured in DNS64. .It Cm table4 Ar table46 The lookup table .Ar table46 contains the mapping of how IPv4 addresses should be translated to IPv6 addresses. .It Cm table6 Ar table64 The lookup table .Ar table64 contains the mapping of how IPv6 addresses should be translated to IPv4 addresses. .It Cm log Turn on logging of all handled packets via BPF through the .Ar ipfwlog0 interface. .It Cm -log Turn off logging of all handled packets via BPF. .El .Pp Note that the behavior of the stateless translator with respect to unmatched packets differs from that of the stateful translator. If the corresponding addresses were not found in the lookup tables, the packet will not be dropped and the search continues. .Sh IPv6-to-IPv6 NETWORK PREFIX TRANSLATION (NPTv6) .Nm supports in-kernel IPv6-to-IPv6 network prefix translation as described in RFC6296. The kernel module .Cm ipfw_nptv6 should be loaded, or the kernel should have .Cm options IPFIREWALL_NPTV6 , to be able to use the NPTv6 translator.
.Pp The NPTv6 configuration command is the following: .Bd -ragged -offset indent .Bk -words .Cm nptv6 .Ar name .Cm create .Ar create-options .Ek .Ed .Pp The following parameters can be configured: .Bl -tag -width indent .It Cm int_prefix Ar ipv6_prefix IPv6 prefix used in internal network. NPTv6 module translates source address when it matches this prefix. .It Cm ext_prefix Ar ipv6_prefix IPv6 prefix used in external network. NPTv6 module translates destination address when it matches this prefix. .It Cm ext_if Ar nic The NPTv6 module will use first global IPv6 address from interface .Ar nic as external prefix. It can be useful when IPv6 prefix of external network is dynamically obtained. .Cm ext_prefix and .Cm ext_if options are mutually exclusive. .It Cm prefixlen Ar length The length of specified IPv6 prefixes. It must be in range from 8 to 64. .El .Pp Note that the prefix translation rules are silently ignored when IPv6 packet forwarding is disabled. To enable the packet forwarding, set the sysctl variable .Va net.inet6.ip6.forwarding to 1. .Pp To let the packet continue after being translated, set the sysctl variable .Va net.inet.ip.fw.one_pass to 0. .Sh LOADER TUNABLES Tunables can be set in .Xr loader 8 prompt, .Xr loader.conf 5 or .Xr kenv 1 before ipfw module gets loaded. .Bl -tag -width indent .It Va net.inet.ip.fw.default_to_accept: No 0 Defines ipfw last rule behavior. This value overrides .Cd "options IPFW_DEFAULT_TO_(ACCEPT|DENY)" from kernel configuration file. .It Va net.inet.ip.fw.tables_max: No 128 Defines number of tables available in ipfw. Number cannot exceed 65534. .El .Sh SYSCTL VARIABLES A set of .Xr sysctl 8 variables controls the behaviour of the firewall and associated modules .Pq Nm dummynet , bridge , sctp nat . These are shown below together with their default value (but always check with the .Xr sysctl 8 command what value is actually in use) and meaning: .Bl -tag -width indent .It Va net.inet.ip.alias.sctp.accept_global_ootb_addip: No 0 Defines how the .Nm nat responds to receipt of global OOTB ASCONF-AddIP: .Bl -tag -width indent .It Cm 0 No response (unless a partially matching association exists - ports and vtags match but global address does not) .It Cm 1 .Nm nat will accept and process all OOTB global AddIP messages. .El .Pp Option 1 should never be selected as this forms a security risk. An attacker can establish multiple fake associations by sending AddIP messages. .It Va net.inet.ip.alias.sctp.chunk_proc_limit: No 5 Defines the maximum number of chunks in an SCTP packet that will be parsed for a packet that matches an existing association. This value is enforced to be greater or equal than .Cm net.inet.ip.alias.sctp.initialising_chunk_proc_limit . A high value is a DoS risk yet setting too low a value may result in important control chunks in the packet not being located and parsed. .It Va net.inet.ip.alias.sctp.error_on_ootb: No 1 Defines when the .Nm nat responds to any Out-of-the-Blue (OOTB) packets with ErrorM packets. An OOTB packet is a packet that arrives with no existing association registered in the .Nm nat and is not an INIT or ASCONF-AddIP packet: .Bl -tag -width indent .It Cm 0 ErrorM is never sent in response to OOTB packets. .It Cm 1 ErrorM is only sent to OOTB packets received on the local side. .It Cm 2 ErrorM is sent to the local side and on the global side ONLY if there is a partial match (ports and vtags match but the source global IP does not). This value is only useful if the .Nm nat is tracking global IP addresses. 
.It Cm 3 ErrorM is sent in response to all OOTB packets on both the local and global side (DoS risk). .El .Pp At the moment the default is 0, since the ErrorM packet is not yet supported by most SCTP stacks. When it is supported, and if not tracking global addresses, we recommend setting this value to 1 to allow multi-homed local hosts to function with the .Nm nat . To track global addresses, we recommend setting this value to 2 to allow global hosts to be informed when they need to (re)send an ASCONF-AddIP. Value 3 should never be chosen (except for debugging) as the .Nm nat will respond to all OOTB global packets (a DoS risk). .It Va net.inet.ip.alias.sctp.hashtable_size: No 2003 Size of hash tables used for .Nm nat lookups (100 < prime_number > 1000001). This value sets the .Nm hash table size for any future created .Nm nat instance and therefore must be set prior to creating a .Nm nat instance. The table sizes may be changed to suit specific needs. If there will be few concurrent associations, and memory is scarce, you may make these smaller. If there will be many thousands (or millions) of concurrent associations, you should make these larger. A prime number is best for the table size. The sysctl update function will adjust your input value to the next highest prime number. .It Va net.inet.ip.alias.sctp.holddown_time: No 0 Hold association in table for this many seconds after receiving a SHUTDOWN-COMPLETE. This allows endpoints to correct shutdown gracefully if a shutdown_complete is lost and retransmissions are required. .It Va net.inet.ip.alias.sctp.init_timer: No 15 Timeout value while waiting for (INIT-ACK|AddIP-ACK). This value cannot be 0. .It Va net.inet.ip.alias.sctp.initialising_chunk_proc_limit: No 2 Defines the maximum number of chunks in an SCTP packet that will be parsed when no existing association exists that matches that packet. Ideally this packet will only be an INIT or ASCONF-AddIP packet. A higher value may become a DoS risk as malformed packets can consume processing resources. .It Va net.inet.ip.alias.sctp.param_proc_limit: No 25 Defines the maximum number of parameters within a chunk that will be parsed in a packet. As for other similar sysctl variables, larger values pose a DoS risk. .It Va net.inet.ip.alias.sctp.log_level: No 0 Level of detail in the system log messages (0 \- minimal, 1 \- event, 2 \- info, 3 \- detail, 4 \- debug, 5 \- max debug). May be a good option in high loss environments. .It Va net.inet.ip.alias.sctp.shutdown_time: No 15 Timeout value while waiting for SHUTDOWN-COMPLETE. This value cannot be 0. .It Va net.inet.ip.alias.sctp.track_global_addresses: No 0 Enables/disables global IP address tracking within the .Nm nat and places an upper limit on the number of addresses tracked for each association: .Bl -tag -width indent .It Cm 0 Global tracking is disabled .It Cm >1 Enables tracking, the maximum number of addresses tracked for each association is limited to this value .El .Pp This variable is fully dynamic, the new value will be adopted for all newly arriving associations, existing associations are treated as they were previously. Global tracking will decrease the number of collisions within the .Nm nat at a cost of increased processing load, memory usage, complexity, and possible .Nm nat state problems in complex networks with multiple .Nm nats . We recommend not tracking global IP addresses, this will still result in a fully functional .Nm nat . 
.It Va net.inet.ip.alias.sctp.up_timer: No 300 Timeout value to keep an association up with no traffic. This value cannot be 0. .It Va net.inet.ip.dummynet.codel.interval : No 100000 Default .Cm codel AQM interval in microseconds. The value must be in the range 1..5000000. .It Va net.inet.ip.dummynet.codel.target : No 5000 Default .Cm codel AQM target delay time in microseconds (the minimum acceptable persistent queue delay). The value must be in the range 1..5000000. .It Va net.inet.ip.dummynet.expire : No 1 Lazily delete dynamic pipes/queue once they have no pending traffic. You can disable this by setting the variable to 0, in which case the pipes/queues will only be deleted when the threshold is reached. .It Va net.inet.ip.dummynet.fqcodel.flows : No 1024 Defines the default total number of flow queues (sub-queues) that .Cm fq_codel creates and manages. The value must be in the range 1..65536. .It Va net.inet.ip.dummynet.fqcodel.interval : No 100000 Default .Cm fq_codel scheduler/AQM interval in microseconds. The value must be in the range 1..5000000. .It Va net.inet.ip.dummynet.fqcodel.limit : No 10240 The default hard size limit (in unit of packet) of all queues managed by an instance of the .Cm fq_codel scheduler. The value must be in the range 1..20480. .It Va net.inet.ip.dummynet.fqcodel.quantum : No 1514 The default quantum (credit) of the .Cm fq_codel in unit of byte. The value must be in the range 1..9000. .It Va net.inet.ip.dummynet.fqcodel.target : No 5000 Default .Cm fq_codel scheduler/AQM target delay time in microseconds (the minimum acceptable persistent queue delay). The value must be in the range 1..5000000. .It Va net.inet.ip.dummynet.fqpie.alpha : No 125 The default .Ar alpha parameter (scaled by 1000) for .Cm fq_pie scheduler/AQM. The value must be in the range 1..7000. .It Va net.inet.ip.dummynet.fqpie.beta : No 1250 The default .Ar beta parameter (scaled by 1000) for .Cm fq_pie scheduler/AQM. The value must be in the range 1..7000. .It Va net.inet.ip.dummynet.fqpie.flows : No 1024 Defines the default total number of flow queues (sub-queues) that .Cm fq_pie creates and manages. The value must be in the range 1..65536. .It Va net.inet.ip.dummynet.fqpie.limit : No 10240 The default hard size limit (in unit of packet) of all queues managed by an instance of the .Cm fq_pie scheduler. The value must be in the range 1..20480. .It Va net.inet.ip.dummynet.fqpie.max_burst : No 150000 The default maximum period of microseconds that .Cm fq_pie scheduler/AQM does not drop/mark packets. The value must be in the range 1..10000000. .It Va net.inet.ip.dummynet.fqpie.max_ecnth : No 99 The default maximum ECN probability threshold (scaled by 1000) for .Cm fq_pie scheduler/AQM. The value must be in the range 1..7000. .It Va net.inet.ip.dummynet.fqpie.quantum : No 1514 The default quantum (credit) of the .Cm fq_pie in unit of byte. The value must be in the range 1..9000. .It Va net.inet.ip.dummynet.fqpie.target : No 15000 The default .Cm target delay of the .Cm fq_pie in unit of microsecond. The value must be in the range 1..5000000. .It Va net.inet.ip.dummynet.fqpie.tupdate : No 15000 The default .Cm tupdate of the .Cm fq_pie in unit of microsecond. The value must be in the range 1..5000000. .It Va net.inet.ip.dummynet.hash_size : No 64 Default size of the hash table used for dynamic pipes/queues. This value is used when no .Cm buckets option is specified when configuring a pipe/queue. 
.It Va net.inet.ip.dummynet.io_fast : No 0 If set to a non-zero value, the .Dq fast mode of .Nm dummynet operation (see above) is enabled. .It Va net.inet.ip.dummynet.io_pkt Number of packets passed to .Nm dummynet . .It Va net.inet.ip.dummynet.io_pkt_drop Number of packets dropped by .Nm dummynet . .It Va net.inet.ip.dummynet.io_pkt_fast Number of packets bypassed by the .Nm dummynet scheduler. .It Va net.inet.ip.dummynet.max_chain_len : No 16 Target value for the maximum number of pipes/queues in a hash bucket. The product .Cm max_chain_len*hash_size is used to determine the threshold over which empty pipes/queues will be expired even when .Cm net.inet.ip.dummynet.expire=0 . .It Va net.inet.ip.dummynet.red_lookup_depth : No 256 .It Va net.inet.ip.dummynet.red_avg_pkt_size : No 512 .It Va net.inet.ip.dummynet.red_max_pkt_size : No 1500 Parameters used in the computations of the drop probability for the RED algorithm. .It Va net.inet.ip.dummynet.pie.alpha : No 125 The default .Ar alpha parameter (scaled by 1000) for .Cm pie AQM. The value must be in the range 1..7000. .It Va net.inet.ip.dummynet.pie.beta : No 1250 The default .Ar beta parameter (scaled by 1000) for .Cm pie AQM. The value must be in the range 1..7000. .It Va net.inet.ip.dummynet.pie.max_burst : No 150000 The default maximum period of microseconds that .Cm pie AQM does not drop/mark packets. The value must be in the range 1..10000000. .It Va net.inet.ip.dummynet.pie.max_ecnth : No 99 The default maximum ECN probability threshold (scaled by 1000) for .Cm pie AQM. The value must be in the range 1..7000. .It Va net.inet.ip.dummynet.pie.target : No 15000 The default .Cm target delay of .Cm pie AQM in unit of microsecond. The value must be in the range 1..5000000. .It Va net.inet.ip.dummynet.pie.tupdate : No 15000 The default .Cm tupdate of .Cm pie AQM in unit of microsecond. The value must be in the range 1..5000000. .It Va net.inet.ip.dummynet.pipe_byte_limit : No 1048576 .It Va net.inet.ip.dummynet.pipe_slot_limit : No 100 The maximum queue size that can be specified in bytes or packets. These limits prevent accidental exhaustion of resources such as mbufs. If you raise these limits, you should make sure the system is configured so that sufficient resources are available. .It Va net.inet.ip.fw.autoinc_step : No 100 Delta between rule numbers when auto-generating them. The value must be in the range 1..1000. .It Va net.inet.ip.fw.curr_dyn_buckets : Va net.inet.ip.fw.dyn_buckets The current number of buckets in the hash table for dynamic rules (readonly). .It Va net.inet.ip.fw.debug : No 1 Controls debugging messages produced by .Nm . .It Va net.inet.ip.fw.default_rule : No 65535 The default rule number (read-only). By the design of .Nm , the default rule is the last one, so its number can also serve as the highest number allowed for a rule. .It Va net.inet.ip.fw.dyn_buckets : No 256 The number of buckets in the hash table for dynamic rules. Must be a power of 2, up to 65536. It only takes effect when all dynamic rules have expired, so you are advised to use a .Cm flush command to make sure that the hash table is resized. .It Va net.inet.ip.fw.dyn_count : No 3 Current number of dynamic rules (read-only). .It Va net.inet.ip.fw.dyn_keepalive : No 1 Enables generation of keepalive packets for .Cm keep-state rules on TCP sessions. A keepalive is generated to both sides of the connection every 5 seconds for the last 20 seconds of the lifetime of the rule. .It Va net.inet.ip.fw.dyn_max : No 8192 Maximum number of dynamic rules. 
When you hit this limit, no more dynamic rules can be installed until old ones expire. .It Va net.inet.ip.fw.dyn_ack_lifetime : No 300 .It Va net.inet.ip.fw.dyn_syn_lifetime : No 20 .It Va net.inet.ip.fw.dyn_fin_lifetime : No 1 .It Va net.inet.ip.fw.dyn_rst_lifetime : No 1 .It Va net.inet.ip.fw.dyn_udp_lifetime : No 5 .It Va net.inet.ip.fw.dyn_short_lifetime : No 30 These variables control the lifetime, in seconds, of dynamic rules. Upon the initial SYN exchange the lifetime is kept short, then increased after both SYNs have been seen, then decreased again during the final FIN exchange or when a RST is received. Both .Em dyn_fin_lifetime and .Em dyn_rst_lifetime must be strictly lower than 5 seconds, the period of repetition of keepalives. The firewall enforces that. .It Va net.inet.ip.fw.dyn_keep_states: No 0 Keep dynamic states on rule/set deletion. States are relinked to the default rule (65535). This can be handy for ruleset reloads. Turned off by default. .It Va net.inet.ip.fw.enable : No 1 Enables the firewall. Setting this variable to 0 lets you run your machine without the firewall even if it is compiled in. .It Va net.inet6.ip6.fw.enable : No 1 Provides the same functionality as above for the IPv6 case. .It Va net.inet.ip.fw.one_pass : No 1 When set, the packet exiting from the .Nm dummynet pipe or from an .Xr ng_ipfw 4 node is not passed through the firewall again. Otherwise, after an action, the packet is reinjected into the firewall at the next rule. .It Va net.inet.ip.fw.tables_max : No 128 Maximum number of tables. .It Va net.inet.ip.fw.verbose : No 1 Enables verbose messages. .It Va net.inet.ip.fw.verbose_limit : No 0 Limits the number of messages produced by a verbose firewall. .It Va net.inet6.ip6.fw.deny_unknown_exthdrs : No 1 If enabled, packets with unknown IPv6 Extension Headers will be denied. .It Va net.link.ether.ipfw : No 0 Controls whether layer-2 packets are passed to .Nm . Default is no. .It Va net.link.bridge.ipfw : No 0 Controls whether bridged packets are passed to .Nm . Default is no. .It Va net.inet.ip.fw.nat64_allow_private : No 0 Defines how .Nm nat64 handles private IPv4 addresses: .Bl -tag -width indent .It Cm 0 Packets with private IPv4 addresses will not be handled by the translator. .It Cm 1 The translator will accept and process packets with private IPv4 addresses. .El .It Va net.inet.ip.fw.nat64_debug : No 0 Controls debugging messages produced by the .Nm ipfw_nat64 module. .It Va net.inet.ip.fw.nat64_direct_output : No 0 Controls the output method used by the .Nm ipfw_nat64 module: .Bl -tag -width indent .It Cm 0 A packet is handled by .Nm ipfw twice. The first time the original packet is handled by .Nm ipfw and consumed by the .Nm ipfw_nat64 translator. Then the translated packet is queued via netisr for input processing again. .It Cm 1 A packet is handled by .Nm ipfw only once, and after translation it will be pushed directly to the outgoing interface. .El .El .Sh INTERNAL DIAGNOSTICS There are some commands that may be useful for understanding the current state of certain subsystems inside the kernel module. These commands provide debugging output which may change without notice. .Pp Currently the following commands are available as .Cm internal sub-options: .Bl -tag -width indent .It Cm iflist Lists all interfaces which are currently tracked by .Nm together with their in-kernel status. .It Cm talist Lists all table lookup algorithms currently available. .El .Sh EXAMPLES There are far too many possible uses of .Nm so this section will only give a small set of examples.
.Pp .Ss BASIC PACKET FILTERING This command adds an entry which denies all tcp packets from .Em cracker.evil.org to the telnet port of .Em wolf.tambov.su from being forwarded by the host: .Pp .Dl "ipfw add deny tcp from cracker.evil.org to wolf.tambov.su telnet" .Pp This one disallows any connection from the entire cracker's network to my host: .Pp .Dl "ipfw add deny ip from 123.45.67.0/24 to my.host.org" .Pp A first and efficient way to limit access (not using dynamic rules) is to use the following rules: .Pp .Dl "ipfw add allow tcp from any to any established" .Dl "ipfw add allow tcp from net1 portlist1 to net2 portlist2 setup" .Dl "ipfw add allow tcp from net3 portlist3 to net3 portlist3 setup" .Dl "..." .Dl "ipfw add deny tcp from any to any" .Pp The first rule will be a quick match for normal TCP packets, but it will not match the initial SYN packet, which will be matched by the .Cm setup rules only for selected source/destination pairs. All other SYN packets will be rejected by the final .Cm deny rule. .Pp If you administer one or more subnets, you can take advantage of the address sets and or-blocks and write extremely compact rulesets which selectively enable services to blocks of clients, as below: .Pp .Dl "goodguys=\*q{ 10.1.2.0/24{20,35,66,18} or 10.2.3.0/28{6,3,11} }\*q" .Dl "badguys=\*q10.1.2.0/24{8,38,60}\*q" .Dl "" .Dl "ipfw add allow ip from ${goodguys} to any" .Dl "ipfw add deny ip from ${badguys} to any" .Dl "... normal policies ..." .Pp The .Cm verrevpath option could be used to do automated anti-spoofing by adding the following to the top of a ruleset: .Pp .Dl "ipfw add deny ip from any to any not verrevpath in" .Pp This rule drops all incoming packets that appear to be coming to the system on the wrong interface. For example, a packet with a source address belonging to a host on a protected internal network would be dropped if it tried to enter the system from an external interface. .Pp The .Cm antispoof option could be used to do similar but more restricted anti-spoofing by adding the following to the top of a ruleset: .Pp .Dl "ipfw add deny ip from any to any not antispoof in" .Pp This rule drops all incoming packets that appear to be coming from another directly connected system but on the wrong interface. For example, a packet with a source address of .Li 192.168.0.0/24 , configured on .Li fxp0 , but coming in on .Li fxp1 would be dropped. .Pp The .Cm setdscp option could be used to (re)mark user traffic, by adding the following to the appropriate place in the ruleset: .Pp .Dl "ipfw add setdscp be ip from any to any dscp af11,af21" .Ss DYNAMIC RULES In order to protect a site from flood attacks involving fake TCP packets, it is safer to use dynamic rules: .Pp .Dl "ipfw add check-state" .Dl "ipfw add deny tcp from any to any established" .Dl "ipfw add allow tcp from my-net to any setup keep-state" .Pp This will let the firewall install dynamic rules only for those connections which start with a regular SYN packet coming from the inside of our network. Dynamic rules are checked when encountering the first occurrence of a .Cm check-state , .Cm keep-state or .Cm limit rule. A .Cm check-state rule should usually be placed near the beginning of the ruleset to minimize the amount of work scanning the ruleset. Your mileage may vary. .Pp For more complex scenarios with dynamic rules, .Cm record-state and .Cm defer-action can be used to precisely control the creation and checking of dynamic rules.
An example of the usage of these options is provided in the .Sx NETWORK ADDRESS TRANSLATION (NAT) section. .Pp To limit the number of connections a user can open, you can use the following type of rules: .Pp .Dl "ipfw add allow tcp from my-net/24 to any setup limit src-addr 10" .Dl "ipfw add allow tcp from any to me setup limit src-addr 4" .Pp The former (assuming it runs on a gateway) will allow each host on a /24 network to open at most 10 TCP connections. The latter can be placed on a server to make sure that a single client does not use more than 4 simultaneous connections. .Pp .Em BEWARE : stateful rules can be subject to denial-of-service attacks by a SYN-flood which opens a huge number of dynamic rules. The effects of such attacks can be partially limited by acting on a set of .Xr sysctl 8 variables which control the operation of the firewall. .Pp Here is a good use of the .Cm list command to see accounting records and timestamp information: .Pp .Dl ipfw -at list .Pp or in short form without timestamps: .Pp .Dl ipfw -a list .Pp which is equivalent to: .Pp .Dl ipfw show .Pp The next rule diverts all incoming packets from 192.168.2.0/24 to divert port 5000: .Pp .Dl ipfw divert 5000 ip from 192.168.2.0/24 to any in .Ss TRAFFIC SHAPING The following rules show some of the applications of .Nm and .Nm dummynet for simulations and the like. .Pp This rule randomly drops incoming packets with a probability of 5%: .Pp .Dl "ipfw add prob 0.05 deny ip from any to any in" .Pp A similar effect can be achieved by making use of .Nm dummynet pipes: .Pp .Dl "ipfw add pipe 10 ip from any to any" .Dl "ipfw pipe 10 config plr 0.05" .Pp We can use pipes to artificially limit bandwidth; e.g.\& on a machine acting as a router, if we want to limit traffic from local clients on 192.168.2.0/24, we do: .Pp .Dl "ipfw add pipe 1 ip from 192.168.2.0/24 to any out" .Dl "ipfw pipe 1 config bw 300Kbit/s queue 50KBytes" .Pp Note that we use the .Cm out modifier so that the rule is not used twice. Remember that .Nm rules are checked on both incoming and outgoing packets. .Pp Should we want to simulate a bidirectional link with bandwidth limitations, the correct way is the following: .Pp .Dl "ipfw add pipe 1 ip from any to any out" .Dl "ipfw add pipe 2 ip from any to any in" .Dl "ipfw pipe 1 config bw 64Kbit/s queue 10Kbytes" .Dl "ipfw pipe 2 config bw 64Kbit/s queue 10Kbytes" .Pp The above can be very useful, e.g.\& if you want to see how your fancy Web page will look for a residential user who is connected only through a slow link. You should not use only one pipe for both directions, unless you want to simulate a half-duplex medium (e.g.\& AppleTalk, Ethernet, IRDA). It is not necessary that both pipes have the same configuration, so we can also simulate asymmetric links. .Pp Should we want to verify network performance with the RED queue management algorithm: .Pp .Dl "ipfw add pipe 1 ip from any to any" .Dl "ipfw pipe 1 config bw 500Kbit/s queue 100 red 0.002/30/80/0.1" .Pp Another typical application of the traffic shaper is to introduce some delay in the communication. This can significantly affect applications which do a lot of Remote Procedure Calls, and where the round-trip time of the connection often becomes a limiting factor much more than bandwidth: .Pp .Dl "ipfw add pipe 1 ip from any to any out" .Dl "ipfw add pipe 2 ip from any to any in" .Dl "ipfw pipe 1 config delay 250ms bw 1Mbit/s" .Dl "ipfw pipe 2 config delay 250ms bw 1Mbit/s" .Pp Per-flow queueing can be useful for a variety of purposes.
A very simple one is counting traffic: .Pp .Dl "ipfw add pipe 1 tcp from any to any" .Dl "ipfw add pipe 1 udp from any to any" .Dl "ipfw add pipe 1 ip from any to any" .Dl "ipfw pipe 1 config mask all" .Pp The above set of rules will create queues (and collect statistics) for all traffic. Because the pipes have no limitations, the only effect is collecting statistics. Note that we need 3 rules, not just the last one, because when .Nm tries to match IP packets it will not consider ports, so we would not see connections on separate ports as different ones. .Pp A more sophisticated example is limiting the outbound traffic on a net with per-host limits, rather than per-network limits: .Pp .Dl "ipfw add pipe 1 ip from 192.168.2.0/24 to any out" .Dl "ipfw add pipe 2 ip from any to 192.168.2.0/24 in" .Dl "ipfw pipe 1 config mask src-ip 0x000000ff bw 200Kbit/s queue 20Kbytes" .Dl "ipfw pipe 2 config mask dst-ip 0x000000ff bw 200Kbit/s queue 20Kbytes" .Ss LOOKUP TABLES In the following example, we need to create several traffic bandwidth classes and we need different hosts/networks to fall into different classes. We create one pipe for each class and configure them accordingly. Then we create a single table and fill it with IP subnets and addresses. For each subnet/host we set the argument equal to the number of the pipe that it should use. Then we classify traffic using a single rule: .Pp .Dl "ipfw pipe 1 config bw 1000Kbyte/s" .Dl "ipfw pipe 4 config bw 4000Kbyte/s" .Dl "..." .Dl "ipfw table T1 create type addr" .Dl "ipfw table T1 add 192.168.2.0/24 1" .Dl "ipfw table T1 add 192.168.0.0/27 4" .Dl "ipfw table T1 add 192.168.0.2 1" .Dl "..." .Dl "ipfw add pipe tablearg ip from 'table(T1)' to any" .Pp Using the .Cm fwd action, the table entries may include hostnames and IP addresses. .Pp .Dl "ipfw table T2 create type addr ftype ip" .Dl "ipfw table T2 add 192.168.2.0/24 10.23.2.1" .Dl "ipfw table T2 add 192.168.0.0/27 router1.dmz" .Dl "..." .Dl "ipfw add 100 fwd tablearg ip from any to 'table(T2)'" .Pp In the following example, a per-interface firewall is created: .Pp .Dl "ipfw table IN create type iface valtype skipto,fib" .Dl "ipfw table IN add vlan20 12000,12" .Dl "ipfw table IN add vlan30 13000,13" .Dl "ipfw table OUT create type iface valtype skipto" .Dl "ipfw table OUT add vlan20 22000" .Dl "ipfw table OUT add vlan30 23000" .Dl ".." .Dl "ipfw add 100 setfib tablearg ip from any to any recv 'table(IN)' in" .Dl "ipfw add 200 skipto tablearg ip from any to any recv 'table(IN)' in" .Dl "ipfw add 300 skipto tablearg ip from any to any xmit 'table(OUT)' out" .Pp The following example illustrates the usage of flow tables: .Pp .Dl "ipfw table fl create type flow:src-ip,proto,dst-ip,dst-port" .Dl "ipfw table fl add 2a02:6b8:77::88,tcp,2a02:6b8:77::99,80 11" .Dl "ipfw table fl add 10.0.0.1,udp,10.0.0.2,53 12" .Dl ".." .Dl "ipfw add 100 allow ip from any to any flow 'table(fl,11)' recv ix0" .Ss SETS OF RULES To add a set of rules atomically, e.g.\& set 18: .Pp .Dl "ipfw set disable 18" .Dl "ipfw add NN set 18 ... # repeat as needed" .Dl "ipfw set enable 18" .Pp To delete a set of rules atomically, the command is simply: .Pp .Dl "ipfw delete set 18" .Pp To test a ruleset, and to disable it and regain control if something goes wrong: .Pp .Dl "ipfw set disable 18" .Dl "ipfw add NN set 18 ... # repeat as needed" .Dl "ipfw set enable 18; echo done; sleep 30 && ipfw set disable 18" .Pp Here, if everything goes well, you press control-C before the "sleep" terminates, and your ruleset will be left active.
Otherwise, e.g.\& if you cannot access your box, the ruleset will be disabled after the sleep terminates, thus restoring the previous situation. .Pp To show rules of a specific set: .Pp .Dl "ipfw set 18 show" .Pp To show rules of a disabled set: .Pp .Dl "ipfw -S set 18 show" .Pp To clear the counters of a specific rule in a specific set: .Pp .Dl "ipfw set 18 zero NN" .Pp To delete a specific rule of a specific set: .Pp .Dl "ipfw set 18 delete NN" .Ss NAT, REDIRECT AND LSNAT First redirect all the traffic to nat instance 123: .Pp .Dl "ipfw add nat 123 all from any to any" .Pp Then to configure nat instance 123 to alias all the outgoing traffic with IP 192.168.0.123, blocking all incoming connections, trying to keep the same ports on both sides, clearing the aliasing table on address change, and keeping a log of traffic/link statistics: .Pp .Dl "ipfw nat 123 config ip 192.168.0.123 log deny_in reset same_ports" .Pp Or to change the address of instance 123 (the aliasing table will be cleared; see the reset option): .Pp .Dl "ipfw nat 123 config ip 10.0.0.1" .Pp To see the configuration of nat instance 123: .Pp .Dl "ipfw nat 123 show config" .Pp To show logs of all the instances in range 111-999: .Pp .Dl "ipfw nat 111-999 show" .Pp To see the configurations of all instances: .Pp .Dl "ipfw nat show config" .Pp Or a redirect rule with mixed modes could look like: .Pp .Dl "ipfw nat 123 config redirect_addr 10.0.0.1 10.0.0.66" .Dl " redirect_port tcp 192.168.0.1:80 500" .Dl " redirect_proto udp 192.168.1.43 192.168.1.1" .Dl " redirect_addr 192.168.0.10,192.168.0.11" .Dl " 10.0.0.100 # LSNAT" .Dl " redirect_port tcp 192.168.0.1:80,192.168.0.10:22" .Dl " 500 # LSNAT" .Pp or it could be split into: .Pp .Dl "ipfw nat 1 config redirect_addr 10.0.0.1 10.0.0.66" .Dl "ipfw nat 2 config redirect_port tcp 192.168.0.1:80 500" .Dl "ipfw nat 3 config redirect_proto udp 192.168.1.43 192.168.1.1" .Dl "ipfw nat 4 config redirect_addr 192.168.0.10,192.168.0.11,192.168.0.12" .Dl " 10.0.0.100" .Dl "ipfw nat 5 config redirect_port tcp" .Dl " 192.168.0.1:80,192.168.0.10:22,192.168.0.20:25 500" .Pp Sometimes you may want to mix NAT and dynamic rules. This can be achieved with the .Cm record-state and .Cm defer-action options. The problem is that you need to create the dynamic rule before NAT and check it after the NAT action (or vice versa) to have consistent addresses and ports. A rule with the .Cm keep-state option will trigger activation of an existing dynamic state, and the action of such a rule will be performed as soon as the rule is matched. In the case of NAT and an .Cm allow rule, the packet needs to be passed to NAT, not allowed as soon as possible. .Pp Here is an example set of rules to achieve this. Bear in mind that this is an example only and it is not very useful by itself. .Pp On the way out, after all checks, place these rules: .Pp .Dl "ipfw add allow record-state skip-action" .Dl "ipfw add nat 1" .Pp And on the way in there should be something like this: .Pp .Dl "ipfw add nat 1" .Dl "ipfw add check-state" .Pp Please note that the first rule on the way out does not allow the packet and does not execute existing dynamic rules. All it does is create a new dynamic rule with the .Cm allow action, if one has not been created yet. Later, this dynamic rule is used on the way in by the .Cm check-state rule. .Ss CONFIGURING CODEL, PIE, FQ-CODEL and FQ-PIE AQM .Cm codel and .Cm pie AQM can be configured for .Nm dummynet .Cm pipe or .Cm queue.
.Pp To configure a .Cm pipe with .Cm codel AQM using the default configuration for traffic from 192.168.0.0/24 and a 1Mbits/s rate limit, we do: .Pp .Dl "ipfw pipe 1 config bw 1mbits/s codel" .Dl "ipfw add 100 pipe 1 ip from 192.168.0.0/24 to any" .Pp To configure a .Cm queue with .Cm codel AQM using different configuration parameters for traffic from 192.168.0.0/24 and a 1Mbits/s rate limit, we do: .Pp .Dl "ipfw pipe 1 config bw 1mbits/s" .Dl "ipfw queue 1 config pipe 1 codel target 8ms interval 160ms ecn" .Dl "ipfw add 100 queue 1 ip from 192.168.0.0/24 to any" .Pp To configure a .Cm pipe with .Cm pie AQM using the default configuration for traffic from 192.168.0.0/24 and a 1Mbits/s rate limit, we do: .Pp .Dl "ipfw pipe 1 config bw 1mbits/s pie" .Dl "ipfw add 100 pipe 1 ip from 192.168.0.0/24 to any" .Pp To configure a .Cm queue with .Cm pie AQM using different configuration parameters for traffic from 192.168.0.0/24 and a 1Mbits/s rate limit, we do: .Pp .Dl "ipfw pipe 1 config bw 1mbits/s" .Dl "ipfw queue 1 config pipe 1 pie target 20ms tupdate 30ms ecn" .Dl "ipfw add 100 queue 1 ip from 192.168.0.0/24 to any" .Pp .Cm fq_codel and .Cm fq_pie AQM can be configured for .Nm dummynet schedulers. .Pp To configure an .Cm fq_codel scheduler using different configuration parameters for traffic from 192.168.0.0/24 and a 1Mbits/s rate limit, we do: .Pp .Dl "ipfw pipe 1 config bw 1mbits/s" .Dl "ipfw sched 1 config pipe 1 type fq_codel" .Dl "ipfw queue 1 config sched 1" .Dl "ipfw add 100 queue 1 ip from 192.168.0.0/24 to any" .Pp To change the .Cm fq_codel default configuration for a .Cm sched , such as disabling ECN and changing the .Ar target to 10ms, we do: .Pp .Dl "ipfw sched 1 config pipe 1 type fq_codel target 10ms noecn" .Pp Similarly to .Cm fq_codel , to configure an .Cm fq_pie scheduler using different configuration parameters for traffic from 192.168.0.0/24 and a 1Mbits/s rate limit, we do: .Pp .Dl "ipfw pipe 1 config bw 1mbits/s" .Dl "ipfw sched 1 config pipe 1 type fq_pie" .Dl "ipfw queue 1 config sched 1" .Dl "ipfw add 100 queue 1 ip from 192.168.0.0/24 to any" .Pp The configuration of an .Cm fq_pie .Cm sched can be changed in a similar way as for .Cm fq_codel . .Sh SEE ALSO .Xr cpp 1 , .Xr m4 1 , .Xr altq 4 , .Xr divert 4 , .Xr dummynet 4 , .Xr if_bridge 4 , .Xr ip 4 , .Xr ipfirewall 4 , .Xr ng_ipfw 4 , .Xr protocols 5 , .Xr services 5 , .Xr init 8 , .Xr kldload 8 , .Xr reboot 8 , .Xr sysctl 8 , .Xr syslogd 8 .Sh HISTORY The .Nm utility first appeared in .Fx 2.0 . .Nm dummynet was introduced in .Fx 2.2.8 . Stateful extensions were introduced in .Fx 4.0 . .Nm ipfw2 was introduced in Summer 2002. .Sh AUTHORS .An Ugen J. S. Antsilevich , .An Poul-Henning Kamp , .An Alex Nash , .An Archie Cobbs , .An Luigi Rizzo , .An Rasool Al-Saadi . .Pp .An -nosplit API based upon code written by .An Daniel Boulet for BSDI. .Pp Dummynet was introduced by Luigi Rizzo in 1997-1998. .Pp Some early work (1999-2000) on the .Nm dummynet traffic shaper was supported by Akamba Corp. .Pp The ipfw core (ipfw2) has been completely redesigned and reimplemented by Luigi Rizzo in summer 2002. Further actions and options have been added by various developers over the years. .Pp .An -nosplit In-kernel NAT support written by .An Paolo Pisati Aq Mt piso@FreeBSD.org as part of a Summer of Code 2005 project. .Pp SCTP .Nm nat support has been developed by .An The Centre for Advanced Internet Architectures (CAIA) Aq http://www.caia.swin.edu.au . The primary developers and maintainers are David Hayes and Jason But.
For further information visit: .Aq http://www.caia.swin.edu.au/urp/SONATA .Pp Delay profiles have been developed by Alessandro Cerri and Luigi Rizzo, supported by the European Commission within Projects Onelab and Onelab2. .Pp CoDel, PIE, FQ-CoDel and FQ-PIE AQM for Dummynet have been implemented by .An The Centre for Advanced Internet Architectures (CAIA) in 2016, supported by The Comcast Innovation Fund. The primary developer is Rasool Al-Saadi. .Sh BUGS The syntax has grown over the years and sometimes it might be confusing. Unfortunately, backward compatibility prevents cleaning up mistakes made in the definition of the syntax. .Pp .Em !!! WARNING !!! .Pp Misconfiguring the firewall can put your computer in an unusable state, possibly shutting down network services and requiring console access to regain control of it. .Pp Incoming packet fragments diverted by .Cm divert are reassembled before delivery to the socket. The action used on those packets is the one from the rule which matches the first fragment of the packet. .Pp Packets diverted to userland and then reinserted by a userland process may lose various packet attributes. The packet source interface name will be preserved if it is shorter than 8 bytes and the userland process saves and reuses the sockaddr_in (as does .Xr natd 8 ) ; otherwise, it may be lost. If a packet is reinserted in this manner, later rules may be incorrectly applied, making the order of .Cm divert rules in the rule sequence very important. .Pp Dummynet drops all packets with IPv6 link-local addresses. .Pp Rules using .Cm uid or .Cm gid may not behave as expected. In particular, incoming SYN packets may have no uid or gid associated with them since they do not yet belong to a TCP connection, and the uid/gid associated with a packet may not be as expected if the associated process calls .Xr setuid 2 or similar system calls. .Pp Rule syntax is subject to the command line environment and some patterns may need to be escaped with the backslash character or quoted appropriately. .Pp Due to the architecture of .Xr libalias 3 , ipfw nat is not compatible with TCP segmentation offloading (TSO). Thus, to reliably nat your network traffic, please disable TSO on your NICs using .Xr ifconfig 8 . .Pp ICMP error messages are not implicitly matched by dynamic rules for the respective conversations. To avoid failures of network error detection and path MTU discovery, ICMP error messages may need to be allowed explicitly through static rules. .Pp Rules using .Cm call and .Cm return actions may lead to confusing behaviour if the ruleset has mistakes, and/or interaction with other subsystems (netgraph, dummynet, etc.) is used. One possible case for this is a packet leaving .Nm in a subroutine on the input pass, while later on output encountering an unpaired .Cm return first. As the call stack is kept intact after the input pass, the packet will suddenly return to the rule number used on the input pass, not the output one. The order of processing should be checked carefully to avoid such mistakes. Index: projects/clang700-import/sbin/newfs/mkfs.c =================================================================== --- projects/clang700-import/sbin/newfs/mkfs.c (revision 340426) +++ projects/clang700-import/sbin/newfs/mkfs.c (revision 340427) @@ -1,1195 +1,1190 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2002 Networks Associates Technology, Inc. * All rights reserved.
* * This software was developed for the FreeBSD Project by Marshall * Kirk McKusick and Network Associates Laboratories, the Security * Research Division of Network Associates, Inc. under DARPA/SPAWAR * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS * research program. * * Copyright (c) 1980, 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if 0 #ifndef lint static char sccsid[] = "@(#)mkfs.c 8.11 (Berkeley) 5/3/95"; #endif /* not lint */ #endif #include __FBSDID("$FreeBSD$"); #define IN_RTLD /* So we pickup the P_OSREL defines */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "newfs.h" /* * make file system for cylinder-group style file systems */ #define UMASK 0755 #define POWEROF2(num) (((num) & ((num) - 1)) == 0) static struct csum *fscs; #define sblock disk.d_fs #define acg disk.d_cg union dinode { struct ufs1_dinode dp1; struct ufs2_dinode dp2; }; #define DIP(dp, field) \ ((sblock.fs_magic == FS_UFS1_MAGIC) ? 
\ (dp)->dp1.field : (dp)->dp2.field) static caddr_t iobuf; static long iobufsize; static ufs2_daddr_t alloc(int size, int mode); static int charsperline(void); static void clrblock(struct fs *, unsigned char *, int); static void fsinit(time_t); static int ilog2(int); static void initcg(int, time_t); static int isblock(struct fs *, unsigned char *, int); static void iput(union dinode *, ino_t); static int makedir(struct direct *, int); static void setblock(struct fs *, unsigned char *, int); static void wtfs(ufs2_daddr_t, int, char *); static u_int32_t newfs_random(void); void mkfs(struct partition *pp, char *fsys) { int fragsperinode, optimalfpg, origdensity, minfpg, lastminfpg; long i, j, csfrags; uint cg; time_t utime; quad_t sizepb; int width; ino_t maxinum; int minfragsperinode; /* minimum ratio of frags to inodes */ char tmpbuf[100]; /* XXX this will break in about 2,500 years */ struct fsrecovery *fsr; char *fsrbuf; union { struct fs fdummy; char cdummy[SBLOCKSIZE]; } dummy; #define fsdummy dummy.fdummy #define chdummy dummy.cdummy /* * Our blocks == sector size, and the version of UFS we are using is * specified by Oflag. */ disk.d_bsize = sectorsize; disk.d_ufs = Oflag; if (Rflag) utime = 1000000000; else time(&utime); sblock.fs_old_flags = FS_FLAGS_UPDATED; sblock.fs_flags = 0; if (Uflag) sblock.fs_flags |= FS_DOSOFTDEP; if (Lflag) strlcpy(sblock.fs_volname, volumelabel, MAXVOLLEN); if (Jflag) sblock.fs_flags |= FS_GJOURNAL; if (lflag) sblock.fs_flags |= FS_MULTILABEL; if (tflag) sblock.fs_flags |= FS_TRIM; /* * Validate the given file system size. * Verify that its last block can actually be accessed. * Convert to file system fragment sized units. */ if (fssize <= 0) { printf("preposterous size %jd\n", (intmax_t)fssize); exit(13); } wtfs(fssize - (realsectorsize / DEV_BSIZE), realsectorsize, (char *)&sblock); /* * collect and verify the file system density info */ sblock.fs_avgfilesize = avgfilesize; sblock.fs_avgfpdir = avgfilesperdir; if (sblock.fs_avgfilesize <= 0) printf("illegal expected average file size %d\n", sblock.fs_avgfilesize), exit(14); if (sblock.fs_avgfpdir <= 0) printf("illegal expected number of files per directory %d\n", sblock.fs_avgfpdir), exit(15); restart: /* * collect and verify the block and fragment sizes */ sblock.fs_bsize = bsize; sblock.fs_fsize = fsize; if (!POWEROF2(sblock.fs_bsize)) { printf("block size must be a power of 2, not %d\n", sblock.fs_bsize); exit(16); } if (!POWEROF2(sblock.fs_fsize)) { printf("fragment size must be a power of 2, not %d\n", sblock.fs_fsize); exit(17); } if (sblock.fs_fsize < sectorsize) { printf("increasing fragment size from %d to sector size (%d)\n", sblock.fs_fsize, sectorsize); sblock.fs_fsize = sectorsize; } if (sblock.fs_bsize > MAXBSIZE) { printf("decreasing block size from %d to maximum (%d)\n", sblock.fs_bsize, MAXBSIZE); sblock.fs_bsize = MAXBSIZE; } if (sblock.fs_bsize < MINBSIZE) { printf("increasing block size from %d to minimum (%d)\n", sblock.fs_bsize, MINBSIZE); sblock.fs_bsize = MINBSIZE; } if (sblock.fs_fsize > MAXBSIZE) { printf("decreasing fragment size from %d to maximum (%d)\n", sblock.fs_fsize, MAXBSIZE); sblock.fs_fsize = MAXBSIZE; } if (sblock.fs_bsize < sblock.fs_fsize) { printf("increasing block size from %d to fragment size (%d)\n", sblock.fs_bsize, sblock.fs_fsize); sblock.fs_bsize = sblock.fs_fsize; } if (sblock.fs_fsize * MAXFRAG < sblock.fs_bsize) { printf( "increasing fragment size from %d to block size / %d (%d)\n", sblock.fs_fsize, MAXFRAG, sblock.fs_bsize / MAXFRAG); sblock.fs_fsize = 
sblock.fs_bsize / MAXFRAG; } if (maxbsize == 0) maxbsize = bsize; if (maxbsize < bsize || !POWEROF2(maxbsize)) { sblock.fs_maxbsize = sblock.fs_bsize; printf("Extent size set to %d\n", sblock.fs_maxbsize); } else if (sblock.fs_maxbsize > FS_MAXCONTIG * sblock.fs_bsize) { sblock.fs_maxbsize = FS_MAXCONTIG * sblock.fs_bsize; printf("Extent size reduced to %d\n", sblock.fs_maxbsize); } else { sblock.fs_maxbsize = maxbsize; } /* * Maxcontig sets the default for the maximum number of blocks * that may be allocated sequentially. With file system clustering * it is possible to allocate contiguous blocks up to the maximum * transfer size permitted by the controller or buffering. */ if (maxcontig == 0) maxcontig = MAX(1, MAXPHYS / bsize); sblock.fs_maxcontig = maxcontig; if (sblock.fs_maxcontig < sblock.fs_maxbsize / sblock.fs_bsize) { sblock.fs_maxcontig = sblock.fs_maxbsize / sblock.fs_bsize; printf("Maxcontig raised to %d\n", sblock.fs_maxbsize); } if (sblock.fs_maxcontig > 1) sblock.fs_contigsumsize = MIN(sblock.fs_maxcontig,FS_MAXCONTIG); sblock.fs_bmask = ~(sblock.fs_bsize - 1); sblock.fs_fmask = ~(sblock.fs_fsize - 1); sblock.fs_qbmask = ~sblock.fs_bmask; sblock.fs_qfmask = ~sblock.fs_fmask; sblock.fs_bshift = ilog2(sblock.fs_bsize); sblock.fs_fshift = ilog2(sblock.fs_fsize); sblock.fs_frag = numfrags(&sblock, sblock.fs_bsize); sblock.fs_fragshift = ilog2(sblock.fs_frag); if (sblock.fs_frag > MAXFRAG) { printf("fragment size %d is still too small (can't happen)\n", sblock.fs_bsize / MAXFRAG); exit(21); } sblock.fs_fsbtodb = ilog2(sblock.fs_fsize / sectorsize); sblock.fs_size = fssize = dbtofsb(&sblock, fssize); sblock.fs_providersize = dbtofsb(&sblock, mediasize / sectorsize); /* * Before the filesystem is finally initialized, mark it * as incompletely initialized. */ sblock.fs_magic = FS_BAD_MAGIC; if (Oflag == 1) { sblock.fs_sblockloc = SBLOCK_UFS1; sblock.fs_sblockactualloc = SBLOCK_UFS1; sblock.fs_nindir = sblock.fs_bsize / sizeof(ufs1_daddr_t); sblock.fs_inopb = sblock.fs_bsize / sizeof(struct ufs1_dinode); sblock.fs_maxsymlinklen = ((UFS_NDADDR + UFS_NIADDR) * sizeof(ufs1_daddr_t)); sblock.fs_old_inodefmt = FS_44INODEFMT; sblock.fs_old_cgoffset = 0; sblock.fs_old_cgmask = 0xffffffff; sblock.fs_old_size = sblock.fs_size; sblock.fs_old_rotdelay = 0; sblock.fs_old_rps = 60; sblock.fs_old_nspf = sblock.fs_fsize / sectorsize; sblock.fs_old_cpg = 1; sblock.fs_old_interleave = 1; sblock.fs_old_trackskew = 0; sblock.fs_old_cpc = 0; sblock.fs_old_postblformat = 1; sblock.fs_old_nrpos = 1; } else { sblock.fs_sblockloc = SBLOCK_UFS2; sblock.fs_sblockactualloc = SBLOCK_UFS2; sblock.fs_nindir = sblock.fs_bsize / sizeof(ufs2_daddr_t); sblock.fs_inopb = sblock.fs_bsize / sizeof(struct ufs2_dinode); sblock.fs_maxsymlinklen = ((UFS_NDADDR + UFS_NIADDR) * sizeof(ufs2_daddr_t)); } sblock.fs_sblkno = roundup(howmany(sblock.fs_sblockloc + SBLOCKSIZE, sblock.fs_fsize), sblock.fs_frag); sblock.fs_cblkno = sblock.fs_sblkno + roundup(howmany(SBLOCKSIZE, sblock.fs_fsize), sblock.fs_frag); sblock.fs_iblkno = sblock.fs_cblkno + sblock.fs_frag; sblock.fs_maxfilesize = sblock.fs_bsize * UFS_NDADDR - 1; for (sizepb = sblock.fs_bsize, i = 0; i < UFS_NIADDR; i++) { sizepb *= NINDIR(&sblock); sblock.fs_maxfilesize += sizepb; } /* * It's impossible to create a snapshot in case that fs_maxfilesize * is smaller than the fssize. */ if (sblock.fs_maxfilesize < (u_quad_t)fssize) { warnx("WARNING: You will be unable to create snapshots on this " "file system. 
Correct by using a larger blocksize."); } /* * Calculate the number of blocks to put into each cylinder group. * * This algorithm selects the number of blocks per cylinder * group. The first goal is to have at least enough data blocks * in each cylinder group to meet the density requirement. Once * this goal is achieved we try to expand to have at least * MINCYLGRPS cylinder groups. Once this goal is achieved, we * pack as many blocks into each cylinder group map as will fit. * * We start by calculating the smallest number of blocks that we * can put into each cylinder group. If this is too big, we reduce * the density until it fits. */ maxinum = (((int64_t)(1)) << 32) - INOPB(&sblock); minfragsperinode = 1 + fssize / maxinum; if (density == 0) { density = MAX(NFPI, minfragsperinode) * fsize; } else if (density < minfragsperinode * fsize) { origdensity = density; density = minfragsperinode * fsize; fprintf(stderr, "density increased from %d to %d\n", origdensity, density); } origdensity = density; for (;;) { fragsperinode = MAX(numfrags(&sblock, density), 1); if (fragsperinode < minfragsperinode) { bsize <<= 1; fsize <<= 1; printf("Block size too small for a file system %s %d\n", "of this size. Increasing blocksize to", bsize); goto restart; } minfpg = fragsperinode * INOPB(&sblock); if (minfpg > sblock.fs_size) minfpg = sblock.fs_size; sblock.fs_ipg = INOPB(&sblock); sblock.fs_fpg = roundup(sblock.fs_iblkno + sblock.fs_ipg / INOPF(&sblock), sblock.fs_frag); if (sblock.fs_fpg < minfpg) sblock.fs_fpg = minfpg; sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode), INOPB(&sblock)); sblock.fs_fpg = roundup(sblock.fs_iblkno + sblock.fs_ipg / INOPF(&sblock), sblock.fs_frag); if (sblock.fs_fpg < minfpg) sblock.fs_fpg = minfpg; sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode), INOPB(&sblock)); if (CGSIZE(&sblock) < (unsigned long)sblock.fs_bsize) break; density -= sblock.fs_fsize; } if (density != origdensity) printf("density reduced from %d to %d\n", origdensity, density); /* * Start packing more blocks into the cylinder group until * it cannot grow any larger, the number of cylinder groups * drops below MINCYLGRPS, or we reach the size requested. * For UFS1 inodes per cylinder group are stored in an int16_t * so fs_ipg is limited to 2^15 - 1. */ for ( ; sblock.fs_fpg < maxblkspercg; sblock.fs_fpg += sblock.fs_frag) { sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode), INOPB(&sblock)); if (Oflag > 1 || (Oflag == 1 && sblock.fs_ipg <= 0x7fff)) { if (sblock.fs_size / sblock.fs_fpg < MINCYLGRPS) break; if (CGSIZE(&sblock) < (unsigned long)sblock.fs_bsize) continue; if (CGSIZE(&sblock) == (unsigned long)sblock.fs_bsize) break; } sblock.fs_fpg -= sblock.fs_frag; sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode), INOPB(&sblock)); break; } /* * Check to be sure that the last cylinder group has enough blocks * to be viable. If it is too small, reduce the number of blocks * per cylinder group which will have the effect of moving more * blocks into the last cylinder group. 
*/ optimalfpg = sblock.fs_fpg; for (;;) { sblock.fs_ncg = howmany(sblock.fs_size, sblock.fs_fpg); lastminfpg = roundup(sblock.fs_iblkno + sblock.fs_ipg / INOPF(&sblock), sblock.fs_frag); if (sblock.fs_size < lastminfpg) { printf("Filesystem size %jd < minimum size of %d\n", (intmax_t)sblock.fs_size, lastminfpg); exit(28); } if (sblock.fs_size % sblock.fs_fpg >= lastminfpg || sblock.fs_size % sblock.fs_fpg == 0) break; sblock.fs_fpg -= sblock.fs_frag; sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode), INOPB(&sblock)); } if (optimalfpg != sblock.fs_fpg) printf("Reduced frags per cylinder group from %d to %d %s\n", optimalfpg, sblock.fs_fpg, "to enlarge last cyl group"); sblock.fs_cgsize = fragroundup(&sblock, CGSIZE(&sblock)); sblock.fs_dblkno = sblock.fs_iblkno + sblock.fs_ipg / INOPF(&sblock); if (Oflag == 1) { sblock.fs_old_spc = sblock.fs_fpg * sblock.fs_old_nspf; sblock.fs_old_nsect = sblock.fs_old_spc; sblock.fs_old_npsect = sblock.fs_old_spc; sblock.fs_old_ncyl = sblock.fs_ncg; } /* * fill in remaining fields of the super block */ sblock.fs_csaddr = cgdmin(&sblock, 0); sblock.fs_cssize = fragroundup(&sblock, sblock.fs_ncg * sizeof(struct csum)); fscs = (struct csum *)calloc(1, sblock.fs_cssize); if (fscs == NULL) errx(31, "calloc failed"); sblock.fs_sbsize = fragroundup(&sblock, sizeof(struct fs)); if (sblock.fs_sbsize > SBLOCKSIZE) sblock.fs_sbsize = SBLOCKSIZE; if (sblock.fs_sbsize < realsectorsize) sblock.fs_sbsize = realsectorsize; sblock.fs_minfree = minfree; if (metaspace > 0 && metaspace < sblock.fs_fpg / 2) sblock.fs_metaspace = blknum(&sblock, metaspace); else if (metaspace != -1) /* reserve half of minfree for metadata blocks */ sblock.fs_metaspace = blknum(&sblock, (sblock.fs_fpg * minfree) / 200); if (maxbpg == 0) sblock.fs_maxbpg = MAXBLKPG(sblock.fs_bsize); else sblock.fs_maxbpg = maxbpg; sblock.fs_optim = opt; sblock.fs_cgrotor = 0; sblock.fs_pendingblocks = 0; sblock.fs_pendinginodes = 0; sblock.fs_fmod = 0; sblock.fs_ronly = 0; sblock.fs_state = 0; sblock.fs_clean = 1; sblock.fs_id[0] = (long)utime; sblock.fs_id[1] = newfs_random(); sblock.fs_fsmnt[0] = '\0'; csfrags = howmany(sblock.fs_cssize, sblock.fs_fsize); sblock.fs_dsize = sblock.fs_size - sblock.fs_sblkno - sblock.fs_ncg * (sblock.fs_dblkno - sblock.fs_sblkno); sblock.fs_cstotal.cs_nbfree = fragstoblks(&sblock, sblock.fs_dsize) - howmany(csfrags, sblock.fs_frag); sblock.fs_cstotal.cs_nffree = fragnum(&sblock, sblock.fs_size) + (fragnum(&sblock, csfrags) > 0 ? sblock.fs_frag - fragnum(&sblock, csfrags) : 0); sblock.fs_cstotal.cs_nifree = sblock.fs_ncg * sblock.fs_ipg - UFS_ROOTINO; sblock.fs_cstotal.cs_ndir = 0; sblock.fs_dsize -= csfrags; sblock.fs_time = utime; if (Oflag == 1) { sblock.fs_old_time = utime; sblock.fs_old_dsize = sblock.fs_dsize; sblock.fs_old_csaddr = sblock.fs_csaddr; sblock.fs_old_cstotal.cs_ndir = sblock.fs_cstotal.cs_ndir; sblock.fs_old_cstotal.cs_nbfree = sblock.fs_cstotal.cs_nbfree; sblock.fs_old_cstotal.cs_nifree = sblock.fs_cstotal.cs_nifree; sblock.fs_old_cstotal.cs_nffree = sblock.fs_cstotal.cs_nffree; } /* * Set flags for metadata that is being check-hashed. * * Metadata check hashes are not supported in the UFS version 1 * filesystem to keep it as small and simple as possible. */ if (Oflag > 1) { sblock.fs_flags |= FS_METACKHASH; if (getosreldate() >= P_OSREL_CK_CYLGRP) sblock.fs_metackhash |= CK_CYLGRP; if (getosreldate() >= P_OSREL_CK_SUPERBLOCK) sblock.fs_metackhash |= CK_SUPERBLOCK; } /* * Dump out summary information about file system. 
*/ # define B2MBFACTOR (1 / (1024.0 * 1024.0)) printf("%s: %.1fMB (%jd sectors) block size %d, fragment size %d\n", fsys, (float)sblock.fs_size * sblock.fs_fsize * B2MBFACTOR, (intmax_t)fsbtodb(&sblock, sblock.fs_size), sblock.fs_bsize, sblock.fs_fsize); printf("\tusing %d cylinder groups of %.2fMB, %d blks, %d inodes.\n", sblock.fs_ncg, (float)sblock.fs_fpg * sblock.fs_fsize * B2MBFACTOR, sblock.fs_fpg / sblock.fs_frag, sblock.fs_ipg); if (sblock.fs_flags & FS_DOSOFTDEP) printf("\twith soft updates\n"); # undef B2MBFACTOR if (Eflag && !Nflag) { printf("Erasing sectors [%jd...%jd]\n", sblock.fs_sblockloc / disk.d_bsize, fsbtodb(&sblock, sblock.fs_size) - 1); berase(&disk, sblock.fs_sblockloc / disk.d_bsize, sblock.fs_size * sblock.fs_fsize - sblock.fs_sblockloc); } /* * Wipe out old UFS1 superblock(s) if necessary. */ if (!Nflag && Oflag != 1 && realsectorsize <= SBLOCK_UFS1) { i = bread(&disk, part_ofs + SBLOCK_UFS1 / disk.d_bsize, chdummy, SBLOCKSIZE); if (i == -1) err(1, "can't read old UFS1 superblock: %s", disk.d_error); if (fsdummy.fs_magic == FS_UFS1_MAGIC) { fsdummy.fs_magic = 0; bwrite(&disk, part_ofs + SBLOCK_UFS1 / disk.d_bsize, chdummy, SBLOCKSIZE); for (cg = 0; cg < fsdummy.fs_ncg; cg++) { if (fsbtodb(&fsdummy, cgsblock(&fsdummy, cg)) > fssize) break; bwrite(&disk, part_ofs + fsbtodb(&fsdummy, cgsblock(&fsdummy, cg)), chdummy, SBLOCKSIZE); } } } if (!Nflag && sbput(disk.d_fd, &disk.d_fs, 0) != 0) err(1, "sbput: %s", disk.d_error); if (Xflag == 1) { printf("** Exiting on Xflag 1\n"); exit(0); } if (Xflag == 2) printf("** Leaving BAD MAGIC on Xflag 2\n"); else sblock.fs_magic = (Oflag != 1) ? FS_UFS2_MAGIC : FS_UFS1_MAGIC; /* * Now build the cylinders group blocks and * then print out indices of cylinder groups. */ printf("super-block backups (for fsck_ffs -b #) at:\n"); i = 0; width = charsperline(); /* * Allocate space for two sets of inode blocks. */ iobufsize = 2 * sblock.fs_bsize; if ((iobuf = calloc(1, iobufsize)) == 0) { printf("Cannot allocate I/O buffer\n"); exit(38); } /* * Write out all the cylinder groups and backup superblocks. */ for (cg = 0; cg < sblock.fs_ncg; cg++) { if (!Nflag) initcg(cg, utime); j = snprintf(tmpbuf, sizeof(tmpbuf), " %jd%s", (intmax_t)fsbtodb(&sblock, cgsblock(&sblock, cg)), cg < (sblock.fs_ncg-1) ? "," : ""); if (j < 0) tmpbuf[j = 0] = '\0'; if (i + j >= width) { printf("\n"); i = 0; } i += j; printf("%s", tmpbuf); fflush(stdout); } printf("\n"); if (Nflag) exit(0); /* * Now construct the initial file system, * then write out the super-block. */ fsinit(utime); if (Oflag == 1) { sblock.fs_old_cstotal.cs_ndir = sblock.fs_cstotal.cs_ndir; sblock.fs_old_cstotal.cs_nbfree = sblock.fs_cstotal.cs_nbfree; sblock.fs_old_cstotal.cs_nifree = sblock.fs_cstotal.cs_nifree; sblock.fs_old_cstotal.cs_nffree = sblock.fs_cstotal.cs_nffree; } if (Xflag == 3) { printf("** Exiting on Xflag 3\n"); exit(0); } /* * Reference the summary information so it will also be written. */ sblock.fs_csp = fscs; if (sbput(disk.d_fd, &disk.d_fs, 0) != 0) err(1, "sbput: %s", disk.d_error); /* * For UFS1 filesystems with a blocksize of 64K, the first * alternate superblock resides at the location used for * the default UFS2 superblock. As there is a valid * superblock at this location, the boot code will use * it as its first choice. Thus we have to ensure that * all of its statistcs on usage are correct. 
*/ if (Oflag == 1 && sblock.fs_bsize == 65536) wtfs(fsbtodb(&sblock, cgsblock(&sblock, 0)), sblock.fs_bsize, (char *)&sblock); /* * Read the last sector of the boot block, replace the last * 20 bytes with the recovery information, then write it back. * The recovery information only works for UFS2 filesystems. */ if (sblock.fs_magic == FS_UFS2_MAGIC) { if ((fsrbuf = malloc(realsectorsize)) == NULL || bread(&disk, part_ofs + (SBLOCK_UFS2 - realsectorsize) / disk.d_bsize, fsrbuf, realsectorsize) == -1) err(1, "can't read recovery area: %s", disk.d_error); fsr = (struct fsrecovery *)&fsrbuf[realsectorsize - sizeof *fsr]; fsr->fsr_magic = sblock.fs_magic; fsr->fsr_fpg = sblock.fs_fpg; fsr->fsr_fsbtodb = sblock.fs_fsbtodb; fsr->fsr_sblkno = sblock.fs_sblkno; fsr->fsr_ncg = sblock.fs_ncg; wtfs((SBLOCK_UFS2 - realsectorsize) / disk.d_bsize, realsectorsize, fsrbuf); free(fsrbuf); } /* * Update information about this partition in pack * label, to that it may be updated on disk. */ if (pp != NULL) { pp->p_fstype = FS_BSDFFS; pp->p_fsize = sblock.fs_fsize; pp->p_frag = sblock.fs_frag; pp->p_cpg = sblock.fs_fpg; } } /* * Initialize a cylinder group. */ void initcg(int cylno, time_t utime) { long blkno, start; off_t savedactualloc; uint i, j, d, dlower, dupper; ufs2_daddr_t cbase, dmax; struct ufs1_dinode *dp1; struct ufs2_dinode *dp2; struct csum *cs; /* * Determine block bounds for cylinder group. * Allow space for super block summary information in first * cylinder group. */ cbase = cgbase(&sblock, cylno); dmax = cbase + sblock.fs_fpg; if (dmax > sblock.fs_size) dmax = sblock.fs_size; dlower = cgsblock(&sblock, cylno) - cbase; dupper = cgdmin(&sblock, cylno) - cbase; if (cylno == 0) dupper += howmany(sblock.fs_cssize, sblock.fs_fsize); cs = &fscs[cylno]; memset(&acg, 0, sblock.fs_cgsize); acg.cg_time = utime; acg.cg_magic = CG_MAGIC; acg.cg_cgx = cylno; acg.cg_niblk = sblock.fs_ipg; acg.cg_initediblk = MIN(sblock.fs_ipg, 2 * INOPB(&sblock)); acg.cg_ndblk = dmax - cbase; if (sblock.fs_contigsumsize > 0) acg.cg_nclusterblks = acg.cg_ndblk / sblock.fs_frag; start = &acg.cg_space[0] - (u_char *)(&acg.cg_firstfield); if (Oflag == 2) { acg.cg_iusedoff = start; } else { acg.cg_old_ncyl = sblock.fs_old_cpg; acg.cg_old_time = acg.cg_time; acg.cg_time = 0; acg.cg_old_niblk = acg.cg_niblk; acg.cg_niblk = 0; acg.cg_initediblk = 0; acg.cg_old_btotoff = start; acg.cg_old_boff = acg.cg_old_btotoff + sblock.fs_old_cpg * sizeof(int32_t); acg.cg_iusedoff = acg.cg_old_boff + sblock.fs_old_cpg * sizeof(u_int16_t); } acg.cg_freeoff = acg.cg_iusedoff + howmany(sblock.fs_ipg, CHAR_BIT); acg.cg_nextfreeoff = acg.cg_freeoff + howmany(sblock.fs_fpg, CHAR_BIT); if (sblock.fs_contigsumsize > 0) { acg.cg_clustersumoff = roundup(acg.cg_nextfreeoff, sizeof(u_int32_t)); acg.cg_clustersumoff -= sizeof(u_int32_t); acg.cg_clusteroff = acg.cg_clustersumoff + (sblock.fs_contigsumsize + 1) * sizeof(u_int32_t); acg.cg_nextfreeoff = acg.cg_clusteroff + howmany(fragstoblks(&sblock, sblock.fs_fpg), CHAR_BIT); } if (acg.cg_nextfreeoff > (unsigned)sblock.fs_cgsize) { printf("Panic: cylinder group too big\n"); exit(37); } acg.cg_cs.cs_nifree += sblock.fs_ipg; if (cylno == 0) for (i = 0; i < (long)UFS_ROOTINO; i++) { setbit(cg_inosused(&acg), i); acg.cg_cs.cs_nifree--; } if (cylno > 0) { /* * In cylno 0, beginning space is reserved * for boot and super blocks. 
*/ for (d = 0; d < dlower; d += sblock.fs_frag) { blkno = d / sblock.fs_frag; setblock(&sblock, cg_blksfree(&acg), blkno); if (sblock.fs_contigsumsize > 0) setbit(cg_clustersfree(&acg), blkno); acg.cg_cs.cs_nbfree++; } } if ((i = dupper % sblock.fs_frag)) { acg.cg_frsum[sblock.fs_frag - i]++; for (d = dupper + sblock.fs_frag - i; dupper < d; dupper++) { setbit(cg_blksfree(&acg), dupper); acg.cg_cs.cs_nffree++; } } for (d = dupper; d + sblock.fs_frag <= acg.cg_ndblk; d += sblock.fs_frag) { blkno = d / sblock.fs_frag; setblock(&sblock, cg_blksfree(&acg), blkno); if (sblock.fs_contigsumsize > 0) setbit(cg_clustersfree(&acg), blkno); acg.cg_cs.cs_nbfree++; } if (d < acg.cg_ndblk) { acg.cg_frsum[acg.cg_ndblk - d]++; for (; d < acg.cg_ndblk; d++) { setbit(cg_blksfree(&acg), d); acg.cg_cs.cs_nffree++; } } if (sblock.fs_contigsumsize > 0) { int32_t *sump = cg_clustersum(&acg); u_char *mapp = cg_clustersfree(&acg); int map = *mapp++; int bit = 1; int run = 0; for (i = 0; i < acg.cg_nclusterblks; i++) { if ((map & bit) != 0) run++; else if (run != 0) { if (run > sblock.fs_contigsumsize) run = sblock.fs_contigsumsize; sump[run]++; run = 0; } if ((i & (CHAR_BIT - 1)) != CHAR_BIT - 1) bit <<= 1; else { map = *mapp++; bit = 1; } } if (run != 0) { if (run > sblock.fs_contigsumsize) run = sblock.fs_contigsumsize; sump[run]++; } } *cs = acg.cg_cs; /* * Write out the duplicate super block. Then write the cylinder * group map and two blocks worth of inodes in a single write. */ savedactualloc = sblock.fs_sblockactualloc; sblock.fs_sblockactualloc = dbtob(fsbtodb(&sblock, cgsblock(&sblock, cylno))); if (sbput(disk.d_fd, &disk.d_fs, 0) != 0) err(1, "sbput: %s", disk.d_error); sblock.fs_sblockactualloc = savedactualloc; if (cgput(&disk, &acg) != 0) err(1, "initcg: cgput: %s", disk.d_error); start = 0; dp1 = (struct ufs1_dinode *)(&iobuf[start]); dp2 = (struct ufs2_dinode *)(&iobuf[start]); for (i = 0; i < acg.cg_initediblk; i++) { if (sblock.fs_magic == FS_UFS1_MAGIC) { dp1->di_gen = newfs_random(); dp1++; } else { dp2->di_gen = newfs_random(); dp2++; } } wtfs(fsbtodb(&sblock, cgimin(&sblock, cylno)), iobufsize, iobuf); /* * For the old file system, we have to initialize all the inodes. */ if (Oflag == 1) { for (i = 2 * sblock.fs_frag; i < sblock.fs_ipg / INOPF(&sblock); i += sblock.fs_frag) { dp1 = (struct ufs1_dinode *)(&iobuf[start]); for (j = 0; j < INOPB(&sblock); j++) { dp1->di_gen = newfs_random(); dp1++; } wtfs(fsbtodb(&sblock, cgimin(&sblock, cylno) + i), sblock.fs_bsize, &iobuf[start]); } } } /* * initialize the file system */ #define ROOTLINKCNT 3 static struct direct root_dir[] = { { UFS_ROOTINO, sizeof(struct direct), DT_DIR, 1, "." }, { UFS_ROOTINO, sizeof(struct direct), DT_DIR, 2, ".." }, { UFS_ROOTINO + 1, sizeof(struct direct), DT_DIR, 5, ".snap" }, }; #define SNAPLINKCNT 2 static struct direct snap_dir[] = { { UFS_ROOTINO + 1, sizeof(struct direct), DT_DIR, 1, "." }, { UFS_ROOTINO, sizeof(struct direct), DT_DIR, 2, ".." }, }; void fsinit(time_t utime) { union dinode node; struct group *grp; gid_t gid; int entries; memset(&node, 0, sizeof node); if ((grp = getgrnam("operator")) != NULL) { gid = grp->gr_gid; } else { warnx("Cannot retrieve operator gid, using gid 0."); gid = 0; } entries = (nflag) ? 
ROOTLINKCNT - 1: ROOTLINKCNT; if (sblock.fs_magic == FS_UFS1_MAGIC) { /* * initialize the node */ node.dp1.di_atime = utime; node.dp1.di_mtime = utime; node.dp1.di_ctime = utime; /* * create the root directory */ node.dp1.di_mode = IFDIR | UMASK; node.dp1.di_nlink = entries; node.dp1.di_size = makedir(root_dir, entries); node.dp1.di_db[0] = alloc(sblock.fs_fsize, node.dp1.di_mode); node.dp1.di_blocks = btodb(fragroundup(&sblock, node.dp1.di_size)); wtfs(fsbtodb(&sblock, node.dp1.di_db[0]), sblock.fs_fsize, iobuf); iput(&node, UFS_ROOTINO); if (!nflag) { /* * create the .snap directory */ node.dp1.di_mode |= 020; node.dp1.di_gid = gid; node.dp1.di_nlink = SNAPLINKCNT; node.dp1.di_size = makedir(snap_dir, SNAPLINKCNT); node.dp1.di_db[0] = alloc(sblock.fs_fsize, node.dp1.di_mode); node.dp1.di_blocks = btodb(fragroundup(&sblock, node.dp1.di_size)); wtfs(fsbtodb(&sblock, node.dp1.di_db[0]), sblock.fs_fsize, iobuf); iput(&node, UFS_ROOTINO + 1); } } else { /* * initialize the node */ node.dp2.di_atime = utime; node.dp2.di_mtime = utime; node.dp2.di_ctime = utime; node.dp2.di_birthtime = utime; /* * create the root directory */ node.dp2.di_mode = IFDIR | UMASK; node.dp2.di_nlink = entries; node.dp2.di_size = makedir(root_dir, entries); node.dp2.di_db[0] = alloc(sblock.fs_fsize, node.dp2.di_mode); node.dp2.di_blocks = btodb(fragroundup(&sblock, node.dp2.di_size)); wtfs(fsbtodb(&sblock, node.dp2.di_db[0]), sblock.fs_fsize, iobuf); iput(&node, UFS_ROOTINO); if (!nflag) { /* * create the .snap directory */ node.dp2.di_mode |= 020; node.dp2.di_gid = gid; node.dp2.di_nlink = SNAPLINKCNT; node.dp2.di_size = makedir(snap_dir, SNAPLINKCNT); node.dp2.di_db[0] = alloc(sblock.fs_fsize, node.dp2.di_mode); node.dp2.di_blocks = btodb(fragroundup(&sblock, node.dp2.di_size)); wtfs(fsbtodb(&sblock, node.dp2.di_db[0]), sblock.fs_fsize, iobuf); iput(&node, UFS_ROOTINO + 1); } } } /* * construct a set of directory entries in "iobuf". * return size of directory. 
*/ int makedir(struct direct *protodir, int entries) { char *cp; int i, spcleft; spcleft = DIRBLKSIZ; memset(iobuf, 0, DIRBLKSIZ); for (cp = iobuf, i = 0; i < entries - 1; i++) { protodir[i].d_reclen = DIRSIZ(0, &protodir[i]); memmove(cp, &protodir[i], protodir[i].d_reclen); cp += protodir[i].d_reclen; spcleft -= protodir[i].d_reclen; } protodir[i].d_reclen = spcleft; memmove(cp, &protodir[i], DIRSIZ(0, &protodir[i])); return (DIRBLKSIZ); } /* * allocate a block or frag */ ufs2_daddr_t alloc(int size, int mode) { int i, blkno, frag; uint d; bread(&disk, part_ofs + fsbtodb(&sblock, cgtod(&sblock, 0)), (char *)&acg, sblock.fs_cgsize); if (acg.cg_magic != CG_MAGIC) { printf("cg 0: bad magic number\n"); exit(38); } if (acg.cg_cs.cs_nbfree == 0) { printf("first cylinder group ran out of space\n"); exit(39); } for (d = 0; d < acg.cg_ndblk; d += sblock.fs_frag) if (isblock(&sblock, cg_blksfree(&acg), d / sblock.fs_frag)) goto goth; printf("internal error: can't find block in cyl 0\n"); exit(40); goth: blkno = fragstoblks(&sblock, d); clrblock(&sblock, cg_blksfree(&acg), blkno); if (sblock.fs_contigsumsize > 0) clrbit(cg_clustersfree(&acg), blkno); acg.cg_cs.cs_nbfree--; sblock.fs_cstotal.cs_nbfree--; fscs[0].cs_nbfree--; if (mode & IFDIR) { acg.cg_cs.cs_ndir++; sblock.fs_cstotal.cs_ndir++; fscs[0].cs_ndir++; } if (size != sblock.fs_bsize) { frag = howmany(size, sblock.fs_fsize); fscs[0].cs_nffree += sblock.fs_frag - frag; sblock.fs_cstotal.cs_nffree += sblock.fs_frag - frag; acg.cg_cs.cs_nffree += sblock.fs_frag - frag; acg.cg_frsum[sblock.fs_frag - frag]++; for (i = frag; i < sblock.fs_frag; i++) setbit(cg_blksfree(&acg), d + i); } if (cgput(&disk, &acg) != 0) err(1, "alloc: cgput: %s", disk.d_error); return ((ufs2_daddr_t)d); } /* * Allocate an inode on the disk */ void iput(union dinode *ip, ino_t ino) { - ufs2_daddr_t d; + union dinodep dp; bread(&disk, part_ofs + fsbtodb(&sblock, cgtod(&sblock, 0)), (char *)&acg, sblock.fs_cgsize); if (acg.cg_magic != CG_MAGIC) { printf("cg 0: bad magic number\n"); exit(31); } acg.cg_cs.cs_nifree--; setbit(cg_inosused(&acg), ino); if (cgput(&disk, &acg) != 0) err(1, "iput: cgput: %s", disk.d_error); sblock.fs_cstotal.cs_nifree--; fscs[0].cs_nifree--; - if (ino >= (unsigned long)sblock.fs_ipg * sblock.fs_ncg) { - printf("fsinit: inode value out of range (%ju).\n", - (uintmax_t)ino); + if (getinode(&disk, &dp, ino) == -1) { + printf("iput: %s\n", disk.d_error); exit(32); } - d = fsbtodb(&sblock, ino_to_fsba(&sblock, ino)); - bread(&disk, part_ofs + d, (char *)iobuf, sblock.fs_bsize); if (sblock.fs_magic == FS_UFS1_MAGIC) - ((struct ufs1_dinode *)iobuf)[ino_to_fsbo(&sblock, ino)] = - ip->dp1; + *dp.dp1 = ip->dp1; else - ((struct ufs2_dinode *)iobuf)[ino_to_fsbo(&sblock, ino)] = - ip->dp2; - wtfs(d, sblock.fs_bsize, (char *)iobuf); + *dp.dp2 = ip->dp2; + putinode(&disk); } /* * possibly write to disk */ static void wtfs(ufs2_daddr_t bno, int size, char *bf) { if (Nflag) return; if (bwrite(&disk, part_ofs + bno, bf, size) < 0) err(36, "wtfs: %d bytes at sector %jd", size, (intmax_t)bno); } /* * check if a block is available */ static int isblock(struct fs *fs, unsigned char *cp, int h) { unsigned char mask; switch (fs->fs_frag) { case 8: return (cp[h] == 0xff); case 4: mask = 0x0f << ((h & 0x1) << 2); return ((cp[h >> 1] & mask) == mask); case 2: mask = 0x03 << ((h & 0x3) << 1); return ((cp[h >> 2] & mask) == mask); case 1: mask = 0x01 << (h & 0x7); return ((cp[h >> 3] & mask) == mask); default: fprintf(stderr, "isblock bad fs_frag %d\n", fs->fs_frag); return 
(0); } } /* * take a block out of the map */ static void clrblock(struct fs *fs, unsigned char *cp, int h) { switch ((fs)->fs_frag) { case 8: cp[h] = 0; return; case 4: cp[h >> 1] &= ~(0x0f << ((h & 0x1) << 2)); return; case 2: cp[h >> 2] &= ~(0x03 << ((h & 0x3) << 1)); return; case 1: cp[h >> 3] &= ~(0x01 << (h & 0x7)); return; default: fprintf(stderr, "clrblock bad fs_frag %d\n", fs->fs_frag); return; } } /* * put a block into the map */ static void setblock(struct fs *fs, unsigned char *cp, int h) { switch (fs->fs_frag) { case 8: cp[h] = 0xff; return; case 4: cp[h >> 1] |= (0x0f << ((h & 0x1) << 2)); return; case 2: cp[h >> 2] |= (0x03 << ((h & 0x3) << 1)); return; case 1: cp[h >> 3] |= (0x01 << (h & 0x7)); return; default: fprintf(stderr, "setblock bad fs_frag %d\n", fs->fs_frag); return; } } /* * Determine the number of characters in a * single line. */ static int charsperline(void) { int columns; char *cp; struct winsize ws; columns = 0; if (ioctl(0, TIOCGWINSZ, &ws) != -1) columns = ws.ws_col; if (columns == 0 && (cp = getenv("COLUMNS"))) columns = atoi(cp); if (columns == 0) columns = 80; /* last resort */ return (columns); } static int ilog2(int val) { u_int n; for (n = 0; n < sizeof(n) * CHAR_BIT; n++) if (1 << n == val) return (n); errx(1, "ilog2: %d is not a power of 2\n", val); } /* * For the regression test, return predictable random values. * Otherwise use a true random number generator. */ static u_int32_t newfs_random(void) { static int nextnum = 1; if (Rflag) return (nextnum++); return (arc4random()); } Index: projects/clang700-import/sbin/tunefs/tunefs.c =================================================================== --- projects/clang700-import/sbin/tunefs/tunefs.c (revision 340426) +++ projects/clang700-import/sbin/tunefs/tunefs.c (revision 340427) @@ -1,1148 +1,1131 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1983, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if 0 #ifndef lint static const char copyright[] = "@(#) Copyright (c) 1983, 1993\n\ The Regents of the University of California. 
All rights reserved.\n"; #endif /* not lint */ #ifndef lint static char sccsid[] = "@(#)tunefs.c 8.2 (Berkeley) 4/19/94"; #endif /* not lint */ #endif #include __FBSDID("$FreeBSD$"); /* * tunefs: change layout parameters to an existing file system. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* the optimization warning string template */ #define OPTWARN "should optimize for %s with minfree %s %d%%" static int blocks; static char clrbuf[MAXBSIZE]; static struct uufsd disk; #define sblock disk.d_fs static void usage(void); static void printfs(void); static int journal_alloc(int64_t size); static void journal_clear(void); static void sbdirty(void); int main(int argc, char *argv[]) { const char *avalue, *jvalue, *Jvalue, *Lvalue, *lvalue, *Nvalue, *nvalue; const char *tvalue; const char *special, *on; const char *name; int active; int Aflag, aflag, eflag, evalue, fflag, fvalue, jflag, Jflag, kflag; int kvalue, Lflag, lflag, mflag, mvalue, Nflag, nflag, oflag, ovalue; int pflag, sflag, svalue, Svalue, tflag; int ch, found_arg, i; int iovlen = 0; const char *chg[2]; struct statfs stfs; struct iovec *iov = NULL; char errmsg[255] = {0}; if (argc < 3) usage(); Aflag = aflag = eflag = fflag = jflag = Jflag = kflag = Lflag = 0; lflag = mflag = Nflag = nflag = oflag = pflag = sflag = tflag = 0; avalue = jvalue = Jvalue = Lvalue = lvalue = Nvalue = nvalue = NULL; evalue = fvalue = mvalue = ovalue = svalue = Svalue = 0; active = 0; found_arg = 0; /* At least one arg is required. */ while ((ch = getopt(argc, argv, "Aa:e:f:j:J:k:L:l:m:N:n:o:ps:S:t:")) != -1) switch (ch) { case 'A': found_arg = 1; Aflag++; break; case 'a': found_arg = 1; name = "POSIX.1e ACLs"; avalue = optarg; if (strcmp(avalue, "enable") && strcmp(avalue, "disable")) { errx(10, "bad %s (options are %s)", name, "`enable' or `disable'"); } aflag = 1; break; case 'e': found_arg = 1; name = "maximum blocks per file in a cylinder group"; evalue = atoi(optarg); if (evalue < 1) errx(10, "%s must be >= 1 (was %s)", name, optarg); eflag = 1; break; case 'f': found_arg = 1; name = "average file size"; fvalue = atoi(optarg); if (fvalue < 1) errx(10, "%s must be >= 1 (was %s)", name, optarg); fflag = 1; break; case 'j': found_arg = 1; name = "softdep journaled file system"; jvalue = optarg; if (strcmp(jvalue, "enable") && strcmp(jvalue, "disable")) { errx(10, "bad %s (options are %s)", name, "`enable' or `disable'"); } jflag = 1; break; case 'J': found_arg = 1; name = "gjournaled file system"; Jvalue = optarg; if (strcmp(Jvalue, "enable") && strcmp(Jvalue, "disable")) { errx(10, "bad %s (options are %s)", name, "`enable' or `disable'"); } Jflag = 1; break; case 'k': found_arg = 1; name = "space to hold for metadata blocks"; kvalue = atoi(optarg); if (kvalue < 0) errx(10, "bad %s (%s)", name, optarg); kflag = 1; break; case 'L': found_arg = 1; name = "volume label"; Lvalue = optarg; i = -1; while (isalnum(Lvalue[++i])); if (Lvalue[i] != '\0') { errx(10, "bad %s. Valid characters are alphanumerics.", name); } if (strlen(Lvalue) >= MAXVOLLEN) { errx(10, "bad %s. 
Length is longer than %d.", name, MAXVOLLEN - 1); } Lflag = 1; break; case 'l': found_arg = 1; name = "multilabel MAC file system"; lvalue = optarg; if (strcmp(lvalue, "enable") && strcmp(lvalue, "disable")) { errx(10, "bad %s (options are %s)", name, "`enable' or `disable'"); } lflag = 1; break; case 'm': found_arg = 1; name = "minimum percentage of free space"; mvalue = atoi(optarg); if (mvalue < 0 || mvalue > 99) errx(10, "bad %s (%s)", name, optarg); mflag = 1; break; case 'N': found_arg = 1; name = "NFSv4 ACLs"; Nvalue = optarg; if (strcmp(Nvalue, "enable") && strcmp(Nvalue, "disable")) { errx(10, "bad %s (options are %s)", name, "`enable' or `disable'"); } Nflag = 1; break; case 'n': found_arg = 1; name = "soft updates"; nvalue = optarg; if (strcmp(nvalue, "enable") != 0 && strcmp(nvalue, "disable") != 0) { errx(10, "bad %s (options are %s)", name, "`enable' or `disable'"); } nflag = 1; break; case 'o': found_arg = 1; name = "optimization preference"; if (strcmp(optarg, "space") == 0) ovalue = FS_OPTSPACE; else if (strcmp(optarg, "time") == 0) ovalue = FS_OPTTIME; else errx(10, "bad %s (options are `space' or `time')", name); oflag = 1; break; case 'p': found_arg = 1; pflag = 1; break; case 's': found_arg = 1; name = "expected number of files per directory"; svalue = atoi(optarg); if (svalue < 1) errx(10, "%s must be >= 1 (was %s)", name, optarg); sflag = 1; break; case 'S': found_arg = 1; name = "Softdep Journal Size"; Svalue = atoi(optarg); if (Svalue < SUJ_MIN) errx(10, "%s must be >= %d (was %s)", name, SUJ_MIN, optarg); break; case 't': found_arg = 1; name = "trim"; tvalue = optarg; if (strcmp(tvalue, "enable") != 0 && strcmp(tvalue, "disable") != 0) { errx(10, "bad %s (options are %s)", name, "`enable' or `disable'"); } tflag = 1; break; default: usage(); } argc -= optind; argv += optind; if (found_arg == 0 || argc != 1) usage(); on = special = argv[0]; if (ufs_disk_fillout(&disk, special) == -1) goto err; if (disk.d_name != special) { if (statfs(special, &stfs) != 0) warn("Can't stat %s", special); if (strcmp(special, stfs.f_mntonname) == 0) active = 1; } if (pflag) { printfs(); exit(0); } if (Lflag) { name = "volume label"; strncpy(sblock.fs_volname, Lvalue, MAXVOLLEN); } if (aflag) { name = "POSIX.1e ACLs"; if (strcmp(avalue, "enable") == 0) { if (sblock.fs_flags & FS_ACLS) { warnx("%s remains unchanged as enabled", name); } else if (sblock.fs_flags & FS_NFS4ACLS) { warnx("%s and NFSv4 ACLs are mutually " "exclusive", name); } else { sblock.fs_flags |= FS_ACLS; warnx("%s set", name); } } else if (strcmp(avalue, "disable") == 0) { if ((~sblock.fs_flags & FS_ACLS) == FS_ACLS) { warnx("%s remains unchanged as disabled", name); } else { sblock.fs_flags &= ~FS_ACLS; warnx("%s cleared", name); } } } if (eflag) { name = "maximum blocks per file in a cylinder group"; if (sblock.fs_maxbpg == evalue) warnx("%s remains unchanged as %d", name, evalue); else { warnx("%s changes from %d to %d", name, sblock.fs_maxbpg, evalue); sblock.fs_maxbpg = evalue; } } if (fflag) { name = "average file size"; if (sblock.fs_avgfilesize == (unsigned)fvalue) { warnx("%s remains unchanged as %d", name, fvalue); } else { warnx("%s changes from %d to %d", name, sblock.fs_avgfilesize, fvalue); sblock.fs_avgfilesize = fvalue; } } if (jflag) { name = "soft updates journaling"; if (strcmp(jvalue, "enable") == 0) { if ((sblock.fs_flags & (FS_DOSOFTDEP | FS_SUJ)) == (FS_DOSOFTDEP | FS_SUJ)) { warnx("%s remains unchanged as enabled", name); } else if (sblock.fs_clean == 0) { warnx("%s cannot be enabled until fsck 
is run", name); } else if (journal_alloc(Svalue) != 0) { warnx("%s cannot be enabled", name); } else { sblock.fs_flags |= FS_DOSOFTDEP | FS_SUJ; warnx("%s set", name); } } else if (strcmp(jvalue, "disable") == 0) { if ((~sblock.fs_flags & FS_SUJ) == FS_SUJ) { warnx("%s remains unchanged as disabled", name); } else { journal_clear(); sblock.fs_flags &= ~FS_SUJ; sblock.fs_sujfree = 0; warnx("%s cleared but soft updates still set.", name); warnx("remove .sujournal to reclaim space"); } } } if (Jflag) { name = "gjournal"; if (strcmp(Jvalue, "enable") == 0) { if (sblock.fs_flags & FS_GJOURNAL) { warnx("%s remains unchanged as enabled", name); } else { sblock.fs_flags |= FS_GJOURNAL; warnx("%s set", name); } } else if (strcmp(Jvalue, "disable") == 0) { if ((~sblock.fs_flags & FS_GJOURNAL) == FS_GJOURNAL) { warnx("%s remains unchanged as disabled", name); } else { sblock.fs_flags &= ~FS_GJOURNAL; warnx("%s cleared", name); } } } if (kflag) { name = "space to hold for metadata blocks"; if (sblock.fs_metaspace == kvalue) warnx("%s remains unchanged as %d", name, kvalue); else { kvalue = blknum(&sblock, kvalue); if (kvalue > sblock.fs_fpg / 2) { kvalue = blknum(&sblock, sblock.fs_fpg / 2); warnx("%s cannot exceed half the file system " "space", name); } warnx("%s changes from %jd to %d", name, sblock.fs_metaspace, kvalue); sblock.fs_metaspace = kvalue; } } if (lflag) { name = "multilabel"; if (strcmp(lvalue, "enable") == 0) { if (sblock.fs_flags & FS_MULTILABEL) { warnx("%s remains unchanged as enabled", name); } else { sblock.fs_flags |= FS_MULTILABEL; warnx("%s set", name); } } else if (strcmp(lvalue, "disable") == 0) { if ((~sblock.fs_flags & FS_MULTILABEL) == FS_MULTILABEL) { warnx("%s remains unchanged as disabled", name); } else { sblock.fs_flags &= ~FS_MULTILABEL; warnx("%s cleared", name); } } } if (mflag) { name = "minimum percentage of free space"; if (sblock.fs_minfree == mvalue) warnx("%s remains unchanged as %d%%", name, mvalue); else { warnx("%s changes from %d%% to %d%%", name, sblock.fs_minfree, mvalue); sblock.fs_minfree = mvalue; if (mvalue >= MINFREE && sblock.fs_optim == FS_OPTSPACE) warnx(OPTWARN, "time", ">=", MINFREE); if (mvalue < MINFREE && sblock.fs_optim == FS_OPTTIME) warnx(OPTWARN, "space", "<", MINFREE); } } if (Nflag) { name = "NFSv4 ACLs"; if (strcmp(Nvalue, "enable") == 0) { if (sblock.fs_flags & FS_NFS4ACLS) { warnx("%s remains unchanged as enabled", name); } else if (sblock.fs_flags & FS_ACLS) { warnx("%s and POSIX.1e ACLs are mutually " "exclusive", name); } else { sblock.fs_flags |= FS_NFS4ACLS; warnx("%s set", name); } } else if (strcmp(Nvalue, "disable") == 0) { if ((~sblock.fs_flags & FS_NFS4ACLS) == FS_NFS4ACLS) { warnx("%s remains unchanged as disabled", name); } else { sblock.fs_flags &= ~FS_NFS4ACLS; warnx("%s cleared", name); } } } if (nflag) { name = "soft updates"; if (strcmp(nvalue, "enable") == 0) { if (sblock.fs_flags & FS_DOSOFTDEP) warnx("%s remains unchanged as enabled", name); else if (sblock.fs_clean == 0) { warnx("%s cannot be enabled until fsck is run", name); } else { sblock.fs_flags |= FS_DOSOFTDEP; warnx("%s set", name); } } else if (strcmp(nvalue, "disable") == 0) { if ((~sblock.fs_flags & FS_DOSOFTDEP) == FS_DOSOFTDEP) warnx("%s remains unchanged as disabled", name); else { sblock.fs_flags &= ~FS_DOSOFTDEP; warnx("%s cleared", name); } } } if (oflag) { name = "optimization preference"; chg[FS_OPTSPACE] = "space"; chg[FS_OPTTIME] = "time"; if (sblock.fs_optim == ovalue) warnx("%s remains unchanged as %s", name, chg[ovalue]); else { 
warnx("%s changes from %s to %s", name, chg[sblock.fs_optim], chg[ovalue]); sblock.fs_optim = ovalue; if (sblock.fs_minfree >= MINFREE && ovalue == FS_OPTSPACE) warnx(OPTWARN, "time", ">=", MINFREE); if (sblock.fs_minfree < MINFREE && ovalue == FS_OPTTIME) warnx(OPTWARN, "space", "<", MINFREE); } } if (sflag) { name = "expected number of files per directory"; if (sblock.fs_avgfpdir == (unsigned)svalue) { warnx("%s remains unchanged as %d", name, svalue); } else { warnx("%s changes from %d to %d", name, sblock.fs_avgfpdir, svalue); sblock.fs_avgfpdir = svalue; } } if (tflag) { name = "issue TRIM to the disk"; if (strcmp(tvalue, "enable") == 0) { if (sblock.fs_flags & FS_TRIM) warnx("%s remains unchanged as enabled", name); else { sblock.fs_flags |= FS_TRIM; warnx("%s set", name); } } else if (strcmp(tvalue, "disable") == 0) { if ((~sblock.fs_flags & FS_TRIM) == FS_TRIM) warnx("%s remains unchanged as disabled", name); else { sblock.fs_flags &= ~FS_TRIM; warnx("%s cleared", name); } } } if (sbwrite(&disk, Aflag) == -1) goto err; ufs_disk_close(&disk); if (active) { build_iovec_argf(&iov, &iovlen, "fstype", "ufs"); build_iovec_argf(&iov, &iovlen, "fspath", "%s", on); build_iovec(&iov, &iovlen, "errmsg", errmsg, sizeof(errmsg)); if (nmount(iov, iovlen, stfs.f_flags | MNT_UPDATE | MNT_RELOAD) < 0) { if (errmsg[0]) err(9, "%s: reload: %s", special, errmsg); else err(9, "%s: reload", special); } warnx("file system reloaded"); } exit(0); err: if (disk.d_error != NULL) errx(11, "%s: %s", special, disk.d_error); else err(12, "%s", special); } static void sbdirty(void) { disk.d_fs.fs_flags |= FS_UNCLEAN | FS_NEEDSFSCK; disk.d_fs.fs_clean = 0; } static ufs2_daddr_t journal_balloc(void) { ufs2_daddr_t blk; struct cg *cgp; int valid; static int contig = 1; cgp = &disk.d_cg; for (;;) { blk = cgballoc(&disk); if (blk > 0) break; /* * If we failed to allocate a block from this cg, move to * the next. */ if (cgwrite(&disk) < 0) { warn("Failed to write updated cg"); return (-1); } while ((valid = cgread(&disk)) == 1) { /* * Try to minimize fragmentation by requiring a minimum * number of blocks present. */ if (cgp->cg_cs.cs_nbfree > 256 * 1024) break; if (contig == 0 && cgp->cg_cs.cs_nbfree) break; } if (valid) continue; /* * Try once through looking only for large contiguous regions * and again taking any space we can find. */ if (contig) { contig = 0; disk.d_ccg = 0; warnx("Journal file fragmented."); continue; } warnx("Failed to find sufficient free blocks for the journal"); return -1; } if (bwrite(&disk, fsbtodb(&sblock, blk), clrbuf, sblock.fs_bsize) <= 0) { warn("Failed to initialize new block"); return -1; } return (blk); } /* * Search a directory block for the SUJ_FILE. */ static ino_t dir_search(ufs2_daddr_t blk, int bytes) { char block[MAXBSIZE]; struct direct *dp; int off; if (bread(&disk, fsbtodb(&sblock, blk), block, bytes) <= 0) { warn("Failed to read dir block"); return (-1); } for (off = 0; off < bytes; off += dp->d_reclen) { dp = (struct direct *)&block[off]; if (dp->d_reclen == 0) break; if (dp->d_ino == 0) continue; if (dp->d_namlen != strlen(SUJ_FILE)) continue; if (bcmp(dp->d_name, SUJ_FILE, dp->d_namlen) != 0) continue; return (dp->d_ino); } return (0); } /* * Search in the UFS_ROOTINO for the SUJ_FILE. If it exists we can not enable * journaling. 
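A note on the allocation strategy in journal_balloc() above: it makes two passes over the cylinder groups, first insisting on a large amount of free space so the journal stays contiguous, then starting over from cg 0 and taking whatever is available. A minimal sketch of that search order, with invented per-cg free-block counts; only the two-pass logic and the 256 * 1024 threshold mirror the real code.

/*
 * Hedged sketch of journal_balloc()'s two-pass cylinder-group scan.
 * The free-block counts are invented.
 */
#include <stdio.h>

#define NCG		4
#define CONTIG_MIN	(256 * 1024)	/* same threshold journal_balloc() uses */

int
main(void)
{
	long nbfree[NCG] = { 1024, 300000, 512, 8192 };	/* pretend per-cg free blocks */

	for (int contig = 1; contig >= 0; contig--) {
		for (int cg = 0; cg < NCG; cg++) {
			if (nbfree[cg] == 0)
				continue;
			if (contig && nbfree[cg] <= CONTIG_MIN)
				continue;	/* first pass: roomy groups only */
			printf("pass %d: allocating journal blocks from cg %d\n",
			    contig ? 1 : 2, cg);
			return (0);
		}
		printf("no roomy cg found, falling back to any free space\n");
	}
	printf("no free space at all\n");
	return (1);
}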
*/ static ino_t journal_findfile(void) { - struct ufs1_dinode *dp1; - struct ufs2_dinode *dp2; + union dinodep dp; ino_t ino; - int mode; - void *ip; int i; - if (getino(&disk, &ip, UFS_ROOTINO, &mode) != 0) { - warn("Failed to get root inode"); + if (getinode(&disk, &dp, UFS_ROOTINO) != 0) { + warn("Failed to get root inode: %s", disk.d_error); return (-1); } - dp2 = ip; - dp1 = ip; if (sblock.fs_magic == FS_UFS1_MAGIC) { - if ((off_t)dp1->di_size >= lblktosize(&sblock, UFS_NDADDR)) { + if ((off_t)dp.dp1->di_size >= lblktosize(&sblock, UFS_NDADDR)) { warnx("UFS_ROOTINO extends beyond direct blocks."); return (-1); } for (i = 0; i < UFS_NDADDR; i++) { - if (dp1->di_db[i] == 0) + if (dp.dp1->di_db[i] == 0) break; - if ((ino = dir_search(dp1->di_db[i], - sblksize(&sblock, (off_t)dp1->di_size, i))) != 0) + if ((ino = dir_search(dp.dp1->di_db[i], + sblksize(&sblock, (off_t)dp.dp1->di_size, i))) != 0) return (ino); } } else { - if ((off_t)dp2->di_size >= lblktosize(&sblock, UFS_NDADDR)) { + if ((off_t)dp.dp2->di_size >= lblktosize(&sblock, UFS_NDADDR)) { warnx("UFS_ROOTINO extends beyond direct blocks."); return (-1); } for (i = 0; i < UFS_NDADDR; i++) { - if (dp2->di_db[i] == 0) + if (dp.dp2->di_db[i] == 0) break; - if ((ino = dir_search(dp2->di_db[i], - sblksize(&sblock, (off_t)dp2->di_size, i))) != 0) + if ((ino = dir_search(dp.dp2->di_db[i], + sblksize(&sblock, (off_t)dp.dp2->di_size, i))) != 0) return (ino); } } return (0); } static void dir_clear_block(const char *block, off_t off) { struct direct *dp; for (; off < sblock.fs_bsize; off += DIRBLKSIZ) { dp = (struct direct *)&block[off]; dp->d_ino = 0; dp->d_reclen = DIRBLKSIZ; dp->d_type = DT_UNKNOWN; } } /* * Insert the journal at inode 'ino' into directory blk 'blk' at the first * free offset of 'off'. DIRBLKSIZ blocks after off are initialized as * empty. */ static int dir_insert(ufs2_daddr_t blk, off_t off, ino_t ino) { struct direct *dp; char block[MAXBSIZE]; if (bread(&disk, fsbtodb(&sblock, blk), block, sblock.fs_bsize) <= 0) { warn("Failed to read dir block"); return (-1); } bzero(&block[off], sblock.fs_bsize - off); dp = (struct direct *)&block[off]; dp->d_ino = ino; dp->d_reclen = DIRBLKSIZ; dp->d_type = DT_REG; dp->d_namlen = strlen(SUJ_FILE); bcopy(SUJ_FILE, &dp->d_name, strlen(SUJ_FILE)); dir_clear_block(block, off + DIRBLKSIZ); if (bwrite(&disk, fsbtodb(&sblock, blk), block, sblock.fs_bsize) <= 0) { warn("Failed to write dir block"); return (-1); } return (0); } /* * Extend a directory block in 'blk' by copying it to a full size block * and inserting the new journal inode into .sujournal. */ static int dir_extend(ufs2_daddr_t blk, ufs2_daddr_t nblk, off_t size, ino_t ino) { char block[MAXBSIZE]; if (bread(&disk, fsbtodb(&sblock, blk), block, roundup(size, sblock.fs_fsize)) <= 0) { warn("Failed to read dir block"); return (-1); } dir_clear_block(block, size); if (bwrite(&disk, fsbtodb(&sblock, nblk), block, sblock.fs_bsize) <= 0) { warn("Failed to write dir block"); return (-1); } return (dir_insert(nblk, size, ino)); } /* * Insert the journal file into the UFS_ROOTINO directory. 
We always extend the * last frag */ static int journal_insertfile(ino_t ino) { - struct ufs1_dinode *dp1; - struct ufs2_dinode *dp2; - void *ip; + union dinodep dp; ufs2_daddr_t nblk; ufs2_daddr_t blk; ufs_lbn_t lbn; int size; - int mode; int off; - if (getino(&disk, &ip, UFS_ROOTINO, &mode) != 0) { - warn("Failed to get root inode"); + if (getinode(&disk, &dp, UFS_ROOTINO) != 0) { + warn("Failed to get root inode: %s", disk.d_error); sbdirty(); return (-1); } - dp2 = ip; - dp1 = ip; blk = 0; size = 0; nblk = journal_balloc(); if (nblk <= 0) return (-1); /* * For simplicity sake we aways extend the UFS_ROOTINO into a new * directory block rather than searching for space and inserting * into an existing block. However, if the rootino has frags * have to free them and extend the block. */ if (sblock.fs_magic == FS_UFS1_MAGIC) { - lbn = lblkno(&sblock, dp1->di_size); - off = blkoff(&sblock, dp1->di_size); - blk = dp1->di_db[lbn]; - size = sblksize(&sblock, (off_t)dp1->di_size, lbn); + lbn = lblkno(&sblock, dp.dp1->di_size); + off = blkoff(&sblock, dp.dp1->di_size); + blk = dp.dp1->di_db[lbn]; + size = sblksize(&sblock, (off_t)dp.dp1->di_size, lbn); } else { - lbn = lblkno(&sblock, dp2->di_size); - off = blkoff(&sblock, dp2->di_size); - blk = dp2->di_db[lbn]; - size = sblksize(&sblock, (off_t)dp2->di_size, lbn); + lbn = lblkno(&sblock, dp.dp2->di_size); + off = blkoff(&sblock, dp.dp2->di_size); + blk = dp.dp2->di_db[lbn]; + size = sblksize(&sblock, (off_t)dp.dp2->di_size, lbn); } if (off != 0) { if (dir_extend(blk, nblk, off, ino) == -1) return (-1); } else { blk = 0; if (dir_insert(nblk, 0, ino) == -1) return (-1); } if (sblock.fs_magic == FS_UFS1_MAGIC) { - dp1->di_blocks += (sblock.fs_bsize - size) / DEV_BSIZE; - dp1->di_db[lbn] = nblk; - dp1->di_size = lblktosize(&sblock, lbn+1); + dp.dp1->di_blocks += (sblock.fs_bsize - size) / DEV_BSIZE; + dp.dp1->di_db[lbn] = nblk; + dp.dp1->di_size = lblktosize(&sblock, lbn+1); } else { - dp2->di_blocks += (sblock.fs_bsize - size) / DEV_BSIZE; - dp2->di_db[lbn] = nblk; - dp2->di_size = lblktosize(&sblock, lbn+1); + dp.dp2->di_blocks += (sblock.fs_bsize - size) / DEV_BSIZE; + dp.dp2->di_db[lbn] = nblk; + dp.dp2->di_size = lblktosize(&sblock, lbn+1); } - if (putino(&disk) < 0) { - warn("Failed to write root inode"); + if (putinode(&disk) < 0) { + warn("Failed to write root inode: %s", disk.d_error); return (-1); } if (cgwrite(&disk) < 0) { warn("Failed to write updated cg"); sbdirty(); return (-1); } if (blk) { if (cgbfree(&disk, blk, size) < 0) { warn("Failed to write cg"); return (-1); } } return (0); } static int indir_fill(ufs2_daddr_t blk, int level, int *resid) { char indirbuf[MAXBSIZE]; ufs1_daddr_t *bap1; ufs2_daddr_t *bap2; ufs2_daddr_t nblk; int ncnt; int cnt; int i; bzero(indirbuf, sizeof(indirbuf)); bap1 = (ufs1_daddr_t *)indirbuf; bap2 = (void *)bap1; cnt = 0; for (i = 0; i < NINDIR(&sblock) && *resid != 0; i++) { nblk = journal_balloc(); if (nblk <= 0) return (-1); cnt++; if (sblock.fs_magic == FS_UFS1_MAGIC) *bap1++ = nblk; else *bap2++ = nblk; if (level != 0) { ncnt = indir_fill(nblk, level - 1, resid); if (ncnt <= 0) return (-1); cnt += ncnt; } else (*resid)--; } if (bwrite(&disk, fsbtodb(&sblock, blk), indirbuf, sblock.fs_bsize) <= 0) { warn("Failed to write indirect"); return (-1); } return (cnt); } /* * Clear the flag bits so the journal can be removed. 
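The surrounding hunks all make the same mechanical change: the old getino()/putino() calls, which handed back a bare pointer plus a mode, are replaced by libufs's getinode()/putinode() working through a union dinodep, with disk.d_error carrying the failure reason. A minimal standalone sketch of that access pattern follows (build with -lufs); the include list, the /dev/md0 device path, and the mtime update are illustrative assumptions, not part of this change.

/* Hedged sketch of the getinode()/putinode() pattern this revision adopts. */
#include <sys/param.h>
#include <sys/mount.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>
#include <err.h>
#include <libufs.h>
#include <time.h>

int
main(void)
{
	struct uufsd disk;
	union dinodep dp;
	time_t now;

	if (ufs_disk_fillout(&disk, "/dev/md0") == -1)	/* placeholder device */
		err(1, "ufs_disk_fillout");
	if (getinode(&disk, &dp, UFS_ROOTINO) != 0)
		errx(1, "getinode: %s", disk.d_error);
	time(&now);
	/* The union hides the UFS1/UFS2 on-disk inode layout difference. */
	if (disk.d_fs.fs_magic == FS_UFS1_MAGIC)
		dp.dp1->di_mtime = now;
	else
		dp.dp2->di_mtime = now;
	if (putinode(&disk) != 0)		/* writes the updated inode back */
		errx(1, "putinode: %s", disk.d_error);
	ufs_disk_close(&disk);
	return (0);
}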
*/ static void journal_clear(void) { - struct ufs1_dinode *dp1; - struct ufs2_dinode *dp2; + union dinodep dp; ino_t ino; - int mode; - void *ip; ino = journal_findfile(); if (ino == (ino_t)-1 || ino == 0) { warnx("Journal file does not exist"); return; } printf("Clearing journal flags from inode %ju\n", (uintmax_t)ino); - if (getino(&disk, &ip, ino, &mode) != 0) { - warn("Failed to get journal inode"); + if (getinode(&disk, &dp, ino) != 0) { + warn("Failed to get journal inode: %s", disk.d_error); return; } - dp2 = ip; - dp1 = ip; if (sblock.fs_magic == FS_UFS1_MAGIC) - dp1->di_flags = 0; + dp.dp1->di_flags = 0; else - dp2->di_flags = 0; - if (putino(&disk) < 0) { - warn("Failed to write journal inode"); + dp.dp2->di_flags = 0; + if (putinode(&disk) < 0) { + warn("Failed to write journal inode: %s", disk.d_error); return; } } static int journal_alloc(int64_t size) { - struct ufs1_dinode *dp1; - struct ufs2_dinode *dp2; + union dinodep dp; ufs2_daddr_t blk; - void *ip; struct cg *cgp; int resid; ino_t ino; int blks; - int mode; time_t utime; int i; cgp = &disk.d_cg; ino = 0; /* * If the journal file exists we can't allocate it. */ ino = journal_findfile(); if (ino == (ino_t)-1) { warnx("journal_findfile() failed."); return (-1); } if (ino > 0) { warnx("Journal file %s already exists, please remove.", SUJ_FILE); return (-1); } /* * If the user didn't supply a size pick one based on the filesystem * size constrained with hardcoded MIN and MAX values. We opt for * 1/1024th of the filesystem up to MAX but not exceeding one CG and * not less than the MIN. */ if (size == 0) { size = (sblock.fs_size * sblock.fs_bsize) / 1024; size = MIN(SUJ_MAX, size); if (size / sblock.fs_fsize > sblock.fs_fpg) size = sblock.fs_fpg * sblock.fs_fsize; size = MAX(SUJ_MIN, size); /* fsck does not support fragments in journal files. */ size = roundup(size, sblock.fs_bsize); } resid = blocks = size / sblock.fs_bsize; if (sblock.fs_cstotal.cs_nbfree < blocks) { warn("Insufficient free space for %jd byte journal", size); return (-1); } /* * Find a cg with enough blocks to satisfy the journal * size. Presently the journal does not span cgs. */ while (cgread(&disk) == 1) { if (cgp->cg_cs.cs_nifree == 0) continue; ino = cgialloc(&disk); if (ino <= 0) break; printf("Using inode %ju in cg %d for %jd byte journal\n", (uintmax_t)ino, cgp->cg_cgx, size); - if (getino(&disk, &ip, ino, &mode) != 0) { - warn("Failed to get allocated inode"); + if (getinode(&disk, &dp, ino) != 0) { + warn("Failed to get allocated inode: %s", disk.d_error); sbdirty(); goto out; } /* * We leave fields unrelated to the number of allocated * blocks and size uninitialized. This causes legacy * fsck implementations to clear the inode. 
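The sizing block in journal_alloc() above picks the default journal size as 1/1024th of the file system, clamped between SUJ_MIN and SUJ_MAX, capped at one cylinder group, and rounded up to whole blocks because fsck cannot handle fragments in the journal. Below is a hedged restatement of that arithmetic as a pure function, with invented geometry in main(); SUJ_MIN/SUJ_MAX and the struct fs fields are taken to come from the UFS headers, as they do in tunefs itself.

#include <sys/param.h>		/* MIN, MAX, roundup */
#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>		/* struct fs, SUJ_MIN, SUJ_MAX */
#include <stdint.h>
#include <stdio.h>

static int64_t
default_journal_size(const struct fs *fs)
{
	int64_t size;

	size = (fs->fs_size * fs->fs_bsize) / 1024;	/* 1/1024th of the fs */
	size = MIN(SUJ_MAX, size);			/* hard upper bound */
	if (size / fs->fs_fsize > fs->fs_fpg)		/* at most one cg */
		size = (int64_t)fs->fs_fpg * fs->fs_fsize;
	size = MAX(SUJ_MIN, size);			/* hard lower bound */
	return (roundup(size, fs->fs_bsize));		/* whole blocks only */
}

int
main(void)
{
	/* Invented geometry: 32 KB blocks, 4 KB frags, ~512 GB of data. */
	struct fs fs = {
		.fs_size = 134217728,
		.fs_bsize = 32768,
		.fs_fsize = 4096,
		.fs_fpg = 65528,
	};

	printf("default journal size: %jd bytes\n",
	    (intmax_t)default_journal_size(&fs));
	return (0);
}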
*/ - dp2 = ip; - dp1 = ip; time(&utime); if (sblock.fs_magic == FS_UFS1_MAGIC) { - bzero(dp1, sizeof(*dp1)); - dp1->di_size = size; - dp1->di_mode = IFREG | IREAD; - dp1->di_nlink = 1; - dp1->di_flags = SF_IMMUTABLE | SF_NOUNLINK | UF_NODUMP; - dp1->di_atime = utime; - dp1->di_mtime = utime; - dp1->di_ctime = utime; + bzero(dp.dp1, sizeof(*dp.dp1)); + dp.dp1->di_size = size; + dp.dp1->di_mode = IFREG | IREAD; + dp.dp1->di_nlink = 1; + dp.dp1->di_flags = + SF_IMMUTABLE | SF_NOUNLINK | UF_NODUMP; + dp.dp1->di_atime = utime; + dp.dp1->di_mtime = utime; + dp.dp1->di_ctime = utime; } else { - bzero(dp2, sizeof(*dp2)); - dp2->di_size = size; - dp2->di_mode = IFREG | IREAD; - dp2->di_nlink = 1; - dp2->di_flags = SF_IMMUTABLE | SF_NOUNLINK | UF_NODUMP; - dp2->di_atime = utime; - dp2->di_mtime = utime; - dp2->di_ctime = utime; - dp2->di_birthtime = utime; + bzero(dp.dp2, sizeof(*dp.dp2)); + dp.dp2->di_size = size; + dp.dp2->di_mode = IFREG | IREAD; + dp.dp2->di_nlink = 1; + dp.dp2->di_flags = + SF_IMMUTABLE | SF_NOUNLINK | UF_NODUMP; + dp.dp2->di_atime = utime; + dp.dp2->di_mtime = utime; + dp.dp2->di_ctime = utime; + dp.dp2->di_birthtime = utime; } for (i = 0; i < UFS_NDADDR && resid; i++, resid--) { blk = journal_balloc(); if (blk <= 0) goto out; if (sblock.fs_magic == FS_UFS1_MAGIC) { - dp1->di_db[i] = blk; - dp1->di_blocks++; + dp.dp1->di_db[i] = blk; + dp.dp1->di_blocks++; } else { - dp2->di_db[i] = blk; - dp2->di_blocks++; + dp.dp2->di_db[i] = blk; + dp.dp2->di_blocks++; } } for (i = 0; i < UFS_NIADDR && resid; i++) { blk = journal_balloc(); if (blk <= 0) goto out; blks = indir_fill(blk, i, &resid) + 1; if (blks <= 0) { sbdirty(); goto out; } if (sblock.fs_magic == FS_UFS1_MAGIC) { - dp1->di_ib[i] = blk; - dp1->di_blocks += blks; + dp.dp1->di_ib[i] = blk; + dp.dp1->di_blocks += blks; } else { - dp2->di_ib[i] = blk; - dp2->di_blocks += blks; + dp.dp2->di_ib[i] = blk; + dp.dp2->di_blocks += blks; } } if (sblock.fs_magic == FS_UFS1_MAGIC) - dp1->di_blocks *= sblock.fs_bsize / disk.d_bsize; + dp.dp1->di_blocks *= sblock.fs_bsize / disk.d_bsize; else - dp2->di_blocks *= sblock.fs_bsize / disk.d_bsize; - if (putino(&disk) < 0) { - warn("Failed to write inode"); + dp.dp2->di_blocks *= sblock.fs_bsize / disk.d_bsize; + if (putinode(&disk) < 0) { + warn("Failed to write allocated inode: %s", + disk.d_error); sbdirty(); return (-1); } if (cgwrite(&disk) < 0) { warn("Failed to write updated cg"); sbdirty(); return (-1); } if (journal_insertfile(ino) < 0) { sbdirty(); return (-1); } sblock.fs_sujfree = 0; return (0); } warnx("Insufficient free space for the journal."); out: return (-1); } static void usage(void) { fprintf(stderr, "%s\n%s\n%s\n%s\n%s\n%s\n", "usage: tunefs [-A] [-a enable | disable] [-e maxbpg] [-f avgfilesize]", " [-J enable | disable] [-j enable | disable] [-k metaspace]", " [-L volname] [-l enable | disable] [-m minfree]", " [-N enable | disable] [-n enable | disable]", " [-o space | time] [-p] [-s avgfpdir] [-t enable | disable]", " special | filesystem"); exit(2); } static void printfs(void) { warnx("POSIX.1e ACLs: (-a) %s", (sblock.fs_flags & FS_ACLS)? "enabled" : "disabled"); warnx("NFSv4 ACLs: (-N) %s", (sblock.fs_flags & FS_NFS4ACLS)? "enabled" : "disabled"); warnx("MAC multilabel: (-l) %s", (sblock.fs_flags & FS_MULTILABEL)? "enabled" : "disabled"); warnx("soft updates: (-n) %s", (sblock.fs_flags & FS_DOSOFTDEP)? "enabled" : "disabled"); warnx("soft update journaling: (-j) %s", (sblock.fs_flags & FS_SUJ)? 
"enabled" : "disabled"); warnx("gjournal: (-J) %s", (sblock.fs_flags & FS_GJOURNAL)? "enabled" : "disabled"); warnx("trim: (-t) %s", (sblock.fs_flags & FS_TRIM)? "enabled" : "disabled"); warnx("maximum blocks per file in a cylinder group: (-e) %d", sblock.fs_maxbpg); warnx("average file size: (-f) %d", sblock.fs_avgfilesize); warnx("average number of files in a directory: (-s) %d", sblock.fs_avgfpdir); warnx("minimum percentage of free space: (-m) %d%%", sblock.fs_minfree); warnx("space to hold for metadata blocks: (-k) %jd", sblock.fs_metaspace); warnx("optimization preference: (-o) %s", sblock.fs_optim == FS_OPTSPACE ? "space" : "time"); if (sblock.fs_minfree >= MINFREE && sblock.fs_optim == FS_OPTSPACE) warnx(OPTWARN, "time", ">=", MINFREE); if (sblock.fs_minfree < MINFREE && sblock.fs_optim == FS_OPTTIME) warnx(OPTWARN, "space", "<", MINFREE); warnx("volume label: (-L) %s", sblock.fs_volname); } Index: projects/clang700-import/share/mk/bsd.linker.mk =================================================================== --- projects/clang700-import/share/mk/bsd.linker.mk (revision 340426) +++ projects/clang700-import/share/mk/bsd.linker.mk (revision 340427) @@ -1,116 +1,113 @@ # $FreeBSD$ # Setup variables for the linker. # # LINKER_TYPE is the major type of linker. Currently binutils and lld support # automatic detection. # # LINKER_VERSION is a numeric constant equal to: # major * 10000 + minor * 100 + tiny # It too can be overridden on the command line. # # LINKER_FEATURES may contain one or more of the following, based on # linker support for that feature: # # - build-id: support for generating a Build-ID note # - retpoline: support for generating PLT with retpoline speculative # execution vulnerability mitigation # # LINKER_FREEBSD_VERSION is the linker's internal source version. # # These variables with an X_ prefix will also be provided if XLD is set. # # This file may be included multiple times, but only has effect the first time. # .if !target(____) ____: _ld_vars=LD $${_empty_var_} .if !empty(_WANT_TOOLCHAIN_CROSS_VARS) # Only the toplevel makefile needs to compute the X_LINKER_* variables. _ld_vars+=XLD X_ .endif .for ld X_ in ${_ld_vars} .if ${ld} == "LD" || !empty(XLD) # Try to import LINKER_TYPE and LINKER_VERSION from parent make. # The value is only used/exported for the same environment that impacts # LD and LINKER_* settings here. _exported_vars= ${X_}LINKER_TYPE ${X_}LINKER_VERSION ${X_}LINKER_FEATURES \ ${X_}LINKER_FREEBSD_VERSION ${X_}_ld_hash= ${${ld}}${MACHINE}${PATH} ${X_}_ld_hash:= ${${X_}_ld_hash:hash} # Only import if none of the vars are set somehow else. 
_can_export= yes .for var in ${_exported_vars} .if defined(${var}) _can_export= no .endif .endfor .if ${_can_export} == yes .for var in ${_exported_vars} .if defined(${var}.${${X_}_ld_hash}) ${var}= ${${var}.${${X_}_ld_hash}} .endif .endfor .endif .if ${ld} == "LD" || (${ld} == "XLD" && ${XLD} != ${LD}) .if !defined(${X_}LINKER_TYPE) || !defined(${X_}LINKER_VERSION) _ld_version!= (${${ld}} --version || echo none) | sed -n 1p .if ${_ld_version} == "none" .warning Unable to determine linker type from ${ld}=${${ld}} .endif .if ${_ld_version:[1..2]} == "GNU ld" ${X_}LINKER_TYPE= bfd ${X_}LINKER_FREEBSD_VERSION= 0 _v= ${_ld_version:M[1-9].[0-9]*:[1]} .elif ${_ld_version:[1]} == "LLD" ${X_}LINKER_TYPE= lld _v= ${_ld_version:[2]} ${X_}LINKER_FREEBSD_VERSION!= \ ${${ld}} --version | \ awk '$$3 ~ /FreeBSD/ {print substr($$4, 1, length($$4)-1)}' .else .warning Unknown linker from ${ld}=${${ld}}: ${_ld_version}, defaulting to bfd ${X_}LINKER_TYPE= bfd _v= 2.17.50 .endif ${X_}LINKER_VERSION!= echo "${_v:M[1-9].[0-9]*}" | \ awk -F. '{print $$1 * 10000 + $$2 * 100 + $$3;}' .undef _ld_version .undef _v ${X_}LINKER_FEATURES= .if ${${X_}LINKER_TYPE} != "bfd" || ${${X_}LINKER_VERSION} > 21750 ${X_}LINKER_FEATURES+= build-id ${X_}LINKER_FEATURES+= ifunc .endif -.if ${${X_}LINKER_TYPE} != "lld" || ${${X_}LINKER_VERSION} >= 50000 -${X_}LINKER_FEATURES+= filter -.endif .if ${${X_}LINKER_TYPE} == "lld" && ${${X_}LINKER_VERSION} >= 60000 ${X_}LINKER_FEATURES+= retpoline .endif .endif .else # Use LD's values X_LINKER_TYPE= ${LINKER_TYPE} X_LINKER_VERSION= ${LINKER_VERSION} X_LINKER_FEATURES= ${LINKER_FEATURES} X_LINKER_FREEBSD_VERSION= ${LINKER_FREEBSD_VERSION} .endif # ${ld} == "LD" || (${ld} == "XLD" && ${XLD} != ${LD}) # Export the values so sub-makes don't have to look them up again, using the # hash key computed above. .for var in ${_exported_vars} ${var}.${${X_}_ld_hash}:= ${${var}} .export-env ${var}.${${X_}_ld_hash} .undef ${var}.${${X_}_ld_hash} .endfor .endif # ${ld} == "LD" || !empty(XLD) .endfor # .for ld in LD XLD .endif # !target(____) Index: projects/clang700-import/share/mk/src.libnames.mk =================================================================== --- projects/clang700-import/share/mk/src.libnames.mk (revision 340426) +++ projects/clang700-import/share/mk/src.libnames.mk (revision 340427) @@ -1,603 +1,605 @@ # $FreeBSD$ # # The include file define library names suitable # for INTERNALLIB and PRIVATELIB definition .if !target(____) .error src.libnames.mk cannot be included directly. 
.endif .if !target(____) ____: .include _PRIVATELIBS= \ atf_c \ atf_cxx \ bsdstat \ devdctl \ event \ heimipcc \ heimipcs \ ifconfig \ ldns \ sqlite3 \ ssh \ ucl \ unbound \ zstd _INTERNALLIBS= \ amu \ bsnmptools \ c_nossp_pic \ cron \ elftc \ fifolog \ ipf \ lpr \ netbsd \ ntp \ ntpevent \ openbsd \ opts \ parse \ pe \ pmcstat \ sl \ sm \ smdb \ smutil \ telnet \ vers _LIBRARIES= \ ${_PRIVATELIBS} \ ${_INTERNALLIBS} \ ${LOCAL_LIBRARIES} \ 80211 \ alias \ archive \ asn1 \ auditd \ avl \ be \ begemot \ bluetooth \ bsdxml \ bsm \ bsnmp \ bz2 \ c \ c_pic \ calendar \ cam \ casper \ cap_dns \ + cap_fileargs \ cap_grp \ cap_pwd \ cap_random \ cap_sysctl \ cap_syslog \ com_err \ compiler_rt \ crypt \ crypto \ ctf \ cuse \ cxxrt \ devctl \ devdctl \ devinfo \ devstat \ dialog \ dl \ dpv \ dtrace \ dwarf \ edit \ efivar \ elf \ execinfo \ fetch \ figpar \ geom \ gnuregex \ gpio \ gssapi \ gssapi_krb5 \ hdb \ heimbase \ heimntlm \ heimsqlite \ hx509 \ ipsec \ ipt \ jail \ kadm5clnt \ kadm5srv \ kafs5 \ kdc \ kiconv \ krb5 \ kvm \ l \ lzma \ m \ magic \ md \ memstat \ mp \ mt \ nandfs \ ncurses \ ncursesw \ netgraph \ ngatm \ nv \ nvpair \ opencsd \ opie \ pam \ panel \ panelw \ pcap \ pcsclite \ pjdlog \ pmc \ proc \ procstat \ pthread \ radius \ regex \ roken \ rpcsec_gss \ rpcsvc \ rt \ rtld_db \ sbuf \ sdp \ sm \ smb \ ssl \ ssp_nonshared \ stdthreads \ supcplusplus \ sysdecode \ tacplus \ termcap \ termcapw \ ufs \ ugidfw \ ulog \ umem \ usb \ usbhid \ util \ uutil \ vmmapi \ wind \ wrap \ xo \ y \ ypclnt \ z \ zfs_core \ zfs \ zpool \ .if ${MK_BLACKLIST} != "no" _LIBRARIES+= \ blacklist \ .endif .if ${MK_OFED} != "no" _LIBRARIES+= \ cxgb4 \ ibcm \ ibmad \ ibnetdisc \ ibumad \ ibverbs \ mlx4 \ mlx5 \ rdmacm \ osmcomp \ opensm \ osmvendor .endif # Each library's LIBADD needs to be duplicated here for static linkage of # 2nd+ order consumers. Auto-generating this would be better. 
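The comment just above is the key to the _DP_* lists that follow: a static archive cannot record what it links against, so when a consumer links statically (or against an INTERNALLIB) the .for loop later in this file appends each dependency's own LDADD/DPADD as well. A rough sketch of that expansion in C, using a handful of edges copied from the lists below (archive, fetch, ssl, with OpenSSL enabled); the table is an illustration, not the full dependency graph.

/*
 * Rough sketch of the transitive expansion performed for static linkage:
 * starting from one LIBADD entry, keep appending -l flags for each
 * library's _DP_ dependencies, skipping anything already emitted.
 */
#include <stdio.h>
#include <string.h>

struct dep {
	const char *lib;
	const char *deps[8];		/* the library's _DP_ list */
};

static const struct dep deptab[] = {
	{ "archive", { "z", "bz2", "lzma", "bsdxml", "crypto", NULL } },
	{ "fetch",   { "ssl", "crypto", NULL } },
	{ "ssl",     { "crypto", NULL } },
};

static void
emit(const char *lib, char seen[][32], int *nseen)
{
	for (int i = 0; i < *nseen; i++)
		if (strcmp(seen[i], lib) == 0)
			return;			/* already on the link line */
	snprintf(seen[(*nseen)++], 32, "%s", lib);
	printf(" -l%s", lib);
	for (size_t j = 0; j < sizeof(deptab) / sizeof(deptab[0]); j++)
		if (strcmp(deptab[j].lib, lib) == 0)
			for (int k = 0; deptab[j].deps[k] != NULL; k++)
				emit(deptab[j].deps[k], seen, nseen);
}

int
main(void)
{
	char seen[16][32];
	int nseen = 0;

	printf("static LDADD for libfetch:");
	emit("fetch", seen, &nseen);	/* prints -lfetch -lssl -lcrypto */
	printf("\n");
	return (0);
}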
_DP_80211= sbuf bsdxml _DP_archive= z bz2 lzma bsdxml _DP_zstd= pthread .if ${MK_BLACKLIST} != "no" _DP_blacklist+= pthread .endif _DP_crypto= pthread .if ${MK_OPENSSL} != "no" _DP_archive+= crypto .else _DP_archive+= md .endif _DP_sqlite3= pthread _DP_ssl= crypto _DP_ssh= crypto crypt z .if ${MK_LDNS} != "no" _DP_ssh+= ldns .endif _DP_edit= ncursesw .if ${MK_OPENSSL} != "no" _DP_bsnmp= crypto .endif _DP_geom= bsdxml sbuf _DP_cam= sbuf _DP_kvm= elf _DP_casper= nv _DP_cap_dns= nv +_DP_cap_fileargs= nv _DP_cap_grp= nv _DP_cap_pwd= nv _DP_cap_random= nv _DP_cap_sysctl= nv _DP_cap_syslog= nv .if ${MK_OFED} != "no" _DP_pcap= ibverbs mlx5 .endif _DP_pjdlog= util _DP_opie= md _DP_usb= pthread _DP_unbound= ssl crypto pthread _DP_rt= pthread .if ${MK_OPENSSL} == "no" _DP_radius= md .else _DP_radius= crypto .endif _DP_rtld_db= elf procstat _DP_procstat= kvm util elf .if ${MK_CXX} == "yes" .if ${MK_LIBCPLUSPLUS} != "no" _DP_proc= cxxrt .else _DP_proc= supcplusplus .endif .endif .if ${MK_CDDL} != "no" _DP_proc+= ctf .endif _DP_proc+= elf procstat rtld_db util _DP_mp= crypto _DP_memstat= kvm _DP_magic= z _DP_mt= sbuf bsdxml _DP_ldns= ssl crypto .if ${MK_OPENSSL} != "no" _DP_fetch= ssl crypto .else _DP_fetch= md .endif _DP_execinfo= elf _DP_dwarf= elf _DP_dpv= dialog figpar util ncursesw _DP_dialog= ncursesw m _DP_cuse= pthread _DP_atf_cxx= atf_c _DP_devstat= kvm _DP_pam= radius tacplus opie md util .if ${MK_KERBEROS} != "no" _DP_pam+= krb5 .endif .if ${MK_OPENSSH} != "no" _DP_pam+= ssh .endif .if ${MK_NIS} != "no" _DP_pam+= ypclnt .endif _DP_roken= crypt _DP_kadm5clnt= com_err krb5 roken _DP_kadm5srv= com_err hdb krb5 roken _DP_heimntlm= crypto com_err krb5 roken _DP_hx509= asn1 com_err crypto roken wind _DP_hdb= asn1 com_err krb5 roken sqlite3 _DP_asn1= com_err roken _DP_kdc= roken hdb hx509 krb5 heimntlm asn1 crypto _DP_wind= com_err roken _DP_heimbase= pthread _DP_heimipcc= heimbase roken pthread _DP_heimipcs= heimbase roken pthread _DP_kafs5= asn1 krb5 roken _DP_krb5+= asn1 com_err crypt crypto hx509 roken wind heimbase heimipcc _DP_gssapi_krb5+= gssapi krb5 crypto roken asn1 com_err _DP_lzma= pthread _DP_ucl= m _DP_vmmapi= util _DP_opencsd= cxxrt _DP_ctf= z _DP_dtrace= ctf elf proc pthread rtld_db _DP_xo= util # The libc dependencies are not strictly needed but are defined to make the # assert happy. 
_DP_c= compiler_rt .if ${MK_SSP} != "no" _DP_c+= ssp_nonshared .endif _DP_stdthreads= pthread _DP_tacplus= md _DP_panel= ncurses _DP_panelw= ncursesw _DP_rpcsec_gss= gssapi _DP_smb= kiconv _DP_ulog= md _DP_fifolog= z _DP_ipf= kvm _DP_zfs= md pthread umem util uutil m nvpair avl bsdxml geom nvpair z \ zfs_core _DP_zfs_core= nvpair _DP_zpool= md pthread z nvpair avl umem _DP_be= zfs nvpair # OFED support .if ${MK_OFED} != "no" _DP_cxgb4= ibverbs pthread _DP_ibcm= ibverbs _DP_ibmad= ibumad _DP_ibnetdisc= osmcomp ibmad ibumad _DP_ibumad= _DP_ibverbs= _DP_mlx4= ibverbs pthread _DP_mlx5= ibverbs pthread _DP_rdmacm= ibverbs _DP_osmcomp= pthread _DP_opensm= pthread _DP_osmvendor= ibumad pthread .endif # Define special cases LDADD_supcplusplus= -lsupc++ LIBATF_C= ${LIBDESTDIR}${LIBDIR_BASE}/libprivateatf-c.a LIBATF_CXX= ${LIBDESTDIR}${LIBDIR_BASE}/libprivateatf-c++.a LDADD_atf_c= -lprivateatf-c LDADD_atf_cxx= -lprivateatf-c++ .for _l in ${_PRIVATELIBS} LIB${_l:tu}?= ${LIBDESTDIR}${LIBDIR_BASE}/libprivate${_l}.a .endfor .for _l in ${_LIBRARIES} .if ${_INTERNALLIBS:M${_l}} || !defined(SYSROOT) LDADD_${_l}_L+= -L${LIB${_l:tu}DIR} .endif DPADD_${_l}?= ${LIB${_l:tu}} .if ${_PRIVATELIBS:M${_l}} LDADD_${_l}?= -lprivate${_l} .else LDADD_${_l}?= ${LDADD_${_l}_L} -l${_l} .endif # Add in all dependencies for static linkage. .if defined(_DP_${_l}) && (${_INTERNALLIBS:M${_l}} || \ (defined(NO_SHARED) && (${NO_SHARED} != "no" && ${NO_SHARED} != "NO"))) .for _d in ${_DP_${_l}} DPADD_${_l}+= ${DPADD_${_d}} LDADD_${_l}+= ${LDADD_${_d}} .endfor .endif .endfor # These are special cases where the library is broken and anything that uses # it needs to add more dependencies. Broken usually means that it has a # cyclic dependency and cannot link its own dependencies. This is bad, please # fix the library instead. # Unless the library itself is broken then the proper place to define # dependencies is _DP_* above. # libatf-c++ exposes libatf-c abi hence we need to explicit link to atf_c for # atf_cxx DPADD_atf_cxx+= ${DPADD_atf_c} LDADD_atf_cxx+= ${LDADD_atf_c} # Detect LDADD/DPADD that should be LIBADD, before modifying LDADD here. _BADLDADD= .for _l in ${LDADD:M-l*:N-l*/*:C,^-l,,} .if ${_LIBRARIES:M${_l}} && !${_PRIVATELIBS:M${_l}} _BADLDADD+= ${_l} .endif .endfor .if !empty(_BADLDADD) .error ${.CURDIR}: These libraries should be LIBADD+=foo rather than DPADD/LDADD+=-lfoo: ${_BADLDADD} .endif .for _l in ${LIBADD} DPADD+= ${DPADD_${_l}} LDADD+= ${LDADD_${_l}} .endfor # INTERNALLIB definitions. 
LIBELFTCDIR= ${OBJTOP}/lib/libelftc LIBELFTC?= ${LIBELFTCDIR}/libelftc.a LIBPEDIR= ${OBJTOP}/lib/libpe LIBPE?= ${LIBPEDIR}/libpe.a LIBOPENBSDDIR= ${OBJTOP}/lib/libopenbsd LIBOPENBSD?= ${LIBOPENBSDDIR}/libopenbsd.a LIBSMDIR= ${OBJTOP}/lib/libsm LIBSM?= ${LIBSMDIR}/libsm.a LIBSMDBDIR= ${OBJTOP}/lib/libsmdb LIBSMDB?= ${LIBSMDBDIR}/libsmdb.a LIBSMUTILDIR= ${OBJTOP}/lib/libsmutil LIBSMUTIL?= ${LIBSMUTILDIR}/libsmutil.a LIBNETBSDDIR?= ${OBJTOP}/lib/libnetbsd LIBNETBSD?= ${LIBNETBSDDIR}/libnetbsd.a LIBVERSDIR?= ${OBJTOP}/kerberos5/lib/libvers LIBVERS?= ${LIBVERSDIR}/libvers.a LIBSLDIR= ${OBJTOP}/kerberos5/lib/libsl LIBSL?= ${LIBSLDIR}/libsl.a LIBIPFDIR= ${OBJTOP}/sbin/ipf/libipf LIBIPF?= ${LIBIPFDIR}/libipf.a LIBTELNETDIR= ${OBJTOP}/lib/libtelnet LIBTELNET?= ${LIBTELNETDIR}/libtelnet.a LIBCRONDIR= ${OBJTOP}/usr.sbin/cron/lib LIBCRON?= ${LIBCRONDIR}/libcron.a LIBNTPDIR= ${OBJTOP}/usr.sbin/ntp/libntp LIBNTP?= ${LIBNTPDIR}/libntp.a LIBNTPEVENTDIR= ${OBJTOP}/usr.sbin/ntp/libntpevent LIBNTPEVENT?= ${LIBNTPEVENTDIR}/libntpevent.a LIBOPTSDIR= ${OBJTOP}/usr.sbin/ntp/libopts LIBOPTS?= ${LIBOPTSDIR}/libopts.a LIBPARSEDIR= ${OBJTOP}/usr.sbin/ntp/libparse LIBPARSE?= ${LIBPARSEDIR}/libparse.a LIBLPRDIR= ${OBJTOP}/usr.sbin/lpr/common_source LIBLPR?= ${LIBLPRDIR}/liblpr.a LIBFIFOLOGDIR= ${OBJTOP}/usr.sbin/fifolog/lib LIBFIFOLOG?= ${LIBFIFOLOGDIR}/libfifolog.a LIBBSNMPTOOLSDIR= ${OBJTOP}/usr.sbin/bsnmpd/tools/libbsnmptools LIBBSNMPTOOLS?= ${LIBBSNMPTOOLSDIR}/libbsnmptools.a LIBAMUDIR= ${OBJTOP}/usr.sbin/amd/libamu LIBAMU?= ${LIBAMUDIR}/libamu.a LIBBE?= ${LIBBEDIR}/libbe.a LIBPMCSTATDIR= ${OBJTOP}/lib/libpmcstat LIBPMCSTAT?= ${LIBPMCSTATDIR}/libpmcstat.a LIBC_NOSSP_PICDIR= ${OBJTOP}/lib/libc LIBC_NOSSP_PIC?= ${LIBC_NOSSP_PICDIR}/libc_nossp_pic.a # Define a directory for each library. This is useful for adding -L in when # not using a --sysroot or for meta mode bootstrapping when there is no # Makefile.depend. These are sorted by directory. 
LIBAVLDIR= ${OBJTOP}/cddl/lib/libavl LIBCTFDIR= ${OBJTOP}/cddl/lib/libctf LIBDTRACEDIR= ${OBJTOP}/cddl/lib/libdtrace LIBNVPAIRDIR= ${OBJTOP}/cddl/lib/libnvpair LIBUMEMDIR= ${OBJTOP}/cddl/lib/libumem LIBUUTILDIR= ${OBJTOP}/cddl/lib/libuutil LIBZFSDIR= ${OBJTOP}/cddl/lib/libzfs LIBZFS_COREDIR= ${OBJTOP}/cddl/lib/libzfs_core LIBZPOOLDIR= ${OBJTOP}/cddl/lib/libzpool # OFED support LIBCXGB4DIR= ${OBJTOP}/lib/ofed/libcxgb4 LIBIBCMDIR= ${OBJTOP}/lib/ofed/libibcm LIBIBMADDIR= ${OBJTOP}/lib/ofed/libibmad LIBIBNETDISCDIR=${OBJTOP}/lib/ofed/libibnetdisc LIBIBUMADDIR= ${OBJTOP}/lib/ofed/libibumad LIBIBVERBSDIR= ${OBJTOP}/lib/ofed/libibverbs LIBMLX4DIR= ${OBJTOP}/lib/ofed/libmlx4 LIBMLX5DIR= ${OBJTOP}/lib/ofed/libmlx5 LIBRDMACMDIR= ${OBJTOP}/lib/ofed/librdmacm LIBOSMCOMPDIR= ${OBJTOP}/lib/ofed/complib LIBOPENSMDIR= ${OBJTOP}/lib/ofed/libopensm LIBOSMVENDORDIR=${OBJTOP}/lib/ofed/libvendor LIBDIALOGDIR= ${OBJTOP}/gnu/lib/libdialog LIBGCOVDIR= ${OBJTOP}/gnu/lib/libgcov LIBGOMPDIR= ${OBJTOP}/gnu/lib/libgomp LIBGNUREGEXDIR= ${OBJTOP}/gnu/lib/libregex LIBSSPDIR= ${OBJTOP}/gnu/lib/libssp LIBSSP_NONSHAREDDIR= ${OBJTOP}/gnu/lib/libssp/libssp_nonshared LIBSUPCPLUSPLUSDIR= ${OBJTOP}/gnu/lib/libsupc++ LIBASN1DIR= ${OBJTOP}/kerberos5/lib/libasn1 LIBGSSAPI_KRB5DIR= ${OBJTOP}/kerberos5/lib/libgssapi_krb5 LIBGSSAPI_NTLMDIR= ${OBJTOP}/kerberos5/lib/libgssapi_ntlm LIBGSSAPI_SPNEGODIR= ${OBJTOP}/kerberos5/lib/libgssapi_spnego LIBHDBDIR= ${OBJTOP}/kerberos5/lib/libhdb LIBHEIMBASEDIR= ${OBJTOP}/kerberos5/lib/libheimbase LIBHEIMIPCCDIR= ${OBJTOP}/kerberos5/lib/libheimipcc LIBHEIMIPCSDIR= ${OBJTOP}/kerberos5/lib/libheimipcs LIBHEIMNTLMDIR= ${OBJTOP}/kerberos5/lib/libheimntlm LIBHX509DIR= ${OBJTOP}/kerberos5/lib/libhx509 LIBKADM5CLNTDIR= ${OBJTOP}/kerberos5/lib/libkadm5clnt LIBKADM5SRVDIR= ${OBJTOP}/kerberos5/lib/libkadm5srv LIBKAFS5DIR= ${OBJTOP}/kerberos5/lib/libkafs5 LIBKDCDIR= ${OBJTOP}/kerberos5/lib/libkdc LIBKRB5DIR= ${OBJTOP}/kerberos5/lib/libkrb5 LIBROKENDIR= ${OBJTOP}/kerberos5/lib/libroken LIBWINDDIR= ${OBJTOP}/kerberos5/lib/libwind LIBATF_CDIR= ${OBJTOP}/lib/atf/libatf-c LIBATF_CXXDIR= ${OBJTOP}/lib/atf/libatf-c++ LIBALIASDIR= ${OBJTOP}/lib/libalias/libalias LIBBLACKLISTDIR= ${OBJTOP}/lib/libblacklist LIBBLOCKSRUNTIMEDIR= ${OBJTOP}/lib/libblocksruntime LIBBSNMPDIR= ${OBJTOP}/lib/libbsnmp/libbsnmp LIBCASPERDIR= ${OBJTOP}/lib/libcasper/libcasper LIBCAP_DNSDIR= ${OBJTOP}/lib/libcasper/services/cap_dns LIBCAP_GRPDIR= ${OBJTOP}/lib/libcasper/services/cap_grp LIBCAP_PWDDIR= ${OBJTOP}/lib/libcasper/services/cap_pwd LIBCAP_RANDOMDIR= ${OBJTOP}/lib/libcasper/services/cap_random LIBCAP_SYSCTLDIR= ${OBJTOP}/lib/libcasper/services/cap_sysctl LIBCAP_SYSLOGDIR= ${OBJTOP}/lib/libcasper/services/cap_syslog LIBBSDXMLDIR= ${OBJTOP}/lib/libexpat LIBKVMDIR= ${OBJTOP}/lib/libkvm LIBPTHREADDIR= ${OBJTOP}/lib/libthr LIBMDIR= ${OBJTOP}/lib/msun LIBFORMDIR= ${OBJTOP}/lib/ncurses/form LIBFORMLIBWDIR= ${OBJTOP}/lib/ncurses/formw LIBMENUDIR= ${OBJTOP}/lib/ncurses/menu LIBMENULIBWDIR= ${OBJTOP}/lib/ncurses/menuw LIBNCURSESDIR= ${OBJTOP}/lib/ncurses/ncurses LIBNCURSESWDIR= ${OBJTOP}/lib/ncurses/ncursesw LIBPANELDIR= ${OBJTOP}/lib/ncurses/panel LIBPANELWDIR= ${OBJTOP}/lib/ncurses/panelw LIBCRYPTODIR= ${OBJTOP}/secure/lib/libcrypto LIBSSHDIR= ${OBJTOP}/secure/lib/libssh LIBSSLDIR= ${OBJTOP}/secure/lib/libssl LIBTEKENDIR= ${OBJTOP}/sys/teken/libteken LIBEGACYDIR= ${OBJTOP}/tools/build LIBLNDIR= ${OBJTOP}/usr.bin/lex/lib LIBTERMCAPDIR= ${LIBNCURSESDIR} LIBTERMCAPWDIR= ${LIBNCURSESWDIR} # Default other library directories to lib/libNAME. 
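The defaulting rule applied by the .for loop just below: any library without an explicit LIB<NAME>DIR above falls back to ${OBJTOP}/lib/lib<name>, where <NAME> is the make :tu (to-upper) form of the name. A small sketch of that naming convention; the sample names come from _LIBRARIES, and the printed assignments are what the default would expand to, not captured build output.

/* Hedged sketch of the LIB${lib:tu}DIR ?= ${OBJTOP}/lib/lib${lib} default. */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static void
libdir_default(const char *lib)
{
	char upper[64], var[80];
	size_t n = strlen(lib);

	for (size_t i = 0; i < n; i++)
		upper[i] = toupper((unsigned char)lib[i]);
	upper[n] = '\0';
	snprintf(var, sizeof(var), "LIB%sDIR", upper);
	printf("%-16s?= ${OBJTOP}/lib/lib%s\n", var, lib);
}

int
main(void)
{
	const char *libs[] = { "elf", "sbuf", "jail" };

	for (int i = 0; i < 3; i++)
		libdir_default(libs[i]);
	return (0);
}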
.for lib in ${_LIBRARIES} LIB${lib:tu}DIR?= ${OBJTOP}/lib/lib${lib} .endfor # Validate that listed LIBADD are valid. .for _l in ${LIBADD} .if empty(_LIBRARIES:M${_l}) _BADLIBADD+= ${_l} .endif .endfor .if !empty(_BADLIBADD) .error ${.CURDIR}: Invalid LIBADD used which may need to be added to ${_this:T}: ${_BADLIBADD} .endif # Sanity check that libraries are defined here properly when building them. .if defined(LIB) && ${_LIBRARIES:M${LIB}} != "" .if !empty(LIBADD) && \ (!defined(_DP_${LIB}) || ${LIBADD:O:u} != ${_DP_${LIB}:O:u}) .error ${.CURDIR}: Missing or incorrect _DP_${LIB} entry in ${_this:T}. Should match LIBADD for ${LIB} ('${LIBADD}' vs '${_DP_${LIB}}') .endif # Note that OBJTOP is not yet defined here but for the purpose of the check # it is fine as it resolves to the SRC directory. .if !defined(LIB${LIB:tu}DIR) || !exists(${SRCTOP}/${LIB${LIB:tu}DIR:S,^${OBJTOP}/,,}) .error ${.CURDIR}: Missing or incorrect value for LIB${LIB:tu}DIR in ${_this:T}: ${LIB${LIB:tu}DIR:S,^${OBJTOP}/,,} .endif .if ${_INTERNALLIBS:M${LIB}} != "" && !defined(LIB${LIB:tu}) .error ${.CURDIR}: Missing value for LIB${LIB:tu} in ${_this:T}. Likely should be: LIB${LIB:tu}?= $${LIB${LIB:tu}DIR}/lib${LIB}.a .endif .endif .endif # !target(____) Index: projects/clang700-import/sys/amd64/amd64/machdep.c =================================================================== --- projects/clang700-import/sys/amd64/amd64/machdep.c (revision 340426) +++ projects/clang700-import/sys/amd64/amd64/machdep.c (revision 340427) @@ -1,2737 +1,2738 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2003 Peter Wemm. * Copyright (c) 1992 Terrence R. Lambert. * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 */ #include __FBSDID("$FreeBSD$"); #include "opt_atpic.h" #include "opt_cpu.h" #include "opt_ddb.h" #include "opt_inet.h" #include "opt_isa.h" #include "opt_kstack_pages.h" #include "opt_maxmem.h" #include "opt_mp_watchdog.h" #include "opt_pci.h" #include "opt_platform.h" #include "opt_sched.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef SMP #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #ifndef KDB #error KDB must be enabled in order for DDB to work! #endif #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef SMP #include #endif #ifdef FDT #include #endif #ifdef DEV_ATPIC #include #else #include #endif #include #include #include /* Sanity check for __curthread() */ CTASSERT(offsetof(struct pcpu, pc_curthread) == 0); /* * The PTI trampoline stack needs enough space for a hardware trapframe and a * couple of scratch registers, as well as the trapframe left behind after an * iret fault. */ CTASSERT(PC_PTI_STACK_SZ * sizeof(register_t) >= 2 * sizeof(struct pti_frame) - offsetof(struct pti_frame, pti_rip)); extern u_int64_t hammer_time(u_int64_t, u_int64_t); #define CS_SECURE(cs) (ISPL(cs) == SEL_UPL) #define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) static void cpu_startup(void *); static void get_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpusave, size_t xfpusave_len); static int set_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpustate, size_t xfpustate_len); SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); /* Preload data parse function */ static caddr_t native_parse_preload_data(u_int64_t); /* Native function to fetch and parse the e820 map */ static void native_parse_memmap(caddr_t, vm_paddr_t *, int *); /* Default init_ops implementation. */ struct init_ops init_ops = { .parse_preload_data = native_parse_preload_data, .early_clock_source_init = i8254_init, .early_delay = i8254_delay, .parse_memmap = native_parse_memmap, #ifdef SMP .mp_bootaddress = mp_bootaddress, .start_all_aps = native_start_all_aps, #endif #ifdef DEV_PCI .msi_init = msi_init, #endif }; /* * Physical address of the EFI System Table. Stashed from the metadata hints * passed into the kernel and used by the EFI code to call runtime services. */ vm_paddr_t efi_systbl_phys; /* Intel ICH registers */ #define ICH_PMBASE 0x400 #define ICH_SMI_EN ICH_PMBASE + 0x30 int _udatasel, _ucodesel, _ucode32sel, _ufssel, _ugssel; int cold = 1; long Maxmem = 0; long realmem = 0; /* * The number of PHYSMAP entries must be one less than the number of * PHYSSEG entries because the PHYSMAP entry that spans the largest * physical address that is accessible by ISA DMA is split into two * PHYSSEG entries. 
*/ #define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1)) vm_paddr_t phys_avail[PHYSMAP_SIZE + 2]; vm_paddr_t dump_avail[PHYSMAP_SIZE + 2]; /* must be 2 less so 0 0 can signal end of chunks */ #define PHYS_AVAIL_ARRAY_END (nitems(phys_avail) - 2) #define DUMP_AVAIL_ARRAY_END (nitems(dump_avail) - 2) struct kva_md_info kmi; static struct trapframe proc0_tf; struct region_descriptor r_gdt, r_idt; struct pcpu __pcpu[MAXCPU]; struct mtx icu_lock; struct mem_range_softc mem_range_softc; struct mtx dt_lock; /* lock for GDT and LDT */ void (*vmm_resume_p)(void); static void cpu_startup(dummy) void *dummy; { uintmax_t memsize; char *sysenv; /* * On MacBooks, we need to disallow the legacy USB circuit to * generate an SMI# because this can cause several problems, * namely: incorrect CPU frequency detection and failure to * start the APs. * We do this by disabling a bit in the SMI_EN (SMI Control and * Enable register) of the Intel ICH LPC Interface Bridge. */ sysenv = kern_getenv("smbios.system.product"); if (sysenv != NULL) { if (strncmp(sysenv, "MacBook1,1", 10) == 0 || strncmp(sysenv, "MacBook3,1", 10) == 0 || strncmp(sysenv, "MacBook4,1", 10) == 0 || strncmp(sysenv, "MacBookPro1,1", 13) == 0 || strncmp(sysenv, "MacBookPro1,2", 13) == 0 || strncmp(sysenv, "MacBookPro3,1", 13) == 0 || strncmp(sysenv, "MacBookPro4,1", 13) == 0 || strncmp(sysenv, "Macmini1,1", 10) == 0) { if (bootverbose) printf("Disabling LEGACY_USB_EN bit on " "Intel ICH.\n"); outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8); } freeenv(sysenv); } /* * Good {morning,afternoon,evening,night}. */ startrtclock(); printcpuinfo(); /* * Display physical memory if SMBIOS reports reasonable amount. */ memsize = 0; sysenv = kern_getenv("smbios.memory.enabled"); if (sysenv != NULL) { memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10; freeenv(sysenv); } if (memsize < ptoa((uintmax_t)vm_free_count())) memsize = ptoa((uintmax_t)Maxmem); printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20); realmem = atop(memsize); /* * Display any holes after the first chunk of extended memory. */ if (bootverbose) { int indx; printf("Physical memory chunk(s):\n"); for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) { vm_paddr_t size; size = phys_avail[indx + 1] - phys_avail[indx]; printf( "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n", (uintmax_t)phys_avail[indx], (uintmax_t)phys_avail[indx + 1] - 1, (uintmax_t)size, (uintmax_t)size / PAGE_SIZE); } } vm_ksubmap_init(&kmi); printf("avail memory = %ju (%ju MB)\n", ptoa((uintmax_t)vm_free_count()), ptoa((uintmax_t)vm_free_count()) / 1048576); #ifdef DEV_PCI if (bootverbose && intel_graphics_stolen_base != 0) printf("intel stolen mem: base %#jx size %ju MB\n", (uintmax_t)intel_graphics_stolen_base, (uintmax_t)intel_graphics_stolen_size / 1024 / 1024); #endif /* * Set up buffers, so they can be used to read disk labels. */ bufinit(); vm_pager_bufferinit(); cpu_setregs(); } /* * Send an interrupt to process. * * Stack is set up to allow sigcode stored * at top to call routine, followed by call * to sigreturn routine below. After sigreturn * resets the signal mask, the stack, and the * frame pointer, it returns to the user * specified pc, psl. 
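sendsig() just below places the signal frame on the user stack with three pieces of arithmetic: skip 128 bytes so the amd64 red zone is preserved, carve out the extended FPU state aligned down to 64 bytes for XSAVE, then align the sigframe itself down to 16 bytes. A hedged sketch of that layout math with made-up addresses and sizes; only the order of operations and the masks mirror the kernel code.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uintptr_t sp = 0x7fffffffe9b8;		/* pretend user %rsp */
	size_t xfpusave_len = 0x340;		/* pretend XSAVE extension size */
	size_t sizeof_sigframe = 0x4e0;		/* pretend sizeof(struct sigframe) */
	uintptr_t xfpustate, sfp;

	sp -= 128;				/* do not touch the red zone */
	sp -= xfpusave_len;
	sp &= ~(uintptr_t)0x3f;			/* XSAVE area: 64-byte aligned */
	xfpustate = sp;
	sp -= sizeof_sigframe;
	sfp = sp & ~(uintptr_t)0xf;		/* sigframe: 16-byte aligned */

	printf("xfpustate at %#jx, sigframe at %#jx\n",
	    (uintmax_t)xfpustate, (uintmax_t)sfp);
	return (0);
}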
*/ void sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) { struct sigframe sf, *sfp; struct pcb *pcb; struct proc *p; struct thread *td; struct sigacts *psp; char *sp; struct trapframe *regs; char *xfpusave; size_t xfpusave_len; int sig; int oonstack; td = curthread; pcb = td->td_pcb; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); regs = td->td_frame; oonstack = sigonstack(regs->tf_rsp); if (cpu_max_ext_state_size > sizeof(struct savefpu) && use_xsave) { xfpusave_len = cpu_max_ext_state_size - sizeof(struct savefpu); xfpusave = __builtin_alloca(xfpusave_len); } else { xfpusave_len = 0; xfpusave = NULL; } /* Save user context. */ bzero(&sf, sizeof(sf)); sf.sf_uc.uc_sigmask = *mask; sf.sf_uc.uc_stack = td->td_sigstk; sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs)); sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */ get_fpcontext(td, &sf.sf_uc.uc_mcontext, xfpusave, xfpusave_len); fpstate_drop(td); update_pcb_bases(pcb); sf.sf_uc.uc_mcontext.mc_fsbase = pcb->pcb_fsbase; sf.sf_uc.uc_mcontext.mc_gsbase = pcb->pcb_gsbase; bzero(sf.sf_uc.uc_mcontext.mc_spare, sizeof(sf.sf_uc.uc_mcontext.mc_spare)); bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__)); /* Allocate space for the signal handler context. */ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { sp = (char *)td->td_sigstk.ss_sp + td->td_sigstk.ss_size; #if defined(COMPAT_43) td->td_sigstk.ss_flags |= SS_ONSTACK; #endif } else sp = (char *)regs->tf_rsp - 128; if (xfpusave != NULL) { sp -= xfpusave_len; sp = (char *)((unsigned long)sp & ~0x3Ful); sf.sf_uc.uc_mcontext.mc_xfpustate = (register_t)sp; } sp -= sizeof(struct sigframe); /* Align to 16 bytes. */ sfp = (struct sigframe *)((unsigned long)sp & ~0xFul); /* Build the argument list for the signal handler. */ regs->tf_rdi = sig; /* arg 1 in %rdi */ regs->tf_rdx = (register_t)&sfp->sf_uc; /* arg 3 in %rdx */ bzero(&sf.sf_si, sizeof(sf.sf_si)); if (SIGISMEMBER(psp->ps_siginfo, sig)) { /* Signal handler installed with SA_SIGINFO. */ regs->tf_rsi = (register_t)&sfp->sf_si; /* arg 2 in %rsi */ sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; /* Fill in POSIX parts */ sf.sf_si = ksi->ksi_info; sf.sf_si.si_signo = sig; /* maybe a translated signal */ regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */ } else { /* Old FreeBSD-style arguments. */ regs->tf_rsi = ksi->ksi_code; /* arg 2 in %rsi */ regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */ sf.sf_ahu.sf_handler = catcher; } mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(p); /* * Copy the sigframe out to the user's stack. */ if (copyout(&sf, sfp, sizeof(*sfp)) != 0 || (xfpusave != NULL && copyout(xfpusave, (void *)sf.sf_uc.uc_mcontext.mc_xfpustate, xfpusave_len) != 0)) { #ifdef DEBUG printf("process %ld has trashed its stack\n", (long)p->p_pid); #endif PROC_LOCK(p); sigexit(td, SIGILL); } regs->tf_rsp = (long)sfp; regs->tf_rip = p->p_sysent->sv_sigcode_base; regs->tf_rflags &= ~(PSL_T | PSL_D); regs->tf_cs = _ucodesel; regs->tf_ds = _udatasel; regs->tf_ss = _udatasel; regs->tf_es = _udatasel; regs->tf_fs = _ufssel; regs->tf_gs = _ugssel; regs->tf_flags = TF_HASSEGS; PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } /* * System call to cleanup state after a signal * has been taken. 
Reset signal mask and * stack state from context left by sendsig (above). * Return to previous pc and psl as specified by * context left by sendsig. Check carefully to * make sure that the user has not modified the * state to gain improper privileges. * * MPSAFE */ int sys_sigreturn(td, uap) struct thread *td; struct sigreturn_args /* { const struct __ucontext *sigcntxp; } */ *uap; { ucontext_t uc; struct pcb *pcb; struct proc *p; struct trapframe *regs; ucontext_t *ucp; char *xfpustate; size_t xfpustate_len; long rflags; int cs, error, ret; ksiginfo_t ksi; pcb = td->td_pcb; p = td->td_proc; error = copyin(uap->sigcntxp, &uc, sizeof(uc)); if (error != 0) { uprintf("pid %d (%s): sigreturn copyin failed\n", p->p_pid, td->td_name); return (error); } ucp = &uc; if ((ucp->uc_mcontext.mc_flags & ~_MC_FLAG_MASK) != 0) { uprintf("pid %d (%s): sigreturn mc_flags %x\n", p->p_pid, td->td_name, ucp->uc_mcontext.mc_flags); return (EINVAL); } regs = td->td_frame; rflags = ucp->uc_mcontext.mc_rflags; /* * Don't allow users to change privileged or reserved flags. */ if (!EFL_SECURE(rflags, regs->tf_rflags)) { uprintf("pid %d (%s): sigreturn rflags = 0x%lx\n", p->p_pid, td->td_name, rflags); return (EINVAL); } /* * Don't allow users to load a valid privileged %cs. Let the * hardware check for invalid selectors, excess privilege in * other selectors, invalid %eip's and invalid %esp's. */ cs = ucp->uc_mcontext.mc_cs; if (!CS_SECURE(cs)) { uprintf("pid %d (%s): sigreturn cs = 0x%x\n", p->p_pid, td->td_name, cs); ksiginfo_init_trap(&ksi); ksi.ksi_signo = SIGBUS; ksi.ksi_code = BUS_OBJERR; ksi.ksi_trapno = T_PROTFLT; ksi.ksi_addr = (void *)regs->tf_rip; trapsignal(td, &ksi); return (EINVAL); } if ((uc.uc_mcontext.mc_flags & _MC_HASFPXSTATE) != 0) { xfpustate_len = uc.uc_mcontext.mc_xfpustate_len; if (xfpustate_len > cpu_max_ext_state_size - sizeof(struct savefpu)) { uprintf("pid %d (%s): sigreturn xfpusave_len = 0x%zx\n", p->p_pid, td->td_name, xfpustate_len); return (EINVAL); } xfpustate = __builtin_alloca(xfpustate_len); error = copyin((const void *)uc.uc_mcontext.mc_xfpustate, xfpustate, xfpustate_len); if (error != 0) { uprintf( "pid %d (%s): sigreturn copying xfpustate failed\n", p->p_pid, td->td_name); return (error); } } else { xfpustate = NULL; xfpustate_len = 0; } ret = set_fpcontext(td, &ucp->uc_mcontext, xfpustate, xfpustate_len); if (ret != 0) { uprintf("pid %d (%s): sigreturn set_fpcontext err %d\n", p->p_pid, td->td_name, ret); return (ret); } bcopy(&ucp->uc_mcontext.mc_rdi, regs, sizeof(*regs)); update_pcb_bases(pcb); pcb->pcb_fsbase = ucp->uc_mcontext.mc_fsbase; pcb->pcb_gsbase = ucp->uc_mcontext.mc_gsbase; #if defined(COMPAT_43) if (ucp->uc_mcontext.mc_onstack & 1) td->td_sigstk.ss_flags |= SS_ONSTACK; else td->td_sigstk.ss_flags &= ~SS_ONSTACK; #endif kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0); return (EJUSTRETURN); } #ifdef COMPAT_FREEBSD4 int freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap) { return sys_sigreturn(td, (struct sigreturn_args *)uap); } #endif /* * Reset registers to default values on exec. 
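 *
 * The newly exec'd image starts with %rip at the entry point, %rsp aligned
 * so it looks like the result of a call (16-byte aligned minus 8), %rdi
 * pointing at the argument block on the stack, and flat user segment
 * selectors; only PSL_T survives from the old register state, so an
 * attached debugger keeps single-stepping across the exec.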
*/ void exec_setregs(struct thread *td, struct image_params *imgp, u_long stack) { struct trapframe *regs; struct pcb *pcb; register_t saved_rflags; regs = td->td_frame; pcb = td->td_pcb; if (td->td_proc->p_md.md_ldt != NULL) user_ldt_free(td); update_pcb_bases(pcb); pcb->pcb_fsbase = 0; pcb->pcb_gsbase = 0; clear_pcb_flags(pcb, PCB_32BIT); pcb->pcb_initial_fpucw = __INITIAL_FPUCW__; saved_rflags = regs->tf_rflags & PSL_T; bzero((char *)regs, sizeof(struct trapframe)); regs->tf_rip = imgp->entry_addr; regs->tf_rsp = ((stack - 8) & ~0xFul) + 8; regs->tf_rdi = stack; /* argv */ regs->tf_rflags = PSL_USER | saved_rflags; regs->tf_ss = _udatasel; regs->tf_cs = _ucodesel; regs->tf_ds = _udatasel; regs->tf_es = _udatasel; regs->tf_fs = _ufssel; regs->tf_gs = _ugssel; regs->tf_flags = TF_HASSEGS; /* * Reset the hardware debug registers if they were in use. * They won't have any meaning for the newly exec'd process. */ if (pcb->pcb_flags & PCB_DBREGS) { pcb->pcb_dr0 = 0; pcb->pcb_dr1 = 0; pcb->pcb_dr2 = 0; pcb->pcb_dr3 = 0; pcb->pcb_dr6 = 0; pcb->pcb_dr7 = 0; if (pcb == curpcb) { /* * Clear the debug registers on the running * CPU, otherwise they will end up affecting * the next process we switch to. */ reset_dbregs(); } clear_pcb_flags(pcb, PCB_DBREGS); } /* * Drop the FP state if we hold it, so that the process gets a * clean FP state if it uses the FPU again. */ fpstate_drop(td); } void cpu_setregs(void) { register_t cr0; cr0 = rcr0(); /* * CR0_MP, CR0_NE and CR0_TS are also set by npx_probe() for the * BSP. See the comments there about why we set them. */ cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM; load_cr0(cr0); } /* * Initialize amd64 and configure to run kernel */ /* * Initialize segments & interrupt table */ struct user_segment_descriptor gdt[NGDT * MAXCPU];/* global descriptor tables */ static struct gate_descriptor idt0[NIDT]; struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */ static char dblfault_stack[PAGE_SIZE] __aligned(16); static char mce0_stack[PAGE_SIZE] __aligned(16); static char nmi0_stack[PAGE_SIZE] __aligned(16); static char dbg0_stack[PAGE_SIZE] __aligned(16); CTASSERT(sizeof(struct nmi_pcpu) == 16); struct amd64tss common_tss[MAXCPU]; /* * Software prototypes -- in more palatable form. * * Keep GUFS32, GUGS32, GUCODE32 and GUDATA at the same * slots as corresponding segments for i386 kernel. 
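 *
 * The slot numbers matter because user-visible selector values encode the
 * GDT index directly: GSEL(sel, rpl) expands to roughly ((sel) << 3) | rpl,
 * so, for example, GSEL(GUCODE32_SEL, SEL_UPL) is (6 << 3) | 3 == 0x33.
 * Keeping the 32-bit user slots fixed therefore keeps the selector values
 * seen by 32-bit binaries unchanged.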
*/ struct soft_segment_descriptor gdt_segs[] = { /* GNULL_SEL 0 Null Descriptor */ { .ssd_base = 0x0, .ssd_limit = 0x0, .ssd_type = 0, .ssd_dpl = 0, .ssd_p = 0, .ssd_long = 0, .ssd_def32 = 0, .ssd_gran = 0 }, /* GNULL2_SEL 1 Null Descriptor */ { .ssd_base = 0x0, .ssd_limit = 0x0, .ssd_type = 0, .ssd_dpl = 0, .ssd_p = 0, .ssd_long = 0, .ssd_def32 = 0, .ssd_gran = 0 }, /* GUFS32_SEL 2 32 bit %gs Descriptor for user */ { .ssd_base = 0x0, .ssd_limit = 0xfffff, .ssd_type = SDT_MEMRWA, .ssd_dpl = SEL_UPL, .ssd_p = 1, .ssd_long = 0, .ssd_def32 = 1, .ssd_gran = 1 }, /* GUGS32_SEL 3 32 bit %fs Descriptor for user */ { .ssd_base = 0x0, .ssd_limit = 0xfffff, .ssd_type = SDT_MEMRWA, .ssd_dpl = SEL_UPL, .ssd_p = 1, .ssd_long = 0, .ssd_def32 = 1, .ssd_gran = 1 }, /* GCODE_SEL 4 Code Descriptor for kernel */ { .ssd_base = 0x0, .ssd_limit = 0xfffff, .ssd_type = SDT_MEMERA, .ssd_dpl = SEL_KPL, .ssd_p = 1, .ssd_long = 1, .ssd_def32 = 0, .ssd_gran = 1 }, /* GDATA_SEL 5 Data Descriptor for kernel */ { .ssd_base = 0x0, .ssd_limit = 0xfffff, .ssd_type = SDT_MEMRWA, .ssd_dpl = SEL_KPL, .ssd_p = 1, .ssd_long = 1, .ssd_def32 = 0, .ssd_gran = 1 }, /* GUCODE32_SEL 6 32 bit Code Descriptor for user */ { .ssd_base = 0x0, .ssd_limit = 0xfffff, .ssd_type = SDT_MEMERA, .ssd_dpl = SEL_UPL, .ssd_p = 1, .ssd_long = 0, .ssd_def32 = 1, .ssd_gran = 1 }, /* GUDATA_SEL 7 32/64 bit Data Descriptor for user */ { .ssd_base = 0x0, .ssd_limit = 0xfffff, .ssd_type = SDT_MEMRWA, .ssd_dpl = SEL_UPL, .ssd_p = 1, .ssd_long = 0, .ssd_def32 = 1, .ssd_gran = 1 }, /* GUCODE_SEL 8 64 bit Code Descriptor for user */ { .ssd_base = 0x0, .ssd_limit = 0xfffff, .ssd_type = SDT_MEMERA, .ssd_dpl = SEL_UPL, .ssd_p = 1, .ssd_long = 1, .ssd_def32 = 0, .ssd_gran = 1 }, /* GPROC0_SEL 9 Proc 0 Tss Descriptor */ { .ssd_base = 0x0, .ssd_limit = sizeof(struct amd64tss) + IOPERM_BITMAP_SIZE - 1, .ssd_type = SDT_SYSTSS, .ssd_dpl = SEL_KPL, .ssd_p = 1, .ssd_long = 0, .ssd_def32 = 0, .ssd_gran = 0 }, /* Actually, the TSS is a system descriptor which is double size */ { .ssd_base = 0x0, .ssd_limit = 0x0, .ssd_type = 0, .ssd_dpl = 0, .ssd_p = 0, .ssd_long = 0, .ssd_def32 = 0, .ssd_gran = 0 }, /* GUSERLDT_SEL 11 LDT Descriptor */ { .ssd_base = 0x0, .ssd_limit = 0x0, .ssd_type = 0, .ssd_dpl = 0, .ssd_p = 0, .ssd_long = 0, .ssd_def32 = 0, .ssd_gran = 0 }, /* GUSERLDT_SEL 12 LDT Descriptor, double size */ { .ssd_base = 0x0, .ssd_limit = 0x0, .ssd_type = 0, .ssd_dpl = 0, .ssd_p = 0, .ssd_long = 0, .ssd_def32 = 0, .ssd_gran = 0 }, }; void setidt(int idx, inthand_t *func, int typ, int dpl, int ist) { struct gate_descriptor *ip; ip = idt + idx; ip->gd_looffset = (uintptr_t)func; ip->gd_selector = GSEL(GCODE_SEL, SEL_KPL); ip->gd_ist = ist; ip->gd_xx = 0; ip->gd_type = typ; ip->gd_dpl = dpl; ip->gd_p = 1; ip->gd_hioffset = ((uintptr_t)func)>>16 ; } extern inthand_t IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl), IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm), IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot), IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align), IDTVEC(xmm), IDTVEC(dblfault), IDTVEC(div_pti), IDTVEC(bpt_pti), IDTVEC(ofl_pti), IDTVEC(bnd_pti), IDTVEC(ill_pti), IDTVEC(dna_pti), IDTVEC(fpusegm_pti), IDTVEC(tss_pti), IDTVEC(missing_pti), IDTVEC(stk_pti), IDTVEC(prot_pti), IDTVEC(page_pti), IDTVEC(rsvd_pti), IDTVEC(fpu_pti), IDTVEC(align_pti), IDTVEC(xmm_pti), #ifdef KDTRACE_HOOKS IDTVEC(dtrace_ret), IDTVEC(dtrace_ret_pti), #endif #ifdef XENHVM IDTVEC(xen_intr_upcall), IDTVEC(xen_intr_upcall_pti), #endif 
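/*
 * The fast_syscall entry points declared next are never installed in the
 * IDT; amd64_conf_fast_syscall() below loads them into MSR_LSTAR and
 * MSR_CSTAR so that SYSCALL from 64-bit and 32-bit code jumps to them
 * directly.
 */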
IDTVEC(fast_syscall), IDTVEC(fast_syscall32), IDTVEC(fast_syscall_pti); #ifdef DDB /* * Display the index and function name of any IDT entries that don't use * the default 'rsvd' entry point. */ DB_SHOW_COMMAND(idt, db_show_idt) { struct gate_descriptor *ip; int idx; uintptr_t func; ip = idt; for (idx = 0; idx < NIDT && !db_pager_quit; idx++) { func = ((long)ip->gd_hioffset << 16 | ip->gd_looffset); if (func != (uintptr_t)&IDTVEC(rsvd)) { db_printf("%3d\t", idx); db_printsym(func, DB_STGY_PROC); db_printf("\n"); } ip++; } } /* Show privileged registers. */ DB_SHOW_COMMAND(sysregs, db_show_sysregs) { struct { uint16_t limit; uint64_t base; } __packed idtr, gdtr; uint16_t ldt, tr; __asm __volatile("sidt %0" : "=m" (idtr)); db_printf("idtr\t0x%016lx/%04x\n", (u_long)idtr.base, (u_int)idtr.limit); __asm __volatile("sgdt %0" : "=m" (gdtr)); db_printf("gdtr\t0x%016lx/%04x\n", (u_long)gdtr.base, (u_int)gdtr.limit); __asm __volatile("sldt %0" : "=r" (ldt)); db_printf("ldtr\t0x%04x\n", ldt); __asm __volatile("str %0" : "=r" (tr)); db_printf("tr\t0x%04x\n", tr); db_printf("cr0\t0x%016lx\n", rcr0()); db_printf("cr2\t0x%016lx\n", rcr2()); db_printf("cr3\t0x%016lx\n", rcr3()); db_printf("cr4\t0x%016lx\n", rcr4()); if (rcr4() & CR4_XSAVE) db_printf("xcr0\t0x%016lx\n", rxcr(0)); db_printf("EFER\t0x%016lx\n", rdmsr(MSR_EFER)); if (cpu_feature2 & (CPUID2_VMX | CPUID2_SMX)) db_printf("FEATURES_CTL\t%016lx\n", rdmsr(MSR_IA32_FEATURE_CONTROL)); db_printf("DEBUG_CTL\t0x%016lx\n", rdmsr(MSR_DEBUGCTLMSR)); db_printf("PAT\t0x%016lx\n", rdmsr(MSR_PAT)); db_printf("GSBASE\t0x%016lx\n", rdmsr(MSR_GSBASE)); } DB_SHOW_COMMAND(dbregs, db_show_dbregs) { db_printf("dr0\t0x%016lx\n", rdr0()); db_printf("dr1\t0x%016lx\n", rdr1()); db_printf("dr2\t0x%016lx\n", rdr2()); db_printf("dr3\t0x%016lx\n", rdr3()); db_printf("dr6\t0x%016lx\n", rdr6()); db_printf("dr7\t0x%016lx\n", rdr7()); } #endif void sdtossd(sd, ssd) struct user_segment_descriptor *sd; struct soft_segment_descriptor *ssd; { ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase; ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit; ssd->ssd_type = sd->sd_type; ssd->ssd_dpl = sd->sd_dpl; ssd->ssd_p = sd->sd_p; ssd->ssd_long = sd->sd_long; ssd->ssd_def32 = sd->sd_def32; ssd->ssd_gran = sd->sd_gran; } void ssdtosd(ssd, sd) struct soft_segment_descriptor *ssd; struct user_segment_descriptor *sd; { sd->sd_lobase = (ssd->ssd_base) & 0xffffff; sd->sd_hibase = (ssd->ssd_base >> 24) & 0xff; sd->sd_lolimit = (ssd->ssd_limit) & 0xffff; sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf; sd->sd_type = ssd->ssd_type; sd->sd_dpl = ssd->ssd_dpl; sd->sd_p = ssd->ssd_p; sd->sd_long = ssd->ssd_long; sd->sd_def32 = ssd->ssd_def32; sd->sd_gran = ssd->ssd_gran; } void ssdtosyssd(ssd, sd) struct soft_segment_descriptor *ssd; struct system_segment_descriptor *sd; { sd->sd_lobase = (ssd->ssd_base) & 0xffffff; sd->sd_hibase = (ssd->ssd_base >> 24) & 0xfffffffffful; sd->sd_lolimit = (ssd->ssd_limit) & 0xffff; sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf; sd->sd_type = ssd->ssd_type; sd->sd_dpl = ssd->ssd_dpl; sd->sd_p = ssd->ssd_p; sd->sd_gran = ssd->ssd_gran; } #if !defined(DEV_ATPIC) && defined(DEV_ISA) #include #include /* * Return a bitmap of the current interrupt requests. This is 8259-specific * and is only suitable for use at probe time. * This is only here to pacify sio. It is NOT FATAL if this doesn't work. * It shouldn't be here. There should probably be an APIC centric * implementation in the apic driver code, if at all. 
*/ intrmask_t isa_irq_pending(void) { u_char irr1; u_char irr2; irr1 = inb(IO_ICU1); irr2 = inb(IO_ICU2); return ((irr2 << 8) | irr1); } #endif u_int basemem; static int add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap, int *physmap_idxp) { int i, insert_idx, physmap_idx; physmap_idx = *physmap_idxp; if (length == 0) return (1); /* * Find insertion point while checking for overlap. Start off by * assuming the new entry will be added to the end. * * NB: physmap_idx points to the next free slot. */ insert_idx = physmap_idx; for (i = 0; i <= physmap_idx; i += 2) { if (base < physmap[i + 1]) { if (base + length <= physmap[i]) { insert_idx = i; break; } if (boothowto & RB_VERBOSE) printf( "Overlapping memory regions, ignoring second region\n"); return (1); } } /* See if we can prepend to the next entry. */ if (insert_idx <= physmap_idx && base + length == physmap[insert_idx]) { physmap[insert_idx] = base; return (1); } /* See if we can append to the previous entry. */ if (insert_idx > 0 && base == physmap[insert_idx - 1]) { physmap[insert_idx - 1] += length; return (1); } physmap_idx += 2; *physmap_idxp = physmap_idx; if (physmap_idx == PHYSMAP_SIZE) { printf( "Too many segments in the physical address map, giving up\n"); return (0); } /* * Move the last 'N' entries down to make room for the new * entry if needed. */ for (i = (physmap_idx - 2); i > insert_idx; i -= 2) { physmap[i] = physmap[i - 2]; physmap[i + 1] = physmap[i - 1]; } /* Insert the new entry. */ physmap[insert_idx] = base; physmap[insert_idx + 1] = base + length; return (1); } void bios_add_smap_entries(struct bios_smap *smapbase, u_int32_t smapsize, vm_paddr_t *physmap, int *physmap_idx) { struct bios_smap *smap, *smapend; smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize); for (smap = smapbase; smap < smapend; smap++) { if (boothowto & RB_VERBOSE) printf("SMAP type=%02x base=%016lx len=%016lx\n", smap->type, smap->base, smap->length); if (smap->type != SMAP_TYPE_MEMORY) continue; if (!add_physmap_entry(smap->base, smap->length, physmap, physmap_idx)) break; } } static void add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap, int *physmap_idx) { struct efi_md *map, *p; const char *type; size_t efisz; int ndesc, i; static const char *types[] = { "Reserved", "LoaderCode", "LoaderData", "BootServicesCode", "BootServicesData", "RuntimeServicesCode", "RuntimeServicesData", "ConventionalMemory", "UnusableMemory", "ACPIReclaimMemory", "ACPIMemoryNVS", "MemoryMappedIO", "MemoryMappedIOPortSpace", "PalCode", "PersistentMemory" }; /* * Memory map data provided by UEFI via the GetMemoryMap * Boot Services API. 
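 *
 * The header is followed by ndesc descriptors of descriptor_size bytes
 * each.  descriptor_size may be larger than sizeof(struct efi_md), so the
 * walk below advances by the firmware-supplied stride; efi_next_descriptor()
 * is essentially (illustrative, assuming the usual definition):
 *
 *	p = (struct efi_md *)((uint8_t *)p + efihdr->descriptor_size);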
*/ efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf; map = (struct efi_md *)((uint8_t *)efihdr + efisz); if (efihdr->descriptor_size == 0) return; ndesc = efihdr->memory_size / efihdr->descriptor_size; if (boothowto & RB_VERBOSE) printf("%23s %12s %12s %8s %4s\n", "Type", "Physical", "Virtual", "#Pages", "Attr"); for (i = 0, p = map; i < ndesc; i++, p = efi_next_descriptor(p, efihdr->descriptor_size)) { if (boothowto & RB_VERBOSE) { if (p->md_type < nitems(types)) type = types[p->md_type]; else type = ""; printf("%23s %012lx %12p %08lx ", type, p->md_phys, p->md_virt, p->md_pages); if (p->md_attr & EFI_MD_ATTR_UC) printf("UC "); if (p->md_attr & EFI_MD_ATTR_WC) printf("WC "); if (p->md_attr & EFI_MD_ATTR_WT) printf("WT "); if (p->md_attr & EFI_MD_ATTR_WB) printf("WB "); if (p->md_attr & EFI_MD_ATTR_UCE) printf("UCE "); if (p->md_attr & EFI_MD_ATTR_WP) printf("WP "); if (p->md_attr & EFI_MD_ATTR_RP) printf("RP "); if (p->md_attr & EFI_MD_ATTR_XP) printf("XP "); if (p->md_attr & EFI_MD_ATTR_NV) printf("NV "); if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE) printf("MORE_RELIABLE "); if (p->md_attr & EFI_MD_ATTR_RO) printf("RO "); if (p->md_attr & EFI_MD_ATTR_RT) printf("RUNTIME"); printf("\n"); } switch (p->md_type) { case EFI_MD_TYPE_CODE: case EFI_MD_TYPE_DATA: case EFI_MD_TYPE_BS_CODE: case EFI_MD_TYPE_BS_DATA: case EFI_MD_TYPE_FREE: /* * We're allowed to use any entry with these types. */ break; default: continue; } if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE), physmap, physmap_idx)) break; } } static char bootmethod[16] = ""; SYSCTL_STRING(_machdep, OID_AUTO, bootmethod, CTLFLAG_RD, bootmethod, 0, "System firmware boot method"); static void native_parse_memmap(caddr_t kmdp, vm_paddr_t *physmap, int *physmap_idx) { struct bios_smap *smap; struct efi_map_header *efihdr; u_int32_t size; /* * Memory map from INT 15:E820. * * subr_module.c says: * "Consumer may safely assume that size value precedes data." * ie: an int32_t immediately precedes smap. */ efihdr = (struct efi_map_header *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_EFI_MAP); smap = (struct bios_smap *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_SMAP); if (efihdr == NULL && smap == NULL) panic("No BIOS smap or EFI map info from loader!"); if (efihdr != NULL) { add_efi_map_entries(efihdr, physmap, physmap_idx); strlcpy(bootmethod, "UEFI", sizeof(bootmethod)); } else { size = *((u_int32_t *)smap - 1); bios_add_smap_entries(smap, size, physmap, physmap_idx); strlcpy(bootmethod, "BIOS", sizeof(bootmethod)); } } #define PAGES_PER_GB (1024 * 1024 * 1024 / PAGE_SIZE) /* * Populate the (physmap) array with base/bound pairs describing the * available physical memory in the system, then test this memory and * build the phys_avail array describing the actually-available memory. * * Total memory size may be set by the kernel environment variable * hw.physmem or the compile-time define MAXMEM. * * XXX first should be vm_paddr_t. */ static void getmemsize(caddr_t kmdp, u_int64_t first) { int i, physmap_idx, pa_indx, da_indx; vm_paddr_t pa, physmap[PHYSMAP_SIZE]; u_long physmem_start, physmem_tunable, memtest; pt_entry_t *pte; quad_t dcons_addr, dcons_size; int page_counter; /* * Tell the physical memory allocator about pages used to store * the kernel and preloaded data. See kmem_bootstrap_free(). 
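 * (kernphys is the kernel's physical load address and 'first' is the first
 * free physical address above the kernel image and any loader-supplied
 * modules, so the segment registered below covers exactly that data.)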
*/ vm_phys_add_seg((vm_paddr_t)kernphys, trunc_page(first)); bzero(physmap, sizeof(physmap)); physmap_idx = 0; init_ops.parse_memmap(kmdp, physmap, &physmap_idx); physmap_idx -= 2; /* * Find the 'base memory' segment for SMP */ basemem = 0; for (i = 0; i <= physmap_idx; i += 2) { if (physmap[i] <= 0xA0000) { basemem = physmap[i + 1] / 1024; break; } } if (basemem == 0 || basemem > 640) { if (bootverbose) printf( "Memory map doesn't contain a basemem segment, faking it"); basemem = 640; } /* * Maxmem isn't the "maximum memory", it's one larger than the * highest page of the physical address space. It should be * called something like "Maxphyspage". We may adjust this * based on ``hw.physmem'' and the results of the memory test. */ Maxmem = atop(physmap[physmap_idx + 1]); #ifdef MAXMEM Maxmem = MAXMEM / 4; #endif if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable)) Maxmem = atop(physmem_tunable); /* * The boot memory test is disabled by default, as it takes a * significant amount of time on large-memory systems, and is * unfriendly to virtual machines as it unnecessarily touches all * pages. * * A general name is used as the code may be extended to support * additional tests beyond the current "page present" test. */ memtest = 0; TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest); /* * Don't allow MAXMEM or hw.physmem to extend the amount of memory * in the system. */ if (Maxmem > atop(physmap[physmap_idx + 1])) Maxmem = atop(physmap[physmap_idx + 1]); if (atop(physmap[physmap_idx + 1]) != Maxmem && (boothowto & RB_VERBOSE)) printf("Physical memory use set to %ldK\n", Maxmem * 4); /* * Make hole for "AP -> long mode" bootstrap code. The * mp_bootaddress vector is only available when the kernel * is configured to support APs and APs for the system start * in real mode mode (e.g. SMP bare metal). */ if (init_ops.mp_bootaddress) init_ops.mp_bootaddress(physmap, &physmap_idx); /* call pmap initialization to make new kernel address space */ pmap_bootstrap(&first); /* * Size up each available chunk of physical memory. * * XXX Some BIOSes corrupt low 64KB between suspend and resume. * By default, mask off the first 16 pages unless we appear to be * running in a VM. */ physmem_start = (vm_guest > VM_GUEST_NO ? 1 : 16) << PAGE_SHIFT; TUNABLE_ULONG_FETCH("hw.physmem.start", &physmem_start); if (physmap[0] < physmem_start) { if (physmem_start < PAGE_SIZE) physmap[0] = PAGE_SIZE; else if (physmem_start >= physmap[1]) physmap[0] = round_page(physmap[1] - PAGE_SIZE); else physmap[0] = round_page(physmem_start); } pa_indx = 0; da_indx = 1; phys_avail[pa_indx++] = physmap[0]; phys_avail[pa_indx] = physmap[0]; dump_avail[da_indx] = physmap[0]; pte = CMAP1; /* * Get dcons buffer address */ if (getenv_quad("dcons.addr", &dcons_addr) == 0 || getenv_quad("dcons.size", &dcons_size) == 0) dcons_addr = 0; /* * physmap is in bytes, so when converting to page boundaries, * round up the start address and round down the end address. */ page_counter = 0; if (memtest != 0) printf("Testing system memory"); for (i = 0; i <= physmap_idx; i += 2) { vm_paddr_t end; end = ptoa((vm_paddr_t)Maxmem); if (physmap[i + 1] < end) end = trunc_page(physmap[i + 1]); for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) { int tmp, page_bad, full; int *ptr = (int *)CADDR1; full = FALSE; /* * block out kernel memory as not available. 
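 * (These pages are skipped for phys_avail[] only; the do_dump_avail path
 * below still records them in dump_avail[] so that crash dumps include the
 * kernel image.)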
*/ if (pa >= (vm_paddr_t)kernphys && pa < first) goto do_dump_avail; /* * block out dcons buffer */ if (dcons_addr > 0 && pa >= trunc_page(dcons_addr) && pa < dcons_addr + dcons_size) goto do_dump_avail; page_bad = FALSE; if (memtest == 0) goto skip_memtest; /* * Print a "." every GB to show we're making * progress. */ page_counter++; if ((page_counter % PAGES_PER_GB) == 0) printf("."); /* * map page into kernel: valid, read/write,non-cacheable */ *pte = pa | PG_V | PG_RW | PG_NC_PWT | PG_NC_PCD; invltlb(); tmp = *(int *)ptr; /* * Test for alternating 1's and 0's */ *(volatile int *)ptr = 0xaaaaaaaa; if (*(volatile int *)ptr != 0xaaaaaaaa) page_bad = TRUE; /* * Test for alternating 0's and 1's */ *(volatile int *)ptr = 0x55555555; if (*(volatile int *)ptr != 0x55555555) page_bad = TRUE; /* * Test for all 1's */ *(volatile int *)ptr = 0xffffffff; if (*(volatile int *)ptr != 0xffffffff) page_bad = TRUE; /* * Test for all 0's */ *(volatile int *)ptr = 0x0; if (*(volatile int *)ptr != 0x0) page_bad = TRUE; /* * Restore original value. */ *(int *)ptr = tmp; skip_memtest: /* * Adjust array of valid/good pages. */ if (page_bad == TRUE) continue; /* * If this good page is a continuation of the * previous set of good pages, then just increase * the end pointer. Otherwise start a new chunk. * Note that "end" points one higher than end, * making the range >= start and < end. * If we're also doing a speculative memory * test and we at or past the end, bump up Maxmem * so that we keep going. The first bad page * will terminate the loop. */ if (phys_avail[pa_indx] == pa) { phys_avail[pa_indx] += PAGE_SIZE; } else { pa_indx++; if (pa_indx == PHYS_AVAIL_ARRAY_END) { printf( "Too many holes in the physical address space, giving up\n"); pa_indx--; full = TRUE; goto do_dump_avail; } phys_avail[pa_indx++] = pa; /* start */ phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */ } physmem++; do_dump_avail: if (dump_avail[da_indx] == pa) { dump_avail[da_indx] += PAGE_SIZE; } else { da_indx++; if (da_indx == DUMP_AVAIL_ARRAY_END) { da_indx--; goto do_next; } dump_avail[da_indx++] = pa; /* start */ dump_avail[da_indx] = pa + PAGE_SIZE; /* end */ } do_next: if (full) break; } } *pte = 0; invltlb(); if (memtest != 0) printf("\n"); /* * XXX * The last chunk must contain at least one page plus the message * buffer to avoid complicating other code (message buffer address * calculation, etc.). */ while (phys_avail[pa_indx - 1] + PAGE_SIZE + round_page(msgbufsize) >= phys_avail[pa_indx]) { physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]); phys_avail[pa_indx--] = 0; phys_avail[pa_indx--] = 0; } Maxmem = atop(phys_avail[pa_indx]); /* Trim off space for the message buffer. */ phys_avail[pa_indx] -= round_page(msgbufsize); /* Map the message buffer. 
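 * On amd64 this is just an address computation: PHYS_TO_DMAP() returns the
 * direct-map virtual address of the reserved physical range, so no new
 * page-table entries are needed here.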
*/ msgbufp = (struct msgbuf *)PHYS_TO_DMAP(phys_avail[pa_indx]); } static caddr_t native_parse_preload_data(u_int64_t modulep) { caddr_t kmdp; char *envp; #ifdef DDB vm_offset_t ksym_start; vm_offset_t ksym_end; #endif preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE); preload_bootstrap_relocate(KERNBASE); kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type("elf64 kernel"); boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int); envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *); if (envp != NULL) envp += KERNBASE; init_static_kenv(envp, 0); #ifdef DDB ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t); ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t); db_fetch_ksymtab(ksym_start, ksym_end); #endif efi_systbl_phys = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t); return (kmdp); } static void amd64_kdb_init(void) { kdb_init(); #ifdef KDB if (boothowto & RB_KDB) kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger"); #endif } /* Set up the fast syscall stuff */ void amd64_conf_fast_syscall(void) { uint64_t msr; msr = rdmsr(MSR_EFER) | EFER_SCE; wrmsr(MSR_EFER, msr); wrmsr(MSR_LSTAR, pti ? (u_int64_t)IDTVEC(fast_syscall_pti) : (u_int64_t)IDTVEC(fast_syscall)); wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32)); msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) | ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48); wrmsr(MSR_STAR, msr); wrmsr(MSR_SF_MASK, PSL_NT | PSL_T | PSL_I | PSL_C | PSL_D | PSL_AC); } u_int64_t hammer_time(u_int64_t modulep, u_int64_t physfree) { caddr_t kmdp; int gsel_tss, x; struct pcpu *pc; struct nmi_pcpu *np; struct xstate_hdr *xhdr; u_int64_t rsp0; char *env; size_t kstack0_sz; int late_console; TSRAW(&thread0, TS_ENTER, __func__, NULL); kmdp = init_ops.parse_preload_data(modulep); physfree += ucode_load_bsp(physfree + KERNBASE); physfree = roundup2(physfree, PAGE_SIZE); identify_cpu1(); identify_hypervisor(); + identify_cpu_fixup_bsp(); identify_cpu2(); initializecpucache(); /* * Check for pti, pcid, and invpcid before ifuncs are * resolved, to correctly select the implementation for * pmap_activate_sw_mode(). */ pti = pti_get_default(); TUNABLE_INT_FETCH("vm.pmap.pti", &pti); TUNABLE_INT_FETCH("vm.pmap.pcid_enabled", &pmap_pcid_enabled); if ((cpu_feature2 & CPUID2_PCID) != 0 && pmap_pcid_enabled) { invpcid_works = (cpu_stdext_feature & CPUID_STDEXT_INVPCID) != 0; } else { pmap_pcid_enabled = 0; } link_elf_ireloc(kmdp); /* * This may be done better later if it gets more high level * components in it. If so just link td->td_proc here. 
*/ proc_linkup0(&proc0, &thread0); /* Init basic tunables, hz etc */ init_param1(); thread0.td_kstack = physfree + KERNBASE; thread0.td_kstack_pages = kstack_pages; kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE; bzero((void *)thread0.td_kstack, kstack0_sz); physfree += kstack0_sz; /* * make gdt memory segments */ for (x = 0; x < NGDT; x++) { if (x != GPROC0_SEL && x != (GPROC0_SEL + 1) && x != GUSERLDT_SEL && x != (GUSERLDT_SEL) + 1) ssdtosd(&gdt_segs[x], &gdt[x]); } gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&common_tss[0]; ssdtosyssd(&gdt_segs[GPROC0_SEL], (struct system_segment_descriptor *)&gdt[GPROC0_SEL]); r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; r_gdt.rd_base = (long) gdt; lgdt(&r_gdt); pc = &__pcpu[0]; wrmsr(MSR_FSBASE, 0); /* User value */ wrmsr(MSR_GSBASE, (u_int64_t)pc); wrmsr(MSR_KGSBASE, 0); /* User value while in the kernel */ pcpu_init(pc, 0, sizeof(struct pcpu)); dpcpu_init((void *)(physfree + KERNBASE), 0); physfree += DPCPU_SIZE; PCPU_SET(prvspace, pc); PCPU_SET(curthread, &thread0); /* Non-late cninit() and printf() can be moved up to here. */ PCPU_SET(tssp, &common_tss[0]); PCPU_SET(commontssp, &common_tss[0]); PCPU_SET(tss, (struct system_segment_descriptor *)&gdt[GPROC0_SEL]); PCPU_SET(ldt, (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL]); PCPU_SET(fs32p, &gdt[GUFS32_SEL]); PCPU_SET(gs32p, &gdt[GUGS32_SEL]); /* * Initialize mutexes. * * icu_lock: in order to allow an interrupt to occur in a critical * section, to set pcpu->ipending (etc...) properly, we * must be able to get the icu lock, so it can't be * under witness. */ mutex_init(); mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS); mtx_init(&dt_lock, "descriptor tables", NULL, MTX_DEF); /* exceptions */ for (x = 0; x < NIDT; x++) setidt(x, pti ? &IDTVEC(rsvd_pti) : &IDTVEC(rsvd), SDT_SYSIGT, SEL_KPL, 0); setidt(IDT_DE, pti ? &IDTVEC(div_pti) : &IDTVEC(div), SDT_SYSIGT, SEL_KPL, 0); setidt(IDT_DB, &IDTVEC(dbg), SDT_SYSIGT, SEL_KPL, 4); setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYSIGT, SEL_KPL, 2); setidt(IDT_BP, pti ? &IDTVEC(bpt_pti) : &IDTVEC(bpt), SDT_SYSIGT, SEL_UPL, 0); setidt(IDT_OF, pti ? &IDTVEC(ofl_pti) : &IDTVEC(ofl), SDT_SYSIGT, SEL_UPL, 0); setidt(IDT_BR, pti ? &IDTVEC(bnd_pti) : &IDTVEC(bnd), SDT_SYSIGT, SEL_KPL, 0); setidt(IDT_UD, pti ? &IDTVEC(ill_pti) : &IDTVEC(ill), SDT_SYSIGT, SEL_KPL, 0); setidt(IDT_NM, pti ? &IDTVEC(dna_pti) : &IDTVEC(dna), SDT_SYSIGT, SEL_KPL, 0); setidt(IDT_DF, &IDTVEC(dblfault), SDT_SYSIGT, SEL_KPL, 1); setidt(IDT_FPUGP, pti ? &IDTVEC(fpusegm_pti) : &IDTVEC(fpusegm), SDT_SYSIGT, SEL_KPL, 0); setidt(IDT_TS, pti ? &IDTVEC(tss_pti) : &IDTVEC(tss), SDT_SYSIGT, SEL_KPL, 0); setidt(IDT_NP, pti ? &IDTVEC(missing_pti) : &IDTVEC(missing), SDT_SYSIGT, SEL_KPL, 0); setidt(IDT_SS, pti ? &IDTVEC(stk_pti) : &IDTVEC(stk), SDT_SYSIGT, SEL_KPL, 0); setidt(IDT_GP, pti ? &IDTVEC(prot_pti) : &IDTVEC(prot), SDT_SYSIGT, SEL_KPL, 0); setidt(IDT_PF, pti ? &IDTVEC(page_pti) : &IDTVEC(page), SDT_SYSIGT, SEL_KPL, 0); setidt(IDT_MF, pti ? &IDTVEC(fpu_pti) : &IDTVEC(fpu), SDT_SYSIGT, SEL_KPL, 0); setidt(IDT_AC, pti ? &IDTVEC(align_pti) : &IDTVEC(align), SDT_SYSIGT, SEL_KPL, 0); setidt(IDT_MC, &IDTVEC(mchk), SDT_SYSIGT, SEL_KPL, 3); setidt(IDT_XF, pti ? &IDTVEC(xmm_pti) : &IDTVEC(xmm), SDT_SYSIGT, SEL_KPL, 0); #ifdef KDTRACE_HOOKS setidt(IDT_DTRACE_RET, pti ? &IDTVEC(dtrace_ret_pti) : &IDTVEC(dtrace_ret), SDT_SYSIGT, SEL_UPL, 0); #endif #ifdef XENHVM setidt(IDT_EVTCHN, pti ? 
&IDTVEC(xen_intr_upcall_pti) : &IDTVEC(xen_intr_upcall), SDT_SYSIGT, SEL_KPL, 0); #endif r_idt.rd_limit = sizeof(idt0) - 1; r_idt.rd_base = (long) idt; lidt(&r_idt); /* * Initialize the clock before the console so that console * initialization can use DELAY(). */ clock_init(); /* * Use vt(4) by default for UEFI boot (during the sc(4)/vt(4) * transition). * Once bootblocks have updated, we can test directly for * efi_systbl != NULL here... */ if (preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_EFI_MAP) != NULL) vty_set_preferred(VTY_VT); TUNABLE_INT_FETCH("hw.ibrs_disable", &hw_ibrs_disable); TUNABLE_INT_FETCH("hw.spec_store_bypass_disable", &hw_ssb_disable); TUNABLE_INT_FETCH("machdep.syscall_ret_l1d_flush", &syscall_ret_l1d_flush_mode); finishidentcpu(); /* Final stage of CPU initialization */ initializecpu(); /* Initialize CPU registers */ /* doublefault stack space, runs on ist1 */ common_tss[0].tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)]; /* * NMI stack, runs on ist2. The pcpu pointer is stored just * above the start of the ist2 stack. */ np = ((struct nmi_pcpu *) &nmi0_stack[sizeof(nmi0_stack)]) - 1; np->np_pcpu = (register_t) pc; common_tss[0].tss_ist2 = (long) np; /* * MC# stack, runs on ist3. The pcpu pointer is stored just * above the start of the ist3 stack. */ np = ((struct nmi_pcpu *) &mce0_stack[sizeof(mce0_stack)]) - 1; np->np_pcpu = (register_t) pc; common_tss[0].tss_ist3 = (long) np; /* * DB# stack, runs on ist4. */ np = ((struct nmi_pcpu *) &dbg0_stack[sizeof(dbg0_stack)]) - 1; np->np_pcpu = (register_t) pc; common_tss[0].tss_ist4 = (long) np; /* Set the IO permission bitmap (empty due to tss seg limit) */ common_tss[0].tss_iobase = sizeof(struct amd64tss) + IOPERM_BITMAP_SIZE; gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); ltr(gsel_tss); amd64_conf_fast_syscall(); /* * Temporary forge some valid pointer to PCB, for exception * handlers. It is reinitialized properly below after FPU is * set up. Also set up td_critnest to short-cut the page * fault handler. */ cpu_max_ext_state_size = sizeof(struct savefpu); thread0.td_pcb = get_pcb_td(&thread0); thread0.td_critnest = 1; /* * The console and kdb should be initialized even earlier than here, * but some console drivers don't work until after getmemsize(). * Default to late console initialization to support these drivers. * This loses mainly printf()s in getmemsize() and early debugging. */ late_console = 1; TUNABLE_INT_FETCH("debug.late_console", &late_console); if (!late_console) { cninit(); amd64_kdb_init(); } getmemsize(kmdp, physfree); init_param2(physmem); /* now running on new page tables, configured,and u/iom is accessible */ #ifdef DEV_PCI /* This call might adjust phys_avail[]. */ pci_early_quirks(); #endif if (late_console) cninit(); #ifdef DEV_ISA #ifdef DEV_ATPIC elcr_probe(); atpic_startup(); #else /* Reset and mask the atpics and leave them shut down. */ atpic_reset(); /* * Point the ICU spurious interrupt vectors at the APIC spurious * interrupt handler. */ setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0); setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0); #endif #else #error "have you forgotten the isa device?"; #endif if (late_console) amd64_kdb_init(); msgbufinit(msgbufp, msgbufsize); fpuinit(); /* * Set up thread0 pcb after fpuinit calculated pcb + fpu save * area size. Zero out the extended state header in fpu save * area. 
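 * (xsave_mask is the set of extended state components enabled in %xcr0;
 * storing it into xstate_bv below marks those components as present in the
 * freshly zeroed save area.)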
*/ thread0.td_pcb = get_pcb_td(&thread0); thread0.td_pcb->pcb_save = get_pcb_user_save_td(&thread0); bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size); if (use_xsave) { xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) + 1); xhdr->xstate_bv = xsave_mask; } /* make an initial tss so cpu can get interrupt stack on syscall! */ rsp0 = (vm_offset_t)thread0.td_pcb; /* Ensure the stack is aligned to 16 bytes */ rsp0 &= ~0xFul; common_tss[0].tss_rsp0 = rsp0; PCPU_SET(rsp0, rsp0); PCPU_SET(pti_rsp0, ((vm_offset_t)PCPU_PTR(pti_stack) + PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful); PCPU_SET(curpcb, thread0.td_pcb); /* transfer to user mode */ _ucodesel = GSEL(GUCODE_SEL, SEL_UPL); _udatasel = GSEL(GUDATA_SEL, SEL_UPL); _ucode32sel = GSEL(GUCODE32_SEL, SEL_UPL); _ufssel = GSEL(GUFS32_SEL, SEL_UPL); _ugssel = GSEL(GUGS32_SEL, SEL_UPL); load_ds(_udatasel); load_es(_udatasel); load_fs(_ufssel); /* setup proc 0's pcb */ thread0.td_pcb->pcb_flags = 0; thread0.td_frame = &proc0_tf; env = kern_getenv("kernelname"); if (env != NULL) strlcpy(kernelname, env, sizeof(kernelname)); cpu_probe_amdc1e(); #ifdef FDT x86_init_fdt(); #endif thread0.td_critnest = 0; TSEXIT(); /* Location of kernel stack for locore */ return ((u_int64_t)thread0.td_pcb); } void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) { pcpu->pc_acpi_id = 0xffffffff; } static int smap_sysctl_handler(SYSCTL_HANDLER_ARGS) { struct bios_smap *smapbase; struct bios_smap_xattr smap; caddr_t kmdp; uint32_t *smapattr; int count, error, i; /* Retrieve the system memory map from the loader. */ kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type("elf64 kernel"); smapbase = (struct bios_smap *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_SMAP); if (smapbase == NULL) return (0); smapattr = (uint32_t *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_SMAP_XATTR); count = *((uint32_t *)smapbase - 1) / sizeof(*smapbase); error = 0; for (i = 0; i < count; i++) { smap.base = smapbase[i].base; smap.length = smapbase[i].length; smap.type = smapbase[i].type; if (smapattr != NULL) smap.xattr = smapattr[i]; else smap.xattr = 0; error = SYSCTL_OUT(req, &smap, sizeof(smap)); } return (error); } SYSCTL_PROC(_machdep, OID_AUTO, smap, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0, smap_sysctl_handler, "S,bios_smap_xattr", "Raw BIOS SMAP data"); static int efi_map_sysctl_handler(SYSCTL_HANDLER_ARGS) { struct efi_map_header *efihdr; caddr_t kmdp; uint32_t efisize; kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type("elf64 kernel"); efihdr = (struct efi_map_header *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_EFI_MAP); if (efihdr == NULL) return (0); efisize = *((uint32_t *)efihdr - 1); return (SYSCTL_OUT(req, efihdr, efisize)); } SYSCTL_PROC(_machdep, OID_AUTO, efi_map, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0, efi_map_sysctl_handler, "S,efi_map_header", "Raw EFI Memory Map"); void spinlock_enter(void) { struct thread *td; register_t flags; td = curthread; if (td->td_md.md_spinlock_count == 0) { flags = intr_disable(); td->td_md.md_spinlock_count = 1; td->td_md.md_saved_flags = flags; critical_enter(); } else td->td_md.md_spinlock_count++; } void spinlock_exit(void) { struct thread *td; register_t flags; td = curthread; flags = td->td_md.md_saved_flags; td->td_md.md_spinlock_count--; if (td->td_md.md_spinlock_count == 0) { critical_exit(); intr_restore(flags); } } /* * Construct a PCB from a trapframe. 
This is called from kdb_trap() where * we want to start a backtrace from the function that caused us to enter * the debugger. We have the context in the trapframe, but base the trace * on the PCB. The PCB doesn't have to be perfect, as long as it contains * enough for a backtrace. */ void makectx(struct trapframe *tf, struct pcb *pcb) { pcb->pcb_r12 = tf->tf_r12; pcb->pcb_r13 = tf->tf_r13; pcb->pcb_r14 = tf->tf_r14; pcb->pcb_r15 = tf->tf_r15; pcb->pcb_rbp = tf->tf_rbp; pcb->pcb_rbx = tf->tf_rbx; pcb->pcb_rip = tf->tf_rip; pcb->pcb_rsp = tf->tf_rsp; } int ptrace_set_pc(struct thread *td, unsigned long addr) { td->td_frame->tf_rip = addr; set_pcb_flags(td->td_pcb, PCB_FULL_IRET); return (0); } int ptrace_single_step(struct thread *td) { PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); if ((td->td_frame->tf_rflags & PSL_T) == 0) { td->td_frame->tf_rflags |= PSL_T; td->td_dbgflags |= TDB_STEP; } return (0); } int ptrace_clear_single_step(struct thread *td) { PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); td->td_frame->tf_rflags &= ~PSL_T; td->td_dbgflags &= ~TDB_STEP; return (0); } int fill_regs(struct thread *td, struct reg *regs) { struct trapframe *tp; tp = td->td_frame; return (fill_frame_regs(tp, regs)); } int fill_frame_regs(struct trapframe *tp, struct reg *regs) { regs->r_r15 = tp->tf_r15; regs->r_r14 = tp->tf_r14; regs->r_r13 = tp->tf_r13; regs->r_r12 = tp->tf_r12; regs->r_r11 = tp->tf_r11; regs->r_r10 = tp->tf_r10; regs->r_r9 = tp->tf_r9; regs->r_r8 = tp->tf_r8; regs->r_rdi = tp->tf_rdi; regs->r_rsi = tp->tf_rsi; regs->r_rbp = tp->tf_rbp; regs->r_rbx = tp->tf_rbx; regs->r_rdx = tp->tf_rdx; regs->r_rcx = tp->tf_rcx; regs->r_rax = tp->tf_rax; regs->r_rip = tp->tf_rip; regs->r_cs = tp->tf_cs; regs->r_rflags = tp->tf_rflags; regs->r_rsp = tp->tf_rsp; regs->r_ss = tp->tf_ss; if (tp->tf_flags & TF_HASSEGS) { regs->r_ds = tp->tf_ds; regs->r_es = tp->tf_es; regs->r_fs = tp->tf_fs; regs->r_gs = tp->tf_gs; } else { regs->r_ds = 0; regs->r_es = 0; regs->r_fs = 0; regs->r_gs = 0; } return (0); } int set_regs(struct thread *td, struct reg *regs) { struct trapframe *tp; register_t rflags; tp = td->td_frame; rflags = regs->r_rflags & 0xffffffff; if (!EFL_SECURE(rflags, tp->tf_rflags) || !CS_SECURE(regs->r_cs)) return (EINVAL); tp->tf_r15 = regs->r_r15; tp->tf_r14 = regs->r_r14; tp->tf_r13 = regs->r_r13; tp->tf_r12 = regs->r_r12; tp->tf_r11 = regs->r_r11; tp->tf_r10 = regs->r_r10; tp->tf_r9 = regs->r_r9; tp->tf_r8 = regs->r_r8; tp->tf_rdi = regs->r_rdi; tp->tf_rsi = regs->r_rsi; tp->tf_rbp = regs->r_rbp; tp->tf_rbx = regs->r_rbx; tp->tf_rdx = regs->r_rdx; tp->tf_rcx = regs->r_rcx; tp->tf_rax = regs->r_rax; tp->tf_rip = regs->r_rip; tp->tf_cs = regs->r_cs; tp->tf_rflags = rflags; tp->tf_rsp = regs->r_rsp; tp->tf_ss = regs->r_ss; if (0) { /* XXXKIB */ tp->tf_ds = regs->r_ds; tp->tf_es = regs->r_es; tp->tf_fs = regs->r_fs; tp->tf_gs = regs->r_gs; tp->tf_flags = TF_HASSEGS; } set_pcb_flags(td->td_pcb, PCB_FULL_IRET); return (0); } /* XXX check all this stuff! 
*/ /* externalize from sv_xmm */ static void fill_fpregs_xmm(struct savefpu *sv_xmm, struct fpreg *fpregs) { struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env; struct envxmm *penv_xmm = &sv_xmm->sv_env; int i; /* pcb -> fpregs */ bzero(fpregs, sizeof(*fpregs)); /* FPU control/status */ penv_fpreg->en_cw = penv_xmm->en_cw; penv_fpreg->en_sw = penv_xmm->en_sw; penv_fpreg->en_tw = penv_xmm->en_tw; penv_fpreg->en_opcode = penv_xmm->en_opcode; penv_fpreg->en_rip = penv_xmm->en_rip; penv_fpreg->en_rdp = penv_xmm->en_rdp; penv_fpreg->en_mxcsr = penv_xmm->en_mxcsr; penv_fpreg->en_mxcsr_mask = penv_xmm->en_mxcsr_mask; /* FPU registers */ for (i = 0; i < 8; ++i) bcopy(sv_xmm->sv_fp[i].fp_acc.fp_bytes, fpregs->fpr_acc[i], 10); /* SSE registers */ for (i = 0; i < 16; ++i) bcopy(sv_xmm->sv_xmm[i].xmm_bytes, fpregs->fpr_xacc[i], 16); } /* internalize from fpregs into sv_xmm */ static void set_fpregs_xmm(struct fpreg *fpregs, struct savefpu *sv_xmm) { struct envxmm *penv_xmm = &sv_xmm->sv_env; struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env; int i; /* fpregs -> pcb */ /* FPU control/status */ penv_xmm->en_cw = penv_fpreg->en_cw; penv_xmm->en_sw = penv_fpreg->en_sw; penv_xmm->en_tw = penv_fpreg->en_tw; penv_xmm->en_opcode = penv_fpreg->en_opcode; penv_xmm->en_rip = penv_fpreg->en_rip; penv_xmm->en_rdp = penv_fpreg->en_rdp; penv_xmm->en_mxcsr = penv_fpreg->en_mxcsr; penv_xmm->en_mxcsr_mask = penv_fpreg->en_mxcsr_mask & cpu_mxcsr_mask; /* FPU registers */ for (i = 0; i < 8; ++i) bcopy(fpregs->fpr_acc[i], sv_xmm->sv_fp[i].fp_acc.fp_bytes, 10); /* SSE registers */ for (i = 0; i < 16; ++i) bcopy(fpregs->fpr_xacc[i], sv_xmm->sv_xmm[i].xmm_bytes, 16); } /* externalize from td->pcb */ int fill_fpregs(struct thread *td, struct fpreg *fpregs) { KASSERT(td == curthread || TD_IS_SUSPENDED(td) || P_SHOULDSTOP(td->td_proc), ("not suspended thread %p", td)); fpugetregs(td); fill_fpregs_xmm(get_pcb_user_save_td(td), fpregs); return (0); } /* internalize to td->pcb */ int set_fpregs(struct thread *td, struct fpreg *fpregs) { critical_enter(); set_fpregs_xmm(fpregs, get_pcb_user_save_td(td)); fpuuserinited(td); critical_exit(); return (0); } /* * Get machine context. */ int get_mcontext(struct thread *td, mcontext_t *mcp, int flags) { struct pcb *pcb; struct trapframe *tp; pcb = td->td_pcb; tp = td->td_frame; PROC_LOCK(curthread->td_proc); mcp->mc_onstack = sigonstack(tp->tf_rsp); PROC_UNLOCK(curthread->td_proc); mcp->mc_r15 = tp->tf_r15; mcp->mc_r14 = tp->tf_r14; mcp->mc_r13 = tp->tf_r13; mcp->mc_r12 = tp->tf_r12; mcp->mc_r11 = tp->tf_r11; mcp->mc_r10 = tp->tf_r10; mcp->mc_r9 = tp->tf_r9; mcp->mc_r8 = tp->tf_r8; mcp->mc_rdi = tp->tf_rdi; mcp->mc_rsi = tp->tf_rsi; mcp->mc_rbp = tp->tf_rbp; mcp->mc_rbx = tp->tf_rbx; mcp->mc_rcx = tp->tf_rcx; mcp->mc_rflags = tp->tf_rflags; if (flags & GET_MC_CLEAR_RET) { mcp->mc_rax = 0; mcp->mc_rdx = 0; mcp->mc_rflags &= ~PSL_C; } else { mcp->mc_rax = tp->tf_rax; mcp->mc_rdx = tp->tf_rdx; } mcp->mc_rip = tp->tf_rip; mcp->mc_cs = tp->tf_cs; mcp->mc_rsp = tp->tf_rsp; mcp->mc_ss = tp->tf_ss; mcp->mc_ds = tp->tf_ds; mcp->mc_es = tp->tf_es; mcp->mc_fs = tp->tf_fs; mcp->mc_gs = tp->tf_gs; mcp->mc_flags = tp->tf_flags; mcp->mc_len = sizeof(*mcp); get_fpcontext(td, mcp, NULL, 0); update_pcb_bases(pcb); mcp->mc_fsbase = pcb->pcb_fsbase; mcp->mc_gsbase = pcb->pcb_gsbase; mcp->mc_xfpustate = 0; mcp->mc_xfpustate_len = 0; bzero(mcp->mc_spare, sizeof(mcp->mc_spare)); return (0); } /* * Set machine context. 
* * However, we don't set any but the user modifiable flags, and we won't * touch the cs selector. */ int set_mcontext(struct thread *td, mcontext_t *mcp) { struct pcb *pcb; struct trapframe *tp; char *xfpustate; long rflags; int ret; pcb = td->td_pcb; tp = td->td_frame; if (mcp->mc_len != sizeof(*mcp) || (mcp->mc_flags & ~_MC_FLAG_MASK) != 0) return (EINVAL); rflags = (mcp->mc_rflags & PSL_USERCHANGE) | (tp->tf_rflags & ~PSL_USERCHANGE); if (mcp->mc_flags & _MC_HASFPXSTATE) { if (mcp->mc_xfpustate_len > cpu_max_ext_state_size - sizeof(struct savefpu)) return (EINVAL); xfpustate = __builtin_alloca(mcp->mc_xfpustate_len); ret = copyin((void *)mcp->mc_xfpustate, xfpustate, mcp->mc_xfpustate_len); if (ret != 0) return (ret); } else xfpustate = NULL; ret = set_fpcontext(td, mcp, xfpustate, mcp->mc_xfpustate_len); if (ret != 0) return (ret); tp->tf_r15 = mcp->mc_r15; tp->tf_r14 = mcp->mc_r14; tp->tf_r13 = mcp->mc_r13; tp->tf_r12 = mcp->mc_r12; tp->tf_r11 = mcp->mc_r11; tp->tf_r10 = mcp->mc_r10; tp->tf_r9 = mcp->mc_r9; tp->tf_r8 = mcp->mc_r8; tp->tf_rdi = mcp->mc_rdi; tp->tf_rsi = mcp->mc_rsi; tp->tf_rbp = mcp->mc_rbp; tp->tf_rbx = mcp->mc_rbx; tp->tf_rdx = mcp->mc_rdx; tp->tf_rcx = mcp->mc_rcx; tp->tf_rax = mcp->mc_rax; tp->tf_rip = mcp->mc_rip; tp->tf_rflags = rflags; tp->tf_rsp = mcp->mc_rsp; tp->tf_ss = mcp->mc_ss; tp->tf_flags = mcp->mc_flags; if (tp->tf_flags & TF_HASSEGS) { tp->tf_ds = mcp->mc_ds; tp->tf_es = mcp->mc_es; tp->tf_fs = mcp->mc_fs; tp->tf_gs = mcp->mc_gs; } set_pcb_flags(pcb, PCB_FULL_IRET); if (mcp->mc_flags & _MC_HASBASES) { pcb->pcb_fsbase = mcp->mc_fsbase; pcb->pcb_gsbase = mcp->mc_gsbase; } return (0); } static void get_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpusave, size_t xfpusave_len) { size_t max_len, len; mcp->mc_ownedfp = fpugetregs(td); bcopy(get_pcb_user_save_td(td), &mcp->mc_fpstate[0], sizeof(mcp->mc_fpstate)); mcp->mc_fpformat = fpuformat(); if (!use_xsave || xfpusave_len == 0) return; max_len = cpu_max_ext_state_size - sizeof(struct savefpu); len = xfpusave_len; if (len > max_len) { len = max_len; bzero(xfpusave + max_len, len - max_len); } mcp->mc_flags |= _MC_HASFPXSTATE; mcp->mc_xfpustate_len = len; bcopy(get_pcb_user_save_td(td) + 1, xfpusave, len); } static int set_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpustate, size_t xfpustate_len) { int error; if (mcp->mc_fpformat == _MC_FPFMT_NODEV) return (0); else if (mcp->mc_fpformat != _MC_FPFMT_XMM) return (EINVAL); else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) { /* We don't care what state is left in the FPU or PCB. */ fpstate_drop(td); error = 0; } else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU || mcp->mc_ownedfp == _MC_FPOWNED_PCB) { error = fpusetregs(td, (struct savefpu *)&mcp->mc_fpstate, xfpustate, xfpustate_len); } else return (EINVAL); return (error); } void fpstate_drop(struct thread *td) { KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu")); critical_enter(); if (PCPU_GET(fpcurthread) == td) fpudrop(); /* * XXX force a full drop of the fpu. The above only drops it if we * owned it. * * XXX I don't much like fpugetuserregs()'s semantics of doing a full * drop. Dropping only to the pcb matches fnsave's behaviour. * We only need to drop to !PCB_INITDONE in sendsig(). But * sendsig() is the only caller of fpugetuserregs()... perhaps we just * have too many layers. 
*/ clear_pcb_flags(curthread->td_pcb, PCB_FPUINITDONE | PCB_USERFPUINITDONE); critical_exit(); } int fill_dbregs(struct thread *td, struct dbreg *dbregs) { struct pcb *pcb; if (td == NULL) { dbregs->dr[0] = rdr0(); dbregs->dr[1] = rdr1(); dbregs->dr[2] = rdr2(); dbregs->dr[3] = rdr3(); dbregs->dr[6] = rdr6(); dbregs->dr[7] = rdr7(); } else { pcb = td->td_pcb; dbregs->dr[0] = pcb->pcb_dr0; dbregs->dr[1] = pcb->pcb_dr1; dbregs->dr[2] = pcb->pcb_dr2; dbregs->dr[3] = pcb->pcb_dr3; dbregs->dr[6] = pcb->pcb_dr6; dbregs->dr[7] = pcb->pcb_dr7; } dbregs->dr[4] = 0; dbregs->dr[5] = 0; dbregs->dr[8] = 0; dbregs->dr[9] = 0; dbregs->dr[10] = 0; dbregs->dr[11] = 0; dbregs->dr[12] = 0; dbregs->dr[13] = 0; dbregs->dr[14] = 0; dbregs->dr[15] = 0; return (0); } int set_dbregs(struct thread *td, struct dbreg *dbregs) { struct pcb *pcb; int i; if (td == NULL) { load_dr0(dbregs->dr[0]); load_dr1(dbregs->dr[1]); load_dr2(dbregs->dr[2]); load_dr3(dbregs->dr[3]); load_dr6(dbregs->dr[6]); load_dr7(dbregs->dr[7]); } else { /* * Don't let an illegal value for dr7 get set. Specifically, * check for undefined settings. Setting these bit patterns * result in undefined behaviour and can lead to an unexpected * TRCTRAP or a general protection fault right here. * Upper bits of dr6 and dr7 must not be set */ for (i = 0; i < 4; i++) { if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02) return (EINVAL); if (td->td_frame->tf_cs == _ucode32sel && DBREG_DR7_LEN(dbregs->dr[7], i) == DBREG_DR7_LEN_8) return (EINVAL); } if ((dbregs->dr[6] & 0xffffffff00000000ul) != 0 || (dbregs->dr[7] & 0xffffffff00000000ul) != 0) return (EINVAL); pcb = td->td_pcb; /* * Don't let a process set a breakpoint that is not within the * process's address space. If a process could do this, it * could halt the system by setting a breakpoint in the kernel * (if ddb was enabled). Thus, we need to check to make sure * that no breakpoints are being enabled for addresses outside * process's address space. * * XXX - what about when the watched area of the user's * address space is written into from within the kernel * ... wouldn't that still cause a breakpoint to be generated * from within kernel mode? */ if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) { /* dr0 is enabled */ if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS) return (EINVAL); } if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) { /* dr1 is enabled */ if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS) return (EINVAL); } if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) { /* dr2 is enabled */ if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS) return (EINVAL); } if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) { /* dr3 is enabled */ if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS) return (EINVAL); } pcb->pcb_dr0 = dbregs->dr[0]; pcb->pcb_dr1 = dbregs->dr[1]; pcb->pcb_dr2 = dbregs->dr[2]; pcb->pcb_dr3 = dbregs->dr[3]; pcb->pcb_dr6 = dbregs->dr[6]; pcb->pcb_dr7 = dbregs->dr[7]; set_pcb_flags(pcb, PCB_DBREGS); } return (0); } void reset_dbregs(void) { load_dr7(0); /* Turn off the control bits first */ load_dr0(0); load_dr1(0); load_dr2(0); load_dr3(0); load_dr6(0); } /* * Return > 0 if a hardware breakpoint has been hit, and the * breakpoint was in user space. Return 0, otherwise. 
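 *
 * For reference, DR6 bits 0-3 report which of DR0-DR3 triggered, and the
 * low byte of DR7 holds the per-breakpoint local/global enable bits, two
 * per register, e.g. (illustrative only):
 *
 *	fired   = (dr6 >> i) & 1;
 *	enabled = (dr7 >> (i * 2)) & 3;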
*/ int user_dbreg_trap(register_t dr6) { u_int64_t dr7; u_int64_t bp; /* breakpoint bits extracted from dr6 */ int nbp; /* number of breakpoints that triggered */ caddr_t addr[4]; /* breakpoint addresses */ int i; bp = dr6 & DBREG_DR6_BMASK; if (bp == 0) { /* * None of the breakpoint bits are set meaning this * trap was not caused by any of the debug registers */ return 0; } dr7 = rdr7(); if ((dr7 & 0x000000ff) == 0) { /* * all GE and LE bits in the dr7 register are zero, * thus the trap couldn't have been caused by the * hardware debug registers */ return 0; } nbp = 0; /* * at least one of the breakpoints were hit, check to see * which ones and if any of them are user space addresses */ if (bp & 0x01) { addr[nbp++] = (caddr_t)rdr0(); } if (bp & 0x02) { addr[nbp++] = (caddr_t)rdr1(); } if (bp & 0x04) { addr[nbp++] = (caddr_t)rdr2(); } if (bp & 0x08) { addr[nbp++] = (caddr_t)rdr3(); } for (i = 0; i < nbp; i++) { if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) { /* * addr[i] is in user space */ return nbp; } } /* * None of the breakpoints are in user space. */ return 0; } /* * The pcb_flags is only modified by current thread, or by other threads * when current thread is stopped. However, current thread may change it * from the interrupt context in cpu_switch(), or in the trap handler. * When we read-modify-write pcb_flags from C sources, compiler may generate * code that is not atomic regarding the interrupt handler. If a trap or * interrupt happens and any flag is modified from the handler, it can be * clobbered with the cached value later. Therefore, we implement setting * and clearing flags with single-instruction functions, which do not race * with possible modification of the flags from the trap or interrupt context, * because traps and interrupts are executed only on instruction boundary. */ void set_pcb_flags_raw(struct pcb *pcb, const u_int flags) { __asm __volatile("orl %1,%0" : "=m" (pcb->pcb_flags) : "ir" (flags), "m" (pcb->pcb_flags) : "cc", "memory"); } /* * The support for RDFSBASE, WRFSBASE and similar instructions for %gs * base requires that kernel saves MSR_FSBASE and MSR_{K,}GSBASE into * pcb if user space modified the bases. We must save on the context * switch or if the return to usermode happens through the doreti. * * Tracking of both events is performed by the pcb flag PCB_FULL_IRET, * which have a consequence that the base MSRs must be saved each time * the PCB_FULL_IRET flag is set. We disable interrupts to sync with * context switches. */ static void set_pcb_flags_fsgsbase(struct pcb *pcb, const u_int flags) { register_t r; if (curpcb == pcb && (flags & PCB_FULL_IRET) != 0 && (pcb->pcb_flags & PCB_FULL_IRET) == 0) { r = intr_disable(); if ((pcb->pcb_flags & PCB_FULL_IRET) == 0) { if (rfs() == _ufssel) pcb->pcb_fsbase = rdfsbase(); if (rgs() == _ugssel) pcb->pcb_gsbase = rdmsr(MSR_KGSBASE); } set_pcb_flags_raw(pcb, flags); intr_restore(r); } else { set_pcb_flags_raw(pcb, flags); } } DEFINE_IFUNC(, void, set_pcb_flags, (struct pcb *, const u_int), static) { return ((cpu_stdext_feature & CPUID_STDEXT_FSGSBASE) != 0 ? set_pcb_flags_fsgsbase : set_pcb_flags_raw); } void clear_pcb_flags(struct pcb *pcb, const u_int flags) { __asm __volatile("andl %1,%0" : "=m" (pcb->pcb_flags) : "ir" (~flags), "m" (pcb->pcb_flags) : "cc", "memory"); } #ifdef KDB /* * Provide inb() and outb() as functions. They are normally only available as * inline functions, thus cannot be called from the debugger. 
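 * From the ddb prompt they can be called directly, e.g. something like:
 *
 *	db> call inb_(0x21)
 *	db> call outb_(0x80, 0x1)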
*/ /* silence compiler warnings */ u_char inb_(u_short); void outb_(u_short, u_char); u_char inb_(u_short port) { return inb(port); } void outb_(u_short port, u_char data) { outb(port, data); } #endif /* KDB */ #undef memset #undef memmove #undef memcpy void *memset_std(void *buf, int c, size_t len); void *memset_erms(void *buf, int c, size_t len); DEFINE_IFUNC(, void *, memset, (void *, int, size_t), static) { return ((cpu_stdext_feature & CPUID_STDEXT_ERMS) != 0 ? memset_erms : memset_std); } void *memmove_std(void * _Nonnull dst, const void * _Nonnull src, size_t len); void *memmove_erms(void * _Nonnull dst, const void * _Nonnull src, size_t len); DEFINE_IFUNC(, void *, memmove, (void * _Nonnull, const void * _Nonnull, size_t), static) { return ((cpu_stdext_feature & CPUID_STDEXT_ERMS) != 0 ? memmove_erms : memmove_std); } void *memcpy_std(void * _Nonnull dst, const void * _Nonnull src, size_t len); void *memcpy_erms(void * _Nonnull dst, const void * _Nonnull src, size_t len); DEFINE_IFUNC(, void *, memcpy, (void * _Nonnull, const void * _Nonnull,size_t), static) { return ((cpu_stdext_feature & CPUID_STDEXT_ERMS) != 0 ? memcpy_erms : memcpy_std); } void pagezero_std(void *addr); void pagezero_erms(void *addr); DEFINE_IFUNC(, void , pagezero, (void *), static) { return ((cpu_stdext_feature & CPUID_STDEXT_ERMS) != 0 ? pagezero_erms : pagezero_std); } Index: projects/clang700-import/sys/amd64/conf/GENERIC =================================================================== --- projects/clang700-import/sys/amd64/conf/GENERIC (revision 340426) +++ projects/clang700-import/sys/amd64/conf/GENERIC (revision 340427) @@ -1,374 +1,379 @@ # # GENERIC -- Generic kernel configuration file for FreeBSD/amd64 # # For more information on this file, please read the config(5) manual page, # and/or the handbook section on Kernel Configuration Files: # # https://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html # # The handbook is also available locally in /usr/share/doc/handbook # if you've installed the doc distribution, otherwise always see the # FreeBSD World Wide Web server (https://www.FreeBSD.org/) for the # latest information. # # An exhaustive list of options and more detailed explanations of the # device lines is also present in the ../../conf/NOTES and NOTES files. # If you are in doubt as to the purpose or necessity of a line, check first # in NOTES. # # $FreeBSD$ cpu HAMMER ident GENERIC makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols makeoptions WITH_CTF=1 # Run ctfconvert(1) for DTrace support options SCHED_ULE # ULE scheduler options NUMA # Non-Uniform Memory Architecture support options PREEMPTION # Enable kernel thread preemption options VIMAGE # Subsystem virtualization, e.g. 
VNET options INET # InterNETworking options INET6 # IPv6 communications protocols options IPSEC # IP (v4/v6) security options IPSEC_SUPPORT # Allow kldload of ipsec and tcpmd5 options TCP_OFFLOAD # TCP offload options TCP_BLACKBOX # Enhanced TCP event logging options TCP_HHOOK # hhook(9) framework for TCP options TCP_RFC7413 # TCP Fast Open options SCTP # Stream Control Transmission Protocol options FFS # Berkeley Fast Filesystem options SOFTUPDATES # Enable FFS soft updates support options UFS_ACL # Support for access control lists options UFS_DIRHASH # Improve performance on big directories options UFS_GJOURNAL # Enable gjournal-based UFS journaling options QUOTA # Enable disk quotas for UFS options MD_ROOT # MD is a potential root device options NFSCL # Network Filesystem Client options NFSD # Network Filesystem Server options NFSLOCKD # Network Lock Manager options NFS_ROOT # NFS usable as /, requires NFSCL options MSDOSFS # MSDOS Filesystem options CD9660 # ISO 9660 Filesystem options PROCFS # Process filesystem (requires PSEUDOFS) options PSEUDOFS # Pseudo-filesystem framework options GEOM_RAID # Soft RAID functionality. options GEOM_LABEL # Provides labelization options EFIRT # EFI Runtime Services support options COMPAT_FREEBSD32 # Compatible with i386 binaries options COMPAT_FREEBSD4 # Compatible with FreeBSD4 options COMPAT_FREEBSD5 # Compatible with FreeBSD5 options COMPAT_FREEBSD6 # Compatible with FreeBSD6 options COMPAT_FREEBSD7 # Compatible with FreeBSD7 options COMPAT_FREEBSD9 # Compatible with FreeBSD9 options COMPAT_FREEBSD10 # Compatible with FreeBSD10 options COMPAT_FREEBSD11 # Compatible with FreeBSD11 options SCSI_DELAY=5000 # Delay (in ms) before probing SCSI options KTRACE # ktrace(1) support options STACK # stack(9) support options SYSVSHM # SYSV-style shared memory options SYSVMSG # SYSV-style message queues options SYSVSEM # SYSV-style semaphores options _KPOSIX_PRIORITY_SCHEDULING # POSIX P1003_1B real-time extensions options PRINTF_BUFR_SIZE=128 # Prevent printf output being interspersed. options KBD_INSTALL_CDEV # install a CDEV entry in /dev options HWPMC_HOOKS # Necessary kernel hooks for hwpmc(4) options AUDIT # Security event auditing options CAPABILITY_MODE # Capsicum capability mode options CAPABILITIES # Capsicum capabilities options MAC # TrustedBSD MAC Framework options KDTRACE_FRAME # Ensure frames are compiled in options KDTRACE_HOOKS # Kernel DTrace hooks options DDB_CTF # Kernel ELF linker loads CTF data options INCLUDE_CONFIG_FILE # Include this file in kernel options RACCT # Resource accounting framework options RACCT_DEFAULT_TO_DISABLED # Set kern.racct.enable=0 by default options RCTL # Resource limits # Debugging support. Always need this: options KDB # Enable kernel debugger support. options KDB_TRACE # Print a stack trace for a panic. # For full debugger support use (turn off in stable branch): options BUF_TRACKING # Track buffer history options DDB # Support DDB. options FULL_BUF_TRACKING # Track more buffer history options GDB # Support remote GDB. 
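(Editorial aside, keyed to the machdep.c hunk above rather than to this GENERIC hunk.)  The DEFINE_IFUNC() uses for memset/memmove/memcpy/pagezero select one implementation exactly once, based on whether CPUID advertises Enhanced REP MOVSB/STOSB (CPUID_STDEXT_ERMS).  The sketch below is a user-space analogue of that pattern, not the kernel mechanism itself: the feature test is faked with an environment variable, and the names (my_memset, my_memset_resolve, FAKE_ERMS) are invented for illustration.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Two interchangeable implementations, standing in for
	 * memset_std()/memset_erms() in the kernel hunk. */
	static void *
	my_memset_std(void *buf, int c, size_t len)
	{
		unsigned char *p;

		for (p = buf; len > 0; len--)
			*p++ = (unsigned char)c;
		return (buf);
	}

	static void *
	my_memset_bulk(void *buf, int c, size_t len)
	{
		/* Pretend this is the ERMS-optimized path. */
		return (memset(buf, c, len));
	}

	/* Resolved once, before first use, like a DEFINE_IFUNC resolver. */
	static void *(*my_memset)(void *, int, size_t);

	static void
	my_memset_resolve(void)
	{
		my_memset = getenv("FAKE_ERMS") != NULL ?
		    my_memset_bulk : my_memset_std;
	}

	int
	main(void)
	{
		char buf[8];

		my_memset_resolve();
		my_memset(buf, 'x', sizeof(buf) - 1);
		buf[sizeof(buf) - 1] = '\0';
		printf("%s\n", buf);
		return (0);
	}

The point of resolving once is that every later call goes through a direct pointer (or, with real ifuncs, a resolver-patched call slot) instead of re-testing the CPU feature on each call.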
options DEADLKRES # Enable the deadlock resolver options INVARIANTS # Enable calls of extra sanity checking options INVARIANT_SUPPORT # Extra sanity checks of internal structures, required by INVARIANTS options WITNESS # Enable checks to detect deadlocks and cycles options WITNESS_SKIPSPIN # Don't run witness on spinlocks for speed options MALLOC_DEBUG_MAXZONES=8 # Separate malloc(9) zones options VERBOSE_SYSINIT=0 # Support debug.verbose_sysinit, off by default # Warning: KUBSAN can result in a kernel too large for loader to load #options KUBSAN # Kernel Undefined Behavior Sanitizer # Kernel dump features. options EKCD # Support for encrypted kernel dumps options GZIO # gzip-compressed kernel and user dumps options ZSTDIO # zstd-compressed kernel and user dumps options NETDUMP # netdump(4) client support # Make an SMP-capable kernel by default options SMP # Symmetric MultiProcessor Kernel options EARLY_AP_STARTUP # CPU frequency control device cpufreq # Bus support. device acpi options ACPI_DMAR device pci options PCI_HP # PCI-Express native HotPlug options PCI_IOV # PCI SR-IOV support # Floppy drives device fdc # ATA controllers device ahci # AHCI-compatible SATA controllers device ata # Legacy ATA/SATA controllers device mvs # Marvell 88SX50XX/88SX60XX/88SX70XX/SoC SATA device siis # SiliconImage SiI3124/SiI3132/SiI3531 SATA # SCSI Controllers device ahc # AHA2940 and onboard AIC7xxx devices device ahd # AHA39320/29320 and onboard AIC79xx devices device esp # AMD Am53C974 (Tekram DC-390(T)) device hptiop # Highpoint RocketRaid 3xxx series device isp # Qlogic family #device ispfw # Firmware for QLogic HBAs- normally a module device mpt # LSI-Logic MPT-Fusion device mps # LSI-Logic MPT-Fusion 2 device mpr # LSI-Logic MPT-Fusion 3 device sym # NCR/Symbios Logic device trm # Tekram DC395U/UW/F DC315U adapters device isci # Intel C600 SAS controller device ocs_fc # Emulex FC adapters # ATA/SCSI peripherals device scbus # SCSI bus (required for ATA/SCSI) device ch # SCSI media changers device da # Direct Access (disks) device sa # Sequential Access (tape etc) device cd # CD device pass # Passthrough device (direct ATA/SCSI access) device ses # Enclosure Services (SES and SAF-TE) #device ctl # CAM Target Layer # RAID controllers interfaced to the SCSI subsystem device amr # AMI MegaRAID device arcmsr # Areca SATA II RAID device ciss # Compaq Smart RAID 5* device hptmv # Highpoint RocketRAID 182x device hptnr # Highpoint DC7280, R750 device hptrr # Highpoint RocketRAID 17xx, 22xx, 23xx, 25xx device hpt27xx # Highpoint RocketRAID 27xx device iir # Intel Integrated RAID device ips # IBM (Adaptec) ServeRAID device mly # Mylex AcceleRAID/eXtremeRAID device twa # 3ware 9000 series PATA/SATA RAID device smartpqi # Microsemi smartpqi driver device tws # LSI 3ware 9750 SATA+SAS 6Gb/s RAID controller # RAID controllers device aac # Adaptec FSA RAID device aacp # SCSI passthrough for aac (requires CAM) device aacraid # Adaptec by PMC RAID device ida # Compaq Smart RAID device mfi # LSI MegaRAID SAS device mlx # Mylex DAC960 family device mrsas # LSI/Avago MegaRAID SAS/SATA, 6Gb/s and 12Gb/s device pmspcv # PMC-Sierra SAS/SATA Controller driver #XXX pointer/int warnings #device pst # Promise Supertrak SX6000 device twe # 3ware ATA RAID # NVM Express (NVMe) support device nvme # base NVMe driver device nvd # expose NVMe namespaces as disks, depends on nvme # atkbdc0 controls both the keyboard and the PS/2 mouse device atkbdc # AT keyboard controller device atkbd # AT keyboard device psm # PS/2 mouse device 
kbdmux # keyboard multiplexer device vga # VGA video card driver options VESA # Add support for VESA BIOS Extensions (VBE) device splash # Splash screen and screen saver support # syscons is the default console driver, resembling an SCO console device sc options SC_PIXEL_MODE # add support for the raster text mode # vt is the new video console driver device vt device vt_vga device vt_efifb device agp # support several AGP chipsets # PCCARD (PCMCIA) support # PCMCIA and cardbus bridge support device cbb # cardbus (yenta) bridge device pccard # PC Card (16-bit) bus device cardbus # CardBus (32-bit) bus # Serial (COM) ports device uart # Generic UART driver # Parallel port device ppc device ppbus # Parallel port bus (required) device lpt # Printer device ppi # Parallel port interface device #device vpo # Requires scbus and da device puc # Multi I/O cards and multi-channel UARTs # PCI Ethernet NICs. device bxe # Broadcom NetXtreme II BCM5771X/BCM578XX 10GbE device de # DEC/Intel DC21x4x (``Tulip'') device em # Intel PRO/1000 Gigabit Ethernet Family device ix # Intel PRO/10GbE PCIE PF Ethernet device ixv # Intel PRO/10GbE PCIE VF Ethernet device ixl # Intel 700 Series Physical Function device iavf # Intel Adaptive Virtual Function device le # AMD Am7900 LANCE and Am79C9xx PCnet device ti # Alteon Networks Tigon I/II gigabit Ethernet device txp # 3Com 3cR990 (``Typhoon'') device vx # 3Com 3c590, 3c595 (``Vortex'') # PCI Ethernet NICs that use the common MII bus controller code. # NOTE: Be sure to keep the 'device miibus' line in order to use these NICs! device miibus # MII bus support device ae # Attansic/Atheros L2 FastEthernet device age # Attansic/Atheros L1 Gigabit Ethernet device alc # Atheros AR8131/AR8132 Ethernet device ale # Atheros AR8121/AR8113/AR8114 Ethernet device bce # Broadcom BCM5706/BCM5708 Gigabit Ethernet device bfe # Broadcom BCM440x 10/100 Ethernet device bge # Broadcom BCM570xx Gigabit Ethernet device cas # Sun Cassini/Cassini+ and NS DP83065 Saturn device dc # DEC/Intel 21143 and various workalikes device et # Agere ET1310 10/100/Gigabit Ethernet device fxp # Intel EtherExpress PRO/100B (82557, 82558) device gem # Sun GEM/Sun ERI/Apple GMAC device hme # Sun HME (Happy Meal Ethernet) device jme # JMicron JMC250 Gigabit/JMC260 Fast Ethernet device lge # Level 1 LXT1001 gigabit Ethernet device msk # Marvell/SysKonnect Yukon II Gigabit Ethernet device nfe # nVidia nForce MCP on-board Ethernet device nge # NatSemi DP83820 gigabit Ethernet device pcn # AMD Am79C97x PCI 10/100 (precedence over 'le') device re # RealTek 8139C+/8169/8169S/8110S device rl # RealTek 8129/8139 device sf # Adaptec AIC-6915 (``Starfire'') device sge # Silicon Integrated Systems SiS190/191 device sis # Silicon Integrated Systems SiS 900/SiS 7016 device sk # SysKonnect SK-984x & SK-982x gigabit Ethernet device ste # Sundance ST201 (D-Link DFE-550TX) device stge # Sundance/Tamarack TC9021 gigabit Ethernet device tl # Texas Instruments ThunderLAN device tx # SMC EtherPower II (83c170 ``EPIC'') device vge # VIA VT612x gigabit Ethernet device vr # VIA Rhine, Rhine II device wb # Winbond W89C840F device xl # 3Com 3c90x (``Boomerang'', ``Cyclone'') # Wireless NIC cards device wlan # 802.11 support options IEEE80211_DEBUG # enable debug msgs options IEEE80211_AMPDU_AGE # age frames in AMPDU reorder q's options IEEE80211_SUPPORT_MESH # enable 802.11s draft support device wlan_wep # 802.11 WEP support device wlan_ccmp # 802.11 CCMP support device wlan_tkip # 802.11 TKIP support device wlan_amrr # AMRR transmit 
rate control algorithm device an # Aironet 4500/4800 802.11 wireless NICs. device ath # Atheros NICs device ath_pci # Atheros pci/cardbus glue device ath_hal # pci/cardbus chip support options AH_SUPPORT_AR5416 # enable AR5416 tx/rx descriptors options AH_AR5416_INTERRUPT_MITIGATION # AR5416 interrupt mitigation options ATH_ENABLE_11N # Enable 802.11n support for AR5416 and later device ath_rate_sample # SampleRate tx rate control for ath #device bwi # Broadcom BCM430x/BCM431x wireless NICs. #device bwn # Broadcom BCM43xx wireless NICs. device ipw # Intel 2100 wireless NICs. device iwi # Intel 2200BG/2225BG/2915ABG wireless NICs. device iwn # Intel 4965/1000/5000/6000 wireless NICs. device malo # Marvell Libertas wireless NICs. device mwl # Marvell 88W8363 802.11n wireless NICs. device ral # Ralink Technology RT2500 wireless NICs. device wi # WaveLAN/Intersil/Symbol 802.11 wireless NICs. device wpi # Intel 3945ABG wireless NICs. # Pseudo devices. device crypto # core crypto support device loop # Network loopback device random # Entropy device device padlock_rng # VIA Padlock RNG device rdrand_rng # Intel Bull Mountain RNG device ether # Ethernet support device vlan # 802.1Q VLAN support device tun # Packet tunnel. device md # Memory "disks" device gif # IPv6 and IPv4 tunneling device firmware # firmware assist module # The `bpf' device enables the Berkeley Packet Filter. # Be aware of the administrative consequences of enabling this! # Note that 'bpf' is required for DHCP. device bpf # Berkeley packet filter # USB support options USB_DEBUG # enable debug msgs device uhci # UHCI PCI->USB interface device ohci # OHCI PCI->USB interface device ehci # EHCI PCI->USB interface (USB 2.0) device xhci # XHCI PCI->USB interface (USB 3.0) device usb # USB Bus (required) device ukbd # Keyboard device umass # Disks/Mass storage - Requires scbus and da # Sound support device sound # Generic sound driver (required) device snd_cmi # CMedia CMI8338/CMI8738 device snd_csa # Crystal Semiconductor CS461x/428x device snd_emu10kx # Creative SoundBlaster Live! and Audigy device snd_es137x # Ensoniq AudioPCI ES137x device snd_hda # Intel High Definition Audio device snd_ich # Intel, NVidia and other ICH AC'97 Audio device snd_via8233 # VIA VT8233x Audio # MMC/SD device mmc # MMC/SD bus device mmcsd # MMC/SD memory card device sdhci # Generic PCI SD Host Controller # VirtIO support device virtio # Generic VirtIO bus (required) device virtio_pci # VirtIO PCI device device vtnet # VirtIO Ethernet device device virtio_blk # VirtIO Block device device virtio_scsi # VirtIO SCSI device device virtio_balloon # VirtIO Memory Balloon device # HyperV drivers and enhancement support device hyperv # HyperV drivers # Xen HVM Guest Optimizations # NOTE: XENHVM depends on xenpci. They must be added or removed together. 
options XENHVM # Xen HVM kernel infrastructure device xenpci # Xen HVM Hypervisor services driver # VMware support device vmx # VMware VMXNET3 Ethernet # Netmap provides direct access to TX/RX rings on supported NICs device netmap # netmap(4) support + +# evdev interface +options EVDEV_SUPPORT # evdev support in legacy drivers +device evdev # input event device support +device uinput # install /dev/uinput cdev Index: projects/clang700-import/sys/amd64/conf/MINIMAL =================================================================== --- projects/clang700-import/sys/amd64/conf/MINIMAL (revision 340426) +++ projects/clang700-import/sys/amd64/conf/MINIMAL (revision 340427) @@ -1,149 +1,154 @@ # # MINIMAL -- Mostly Minimal kernel configuration file for FreeBSD/amd64 # # Many definitions of minimal are possible. The one this file follows is # GENERIC, minus all functionality that can be replaced by loading kernel # modules. # # Exceptions: # o While UFS is buildable as a module, the current module lacks # some features (ACL, GJOURNAL) that GENERIC includes. # o acpi as a module has been reported flakey and not well tested, so # is included in the kernel. # o random is included due to uncertaty... # o Many networking things are included # # For now, please run changes to these list past imp@freebsd.org # # For more information on this file, please read the config(5) manual page, # and/or the handbook section on Kernel Configuration Files: # # https://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html # # The handbook is also available locally in /usr/share/doc/handbook # if you've installed the doc distribution, otherwise always see the # FreeBSD World Wide Web server (https://www.FreeBSD.org/) for the # latest information. # # An exhaustive list of options and more detailed explanations of the # device lines is also present in the ../../conf/NOTES and NOTES files. # If you are in doubt as to the purpose or necessity of a line, check first # in NOTES. 
# # $FreeBSD$ cpu HAMMER ident MINIMAL makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols makeoptions WITH_CTF=1 # Run ctfconvert(1) for DTrace support options SCHED_ULE # ULE scheduler options NUMA # Non-Uniform Memory Architecture support options PREEMPTION # Enable kernel thread preemption options INET # InterNETworking options INET6 # IPv6 communications protocols options TCP_OFFLOAD # TCP offload options SCTP # Stream Control Transmission Protocol options FFS # Berkeley Fast Filesystem options SOFTUPDATES # Enable FFS soft updates support options UFS_ACL # Support for access control lists options UFS_DIRHASH # Improve performance on big directories options UFS_GJOURNAL # Enable gjournal-based UFS journaling options QUOTA # Enable disk quotas for UFS options MD_ROOT # MD is a potential root device options COMPAT_FREEBSD32 # Compatible with i386 binaries options COMPAT_FREEBSD4 # Compatible with FreeBSD4 options COMPAT_FREEBSD5 # Compatible with FreeBSD5 options COMPAT_FREEBSD6 # Compatible with FreeBSD6 options COMPAT_FREEBSD7 # Compatible with FreeBSD7 options COMPAT_FREEBSD9 # Compatible with FreeBSD9 options COMPAT_FREEBSD10 # Compatible with FreeBSD10 options COMPAT_FREEBSD11 # Compatible with FreeBSD11 options SCSI_DELAY=5000 # Delay (in ms) before probing SCSI options KTRACE # ktrace(1) support options STACK # stack(9) support options SYSVSHM # SYSV-style shared memory options SYSVMSG # SYSV-style message queues options SYSVSEM # SYSV-style semaphores options _KPOSIX_PRIORITY_SCHEDULING # POSIX P1003_1B real-time extensions options PRINTF_BUFR_SIZE=128 # Prevent printf output being interspersed. options KBD_INSTALL_CDEV # install a CDEV entry in /dev options HWPMC_HOOKS # Necessary kernel hooks for hwpmc(4) options AUDIT # Security event auditing options CAPABILITY_MODE # Capsicum capability mode options CAPABILITIES # Capsicum capabilities options MAC # TrustedBSD MAC Framework options KDTRACE_FRAME # Ensure frames are compiled in options KDTRACE_HOOKS # Kernel DTrace hooks options DDB_CTF # Kernel ELF linker loads CTF data options INCLUDE_CONFIG_FILE # Include this file in kernel # Debugging support. Always need this: options KDB # Enable kernel debugger support. options KDB_TRACE # Print a stack trace for a panic. # For full debugger support use (turn off in stable branch): options DDB # Support DDB. options GDB # Support remote GDB. options DEADLKRES # Enable the deadlock resolver options INVARIANTS # Enable calls of extra sanity checking options INVARIANT_SUPPORT # Extra sanity checks of internal structures, required by INVARIANTS options WITNESS # Enable checks to detect deadlocks and cycles options WITNESS_SKIPSPIN # Don't run witness on spinlocks for speed options MALLOC_DEBUG_MAXZONES=8 # Separate malloc(9) zones options VERBOSE_SYSINIT=0 # Support debug.verbose_sysinit, off by default # Make an SMP-capable kernel by default options SMP # Symmetric MultiProcessor Kernel options EARLY_AP_STARTUP # CPU frequency control device cpufreq # Bus support. 
device acpi options ACPI_DMAR device pci # atkbdc0 controls both the keyboard and the PS/2 mouse device atkbdc # AT keyboard controller device atkbd # AT keyboard device psm # PS/2 mouse device kbdmux # keyboard multiplexer device vga # VGA video card driver options VESA # Add support for VESA BIOS Extensions (VBE) device splash # Splash screen and screen saver support # syscons is the default console driver, resembling an SCO console device sc options SC_PIXEL_MODE # add support for the raster text mode # vt is the new video console driver device vt device vt_vga device vt_efifb device agp # support several AGP chipsets # Pseudo devices. device loop # Network loopback device random # Entropy device device padlock_rng # VIA Padlock RNG device rdrand_rng # Intel Bull Mountain RNG device ether # Ethernet support device vlan # 802.1Q VLAN support device tun # Packet tunnel. device gif # IPv6 and IPv4 tunneling # The `bpf' device enables the Berkeley Packet Filter. # Be aware of the administrative consequences of enabling this! # Note that 'bpf' is required for DHCP. device bpf # Berkeley packet filter # Xen HVM Guest Optimizations # NOTE: XENHVM depends on xenpci. They must be added or removed together. options XENHVM # Xen HVM kernel infrastructure device xenpci # Xen HVM Hypervisor services driver + +# evdev interface +options EVDEV_SUPPORT # evdev support in legacy drivers +device evdev # input event device support +device uinput # install /dev/uinput cdev Index: projects/clang700-import/sys/amd64/conf/NOTES =================================================================== --- projects/clang700-import/sys/amd64/conf/NOTES (revision 340426) +++ projects/clang700-import/sys/amd64/conf/NOTES (revision 340427) @@ -1,678 +1,683 @@ # # NOTES -- Lines that can be cut/pasted into kernel and hints configs. # # This file contains machine dependent kernel configuration notes. For # machine independent notes, look in /sys/conf/NOTES. # # $FreeBSD$ # # # We want LINT to cover profiling as well. profile 2 # # Enable the kernel DTrace hooks which are required to load the DTrace # kernel modules. # options KDTRACE_HOOKS # DTrace core # NOTE: introduces CDDL-licensed components into the kernel #device dtrace # DTrace modules #device dtrace_profile #device dtrace_sdt #device dtrace_fbt #device dtrace_systrace #device dtrace_prototype #device dtnfscl #device dtmalloc # Alternatively include all the DTrace modules #device dtraceall ##################################################################### # SMP OPTIONS: # # Notes: # # IPI_PREEMPTION instructs the kernel to preempt threads running on other # CPUS if needed. Relies on the PREEMPTION option # Optional: options IPI_PREEMPTION device atpic # Optional legacy pic support device mptable # Optional MPSPEC mptable support # # Watchdog routines. # options MP_WATCHDOG # Debugging options. # options COUNT_XINVLTLB_HITS # Counters for TLB events options COUNT_IPIS # Per-CPU IPI interrupt counters ##################################################################### # CPU OPTIONS # # You must specify at least one CPU (the one you intend to run on); # deleting the specification for CPUs you don't need to use may make # parts of the system run faster. # cpu HAMMER # aka K8, aka Opteron & Athlon64 # # Options for CPU features. 
# ##################################################################### # NETWORKING OPTIONS # # DEVICE_POLLING adds support for mixed interrupt-polling handling # of network device drivers, which has significant benefits in terms # of robustness to overloads and responsivity, as well as permitting # accurate scheduling of the CPU time between kernel network processing # and other activities. The drawback is a moderate (up to 1/HZ seconds) # potential increase in response times. # It is strongly recommended to use HZ=1000 or 2000 with DEVICE_POLLING # to achieve smoother behaviour. # Additionally, you can enable/disable polling at runtime with help of # the ifconfig(8) utility, and select the CPU fraction reserved to # userland with the sysctl variable kern.polling.user_frac # (default 50, range 0..100). # # Not all device drivers support this mode of operation at the time of # this writing. See polling(4) for more details. options DEVICE_POLLING # BPF_JITTER adds support for BPF just-in-time compiler. options BPF_JITTER # OpenFabrics Enterprise Distribution (Infiniband). options OFED options OFED_DEBUG_INIT # Sockets Direct Protocol options SDP options SDP_DEBUG # IP over Infiniband options IPOIB options IPOIB_DEBUG options IPOIB_CM ##################################################################### # CLOCK OPTIONS # Provide read/write access to the memory in the clock chip. device nvram # Access to rtc cmos via /dev/nvram ##################################################################### # MISCELLANEOUS DEVICES AND OPTIONS device speaker #Play IBM BASIC-style noises out your speaker hint.speaker.0.at="isa" hint.speaker.0.port="0x61" device gzip #Exec gzipped a.out's. REQUIRES COMPAT_AOUT! ##################################################################### # HARDWARE BUS CONFIGURATION # # ISA bus # device isa # # Options for `isa': # # AUTO_EOI_1 enables the `automatic EOI' feature for the master 8259A # interrupt controller. This saves about 0.7-1.25 usec for each interrupt. # This option breaks suspend/resume on some portables. # # AUTO_EOI_2 enables the `automatic EOI' feature for the slave 8259A # interrupt controller. This saves about 0.7-1.25 usec for each interrupt. # Automatic EOI is documented not to work for for the slave with the # original i8259A, but it works for some clones and some integrated # versions. # # MAXMEM specifies the amount of RAM on the machine; if this is not # specified, FreeBSD will first read the amount of memory from the CMOS # RAM, so the amount of memory will initially be limited to 64MB or 16MB # depending on the BIOS. If the BIOS reports 64MB, a memory probe will # then attempt to detect the installed amount of RAM. If this probe # fails to detect >64MB RAM you will have to use the MAXMEM option. # The amount is in kilobytes, so for a machine with 128MB of RAM, it would # be 131072 (128 * 1024). # # BROKEN_KEYBOARD_RESET disables the use of the keyboard controller to # reset the CPU for reboot. This is needed on some systems with broken # keyboard controllers. options AUTO_EOI_1 #options AUTO_EOI_2 options MAXMEM=(128*1024) #options BROKEN_KEYBOARD_RESET # # AGP GART support device agp # # AGP debugging. # options AGP_DEBUG ##################################################################### # HARDWARE DEVICE CONFIGURATION # To include support for VGA VESA video modes options VESA # Turn on extra debugging checks and output for VESA support. 
options VESA_DEBUG device dpms # DPMS suspend & resume via VESA BIOS # x86 real mode BIOS emulator, required by atkbdc/dpms/vesa options X86BIOS # # Optional devices: # # PS/2 mouse device psm hint.psm.0.at="atkbdc" hint.psm.0.irq="12" # Options for psm: options PSM_HOOKRESUME #hook the system resume event, useful #for some laptops options PSM_RESETAFTERSUSPEND #reset the device at the resume event # The keyboard controller; it controls the keyboard and the PS/2 mouse. device atkbdc hint.atkbdc.0.at="isa" hint.atkbdc.0.port="0x060" # The AT keyboard device atkbd hint.atkbd.0.at="atkbdc" hint.atkbd.0.irq="1" # Options for atkbd: options ATKBD_DFLT_KEYMAP # specify the built-in keymap makeoptions ATKBD_DFLT_KEYMAP=fr.dvorak # `flags' for atkbd: # 0x01 Force detection of keyboard, else we always assume a keyboard # 0x02 Don't reset keyboard, useful for some newer ThinkPads # 0x03 Force detection and avoid reset, might help with certain # dockingstations # 0x04 Old-style (XT) keyboard support, useful for older ThinkPads # Video card driver for VGA adapters. device vga hint.vga.0.at="isa" # Options for vga: # Try the following option if the mouse pointer is not drawn correctly # or font does not seem to be loaded properly. May cause flicker on # some systems. options VGA_ALT_SEQACCESS # If you can dispense with some vga driver features, you may want to # use the following options to save some memory. #options VGA_NO_FONT_LOADING # don't save/load font #options VGA_NO_MODE_CHANGE # don't change video modes # Older video cards may require this option for proper operation. options VGA_SLOW_IOACCESS # do byte-wide i/o's to TS and GDC regs # The following option probably won't work with the LCD displays. options VGA_WIDTH90 # support 90 column modes # Debugging. options VGA_DEBUG # vt(4) drivers. device vt_vga # VGA device vt_efifb # EFI framebuffer # Linear framebuffer driver for S3 VESA 1.2 cards. Works on top of VESA. device s3pci # 3Dfx Voodoo Graphics, Voodoo II /dev/3dfx CDEV support. This will create # the /dev/3dfx0 device to work with glide implementations. This should get # linked to /dev/3dfx and /dev/voodoo. Note that this is not the same as # the tdfx DRI module from XFree86 and is completely unrelated. # # To enable Linuxulator support, one must also include COMPAT_LINUX in the # config as well. The other option is to load both as modules. device tdfx # Enable 3Dfx Voodoo support #XXX#device tdfx_linux # Enable Linuxulator support # # ACPI support using the Intel ACPI Component Architecture reference # implementation. # # ACPI_DEBUG enables the use of the debug.acpi.level and debug.acpi.layer # kernel environment variables to select initial debugging levels for the # Intel ACPICA code. (Note that the Intel code must also have USE_DEBUGGER # defined when it is built). device acpi options ACPI_DEBUG # The cpufreq(4) driver provides support for non-ACPI CPU frequency control device cpufreq # Direct Rendering modules for 3D acceleration. device drm # DRM core module required by DRM drivers device mach64drm # ATI Rage Pro, Rage Mobility P/M, Rage XL device mgadrm # AGP Matrox G200, G400, G450, G550 device r128drm # ATI Rage 128 device savagedrm # S3 Savage3D, Savage4 device sisdrm # SiS 300/305, 540, 630 device tdfxdrm # 3dfx Voodoo 3/4/5 and Banshee device viadrm # VIA options DRM_DEBUG # Include debug printfs (slow) # # Network interfaces: # # bxe: Broadcom NetXtreme II (BCM5771X/BCM578XX) PCIe 10Gb Ethernet # adapters. 
# ed: Western Digital and SMC 80xx; Novell NE1000 and NE2000; 3Com 3C503 # HP PC Lan+, various PC Card devices # (requires miibus) # ipw: Intel PRO/Wireless 2100 IEEE 802.11 adapter # Requires the ipw firmware module # iwi: Intel PRO/Wireless 2200BG/2225BG/2915ABG IEEE 802.11 adapters # Requires the iwi firmware module # iwn: Intel Wireless WiFi Link 1000/105/135/2000/4965/5000/6000/6050 abgn # 802.11 network adapters # Requires the iwn firmware module # mthca: Mellanox HCA InfiniBand # mlx4ib: Mellanox ConnectX HCA InfiniBand # mlx4en: Mellanox ConnectX HCA Ethernet # nfe: nVidia nForce MCP on-board Ethernet Networking (BSD open source) # sfxge: Solarflare SFC9000 family 10Gb Ethernet adapters # vmx: VMware VMXNET3 Ethernet (BSD open source) # wpi: Intel 3945ABG Wireless LAN controller # Requires the wpi firmware module device bxe # Broadcom NetXtreme II BCM5771X/BCM578XX 10GbE device ed # NE[12]000, SMC Ultra, 3c503, DS8390 cards options ED_3C503 options ED_HPP options ED_SIC device ipw # Intel 2100 wireless NICs. device iwi # Intel 2200BG/2225BG/2915ABG wireless NICs. device iwn # Intel 4965/1000/5000/6000 wireless NICs. device ixl # Intel 700 Series Physical Function device iavf # Intel Adaptive Virtual Function device mthca # Mellanox HCA InfiniBand device mlx4 # Shared code module between IB and Ethernet device mlx4ib # Mellanox ConnectX HCA InfiniBand device mlx4en # Mellanox ConnectX HCA Ethernet device nfe # nVidia nForce MCP on-board Ethernet device sfxge # Solarflare SFC9000 10Gb Ethernet device vmx # VMware VMXNET3 Ethernet device wpi # Intel 3945ABG wireless NICs. # IEEE 802.11 adapter firmware modules # Intel PRO/Wireless 2100 firmware: # ipwfw: BSS/IBSS/monitor mode firmware # ipwbssfw: BSS mode firmware # ipwibssfw: IBSS mode firmware # ipwmonitorfw: Monitor mode firmware # Intel PRO/Wireless 2200BG/2225BG/2915ABG firmware: # iwifw: BSS/IBSS/monitor mode firmware # iwibssfw: BSS mode firmware # iwiibssfw: IBSS mode firmware # iwimonitorfw: Monitor mode firmware # Intel Wireless WiFi Link 4965/1000/5000/6000 series firmware: # iwnfw: Single module to support all devices # iwn1000fw: Specific module for the 1000 only # iwn105fw: Specific module for the 105 only # iwn135fw: Specific module for the 135 only # iwn2000fw: Specific module for the 2000 only # iwn2030fw: Specific module for the 2030 only # iwn4965fw: Specific module for the 4965 only # iwn5000fw: Specific module for the 5000 only # iwn5150fw: Specific module for the 5150 only # iwn6000fw: Specific module for the 6000 only # iwn6000g2afw: Specific module for the 6000g2a only # iwn6000g2bfw: Specific module for the 6000g2b only # iwn6050fw: Specific module for the 6050 only # wpifw: Intel 3945ABG Wireless LAN Controller firmware device iwifw device iwibssfw device iwiibssfw device iwimonitorfw device ipwfw device ipwbssfw device ipwibssfw device ipwmonitorfw device iwnfw device iwn1000fw device iwn105fw device iwn135fw device iwn2000fw device iwn2030fw device iwn4965fw device iwn5000fw device iwn5150fw device iwn6000fw device iwn6000g2afw device iwn6000g2bfw device iwn6050fw device wpifw # # Non-Transparent Bridge (NTB) drivers # device if_ntb # Virtual NTB network interface device ntb_transport # NTB packet transport driver device ntb # NTB hardware interface device ntb_hw_intel # Intel NTB hardware driver device ntb_hw_plx # PLX NTB hardware driver # #XXX this stores pointers in a 32bit field that is defined by the hardware #device pst # # Areca 11xx and 12xx series of SATA II RAID controllers. # CAM is required. 
# device arcmsr # Areca SATA II RAID # # Microsemi smartpqi controllers. # These controllers have a SCSI-like interface, and require the # CAM infrastructure. # device smartpqi # # 3ware 9000 series PATA/SATA RAID controller driver and options. # The driver is implemented as a SIM, and so, needs the CAM infrastructure. # options TWA_DEBUG # 0-10; 10 prints the most messages. device twa # 3ware 9000 series PATA/SATA RAID # # Adaptec FSA RAID controllers, including integrated DELL controllers, # the Dell PERC 2/QC and the HP NetRAID-4M device aac device aacp # SCSI Passthrough interface (optional, CAM required) # # Adaptec by PMC RAID controllers, Series 6/7/8 and upcoming families device aacraid # Container interface, CAM required # # Highpoint RocketRAID 27xx. device hpt27xx # # Highpoint RocketRAID 182x. device hptmv # # Highpoint DC7280 and R750. device hptnr # # Highpoint RocketRAID. Supports RR172x, RR222x, RR2240, RR232x, RR2340, # RR2210, RR174x, RR2522, RR231x, RR230x. device hptrr # # Highpoint RocketRaid 3xxx series SATA RAID device hptiop # # IBM (now Adaptec) ServeRAID controllers device ips # # Intel integrated Memory Controller (iMC) SMBus controller # Sandybridge-Xeon, Ivybridge-Xeon, Haswell-Xeon, Broadwell-Xeon device imcsmb # # Intel C600 (Patsburg) integrated SAS controller device isci options ISCI_LOGGING # enable debugging in isci HAL # # NVM Express (NVMe) support device nvme # base NVMe driver device nvd # expose NVMe namespaces as disks, depends on nvme # # PMC-Sierra SAS/SATA controller device pmspcv # # SafeNet crypto driver: can be moved to the MI NOTES as soon as # it's tested on a big-endian machine # device safe # SafeNet 1141 options SAFE_DEBUG # enable debugging support: hw.safe.debug options SAFE_RNDTEST # enable rndtest support # # VirtIO support # # The virtio entry provides a generic bus for use by the device drivers. # It must be combined with an interface that communicates with the host. # Multiple such interfaces are defined by the VirtIO specification. FreeBSD # only has support for PCI. Therefore, virtio_pci must be statically # compiled in or loaded as a module for the device drivers to function. # device virtio # Generic VirtIO bus (required) device virtio_pci # VirtIO PCI Interface device vtnet # VirtIO Ethernet device device virtio_blk # VirtIO Block device device virtio_scsi # VirtIO SCSI device device virtio_balloon # VirtIO Memory Balloon device device virtio_random # VirtIO Entropy device device virtio_console # VirtIO Console device # Microsoft Hyper-V enhancement support device hyperv # HyperV drivers # Xen HVM Guest Optimizations options XENHVM # Xen HVM kernel infrastructure device xenpci # Xen HVM Hypervisor services driver ##################################################################### # # Miscellaneous hardware: # # ipmi: Intelligent Platform Management Interface # pbio: Parallel (8255 PPI) basic I/O (mode 0) port (e.g. Advantech PCL-724) # smbios: DMI/SMBIOS entry point # vpd: Vital Product Data kernel interface # asmc: Apple System Management Controller # si: Specialix International SI/XIO or SX intelligent serial card # tpm: Trusted Platform Module # Notes on the Specialix SI/XIO driver: # The host card is memory, not IO mapped. # The Rev 1 host cards use a 64K chunk, on a 32K boundary. # The Rev 2 host cards use a 32K chunk, on a 32K boundary. # The cards can use an IRQ of 11, 12 or 15. 
device ipmi device pbio hint.pbio.0.at="isa" hint.pbio.0.port="0x360" device smbios device vpd device asmc device tpm device padlock_rng # VIA Padlock RNG device rdrand_rng # Intel Bull Mountain RNG device aesni # AES-NI OpenCrypto module device ioat # Intel I/OAT DMA engine # # Laptop/Notebook options: # # # I2C Bus # # # Hardware watchdog timers: # # ichwd: Intel ICH watchdog timer # amdsbwd: AMD SB7xx watchdog timer # viawd: VIA south bridge watchdog timer # wbwd: Winbond watchdog timer # device ichwd device amdsbwd device viawd device wbwd # # Temperature sensors: # # coretemp: on-die sensor on Intel Core and newer CPUs # amdtemp: on-die sensor on AMD K8/K10/K11 CPUs # device coretemp device amdtemp # # CPU control pseudo-device. Provides access to MSRs, CPUID info and # microcode update feature. # device cpuctl # # System Management Bus (SMB) # options ENABLE_ALART # Control alarm on Intel intpm driver # # AMD System Management Network (SMN) # device amdsmn # # Number of initial kernel page table pages used for early bootstrap. # This number should include enough pages to map the kernel and any # modules or other data loaded with the kernel by the loader. Each # page table page maps 2MB. # options NKPT=31 # EFI Runtime Services support options EFIRT ##################################################################### # ABI Emulation #XXX keep these here for now and reactivate when support for emulating #XXX these 32 bit binaries is added. # Enable 32-bit runtime support for FreeBSD/i386 binaries. options COMPAT_FREEBSD32 # Enable iBCS2 runtime support for SCO and ISC binaries #XXX#options IBCS2 # Emulate spx device for client side of SVR3 local X interface #XXX#options SPX_HACK # Enable (32-bit) a.out binary support options COMPAT_AOUT # Enable 32-bit runtime support for CloudABI binaries. options COMPAT_CLOUDABI32 # Enable 64-bit runtime support for CloudABI binaries. options COMPAT_CLOUDABI64 # Enable Linux ABI emulation #XXX#options COMPAT_LINUX # Enable 32-bit Linux ABI emulation (requires COMPAT_FREEBSD32). options COMPAT_LINUX32 # Enable the linux-like proc filesystem support (requires COMPAT_LINUX32 # and PSEUDOFS) options LINPROCFS #Enable the linux-like sys filesystem support (requires COMPAT_LINUX32 # and PSEUDOFS) options LINSYSFS ##################################################################### +# ZFS support + +options ZFS + +##################################################################### # VM OPTIONS # KSTACK_PAGES is the number of memory pages to assign to the kernel # stack of each thread. options KSTACK_PAGES=5 # Enable detailed accounting by the PV entry allocator. options PV_STATS ##################################################################### # More undocumented options for linting. # Note that documenting these are not considered an affront. options FB_INSTALL_CDEV # install a CDEV entry in /dev options KBDIO_DEBUG=2 options KBD_MAXRETRY=4 options KBD_MAXWAIT=6 options KBD_RESETDELAY=201 options PSM_DEBUG=1 options TIMER_FREQ=((14318182+6)/12) options VM_KMEM_SIZE options VM_KMEM_SIZE_MAX options VM_KMEM_SIZE_SCALE # Enable NDIS binary driver support options NDISAPI device ndis Index: projects/clang700-import/sys/compat/freebsd32/capabilities.conf =================================================================== --- projects/clang700-import/sys/compat/freebsd32/capabilities.conf (revision 340426) +++ projects/clang700-import/sys/compat/freebsd32/capabilities.conf (nonexistent) @@ -1,298 +0,0 @@ -## -## Copyright (c) 2008-2010 Robert N. M. 
Watson -## Copyright (c) 2016 The FreeBSD Foundation -## All rights reserved. -## -## This software was developed at the University of Cambridge Computer -## Laboratory with support from a grant from Google, Inc. -## -## Portions of this software were developed by Konstantin Belousov -## under sponsorship from the FreeBSD Foundation. -## -## Redistribution and use in source and binary forms, with or without -## modification, are permitted provided that the following conditions -## are met: -## 1. Redistributions of source code must retain the above copyright -## notice, this list of conditions and the following disclaimer. -## 2. Redistributions in binary form must reproduce the above copyright -## notice, this list of conditions and the following disclaimer in the -## documentation and/or other materials provided with the distribution. -## -## THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND -## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -## ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE -## FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -## DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS -## OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -## HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -## LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -## OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF -## SUCH DAMAGE. -## -## List of system calls enabled in freebsd32 capability mode, one name -## per line. See the original list in the sys/kern/capabilities.conf. -## Position of the compat syscall in this file must be identical to -## the master, to facilitate comparision and diagnostic. 
-## -## $FreeBSD$ -## - -__acl_aclcheck_fd -__acl_delete_fd -__acl_get_fd -__acl_set_fd -__mac_get_fd -#__mac_get_pid -__mac_get_proc -__mac_set_fd -__mac_set_proc -freebsd32___sysctl -freebsd32__umtx_op -abort2 -accept -accept4 -aio_cancel -freebsd32_aio_error -freebsd32_aio_fsync -freebsd32_aio_read -freebsd32_aio_return -freebsd32_aio_suspend -freebsd32_aio_waitcomplete -freebsd32_aio_write -#audit -bindat -cap_enter -cap_fcntls_get -cap_fcntls_limit -cap_getmode -freebsd32_cap_ioctls_get -freebsd32_cap_ioctls_limit -__cap_rights_get -cap_rights_limit -freebsd32_clock_getres -freebsd32_clock_gettime -close -closefrom -connectat -#cpuset -freebsd32_cpuset_getaffinity -#freebsd32_cpuset_getid -freebsd32_cpuset_setaffinity -#freebsd32_cpuset_setid -dup -dup2 -extattr_delete_fd -extattr_get_fd -extattr_list_fd -extattr_set_fd -fchflags -fchmod -fchown -freebsd32_fcntl -freebsd32_fexecve -flock -fork -fpathconf -freebsd32_fstat -freebsd32_fstatat -freebsd32_getdirentries -freebsd32_fstatfs -freebsd32_mknodat -freebsd32_ftruncate -freebsd32_lseek -freebsd32_mmap -mmap -freebsd32_pread -freebsd32_pwrite -freebsd32_fstat -fstatfs -fsync -ftruncate -freebsd32_ftruncate -freebsd32_futimens -freebsd32_futimes -getaudit -getaudit_addr -getauid -freebsd32_getcontext -freebsd32_getdents -freebsd32_getdirentries -getdirentries -getdomainname -getdtablesize -getegid -geteuid -gethostid -gethostname -freebsd32_getitimer -getgid -getgroups -getlogin -freebsd32_getpagesize -getpeername -getpgid -getpgrp -getpid -getppid -getpriority -getrandom -getresgid -getresuid -getrlimit -freebsd32_getrusage -getsid -getsockname -getsockopt -freebsd32_gettimeofday -getuid -freebsd32_ioctl -issetugid -freebsd32_kevent -kill -freebsd32_kmq_notify -freebsd32_kmq_setattr -freebsd32_kmq_timedreceive -freebsd32_kmq_timedsend -kqueue -freebsd32_ktimer_create -ktimer_delete -ktimer_getoverrun -freebsd32_ktimer_gettime -freebsd32_ktimer_settime -#ktrace -freebsd32_lio_listio -listen -freebsd32_lseek -madvise -mincore -minherit -mlock -mlockall -freebsd32_mmap -freebsd32_mprotect -msync -munlock -munlockall -munmap -freebsd32_nanosleep -ntp_gettime -freebsd6_freebsd32_aio_read -freebsd6_freebsd32_aio_write -break -freebsd6_freebsd32_lio_listio -chflagsat -faccessat -fchmodat -fchownat -freebsd32_fstatat -freebsd32_futimesat -linkat -mkdirat -mkfifoat -mknodat -openat -readlinkat -renameat -symlinkat -unlinkat -freebsd32_utimensat -pdfork -pdgetpid -pdkill -#pdwait4 # not yet implemented -freebsd32_pipe -pipe2 -poll -freebsd32_ppoll -freebsd32_posix_fallocate -freebsd32_pread -freebsd32_preadv -profil -#ptrace -freebsd32_pwrite -freebsd32_pwritev -read -freebsd32_readv -freebsd6_freebsd32_recv -freebsd32_recvfrom -freebsd32_recvmsg -rtprio -rtprio_thread -sbrk -sched_get_priority_max -sched_get_priority_min -sched_getparam -sched_getscheduler -freebsd32_sched_rr_get_interval -sched_setparam -sched_setscheduler -sched_yield -sctp_generic_recvmsg -sctp_generic_sendmsg -sctp_generic_sendmsg_iov -sctp_peeloff -freebsd32_pselect -freebsd32_select -freebsd6_freebsd32_send -freebsd32_sendfile -freebsd32_sendmsg -sendto -setaudit -setaudit_addr -setauid -freebsd32_setcontext -setegid -seteuid -setgid -freebsd32_setitimer -setpriority -setregid -setresgid -setresuid -setreuid -setrlimit -setsid -setsockopt -setuid -shm_open -shutdown -freebsd32_sigaction -freebsd32_sigaltstack -freebsd32_sigblock -freebsd32_sigpending -sigpending -freebsd32_sigprocmask -sigprocmask -freebsd32_sigqueue -sigqueue -freebsd32_sigreturn -freebsd32_sigsetmask 
-freebsd32_sigstack -freebsd32_sigsuspend -sigsuspend -freebsd32_sigtimedwait -freebsd32_sigvec -freebsd32_sigwaitinfo -sigwait -socket -socketpair -sstk -sync -sys_exit -freebsd32_sysarch -thr_create -thr_exit -thr_kill -#thr_kill2 -freebsd32_thr_new -thr_self -thr_set_name -freebsd32_thr_suspend -thr_wake -umask -utrace -uuidgen -write -freebsd32_writev -yield Property changes on: projects/clang700-import/sys/compat/freebsd32/capabilities.conf ___________________________________________________________________ Deleted: svn:eol-style ## -1 +0,0 ## -native \ No newline at end of property Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Deleted: svn:mime-type ## -1 +0,0 ## -text/plain \ No newline at end of property Index: projects/clang700-import/sys/compat/freebsd32/Makefile =================================================================== --- projects/clang700-import/sys/compat/freebsd32/Makefile (revision 340426) +++ projects/clang700-import/sys/compat/freebsd32/Makefile (revision 340427) @@ -1,19 +1,19 @@ # Makefile for syscall tables # # $FreeBSD$ # Don't use an OBJDIR .OBJDIR: ${.CURDIR} all: @echo "make sysent only" sysent: freebsd32_sysent.c freebsd32_syscall.h freebsd32_proto.h freebsd32_systrace_args.c freebsd32_sysent.c freebsd32_syscalls.c freebsd32_syscall.h freebsd32_proto.h freebsd32_systrace_args.c : \ - ../../kern/makesyscalls.sh syscalls.master syscalls.conf capabilities.conf + ../../kern/makesyscalls.sh syscalls.master syscalls.conf ../../kern/capabilities.conf sh ../../kern/makesyscalls.sh syscalls.master syscalls.conf clean: rm -f freebsd32_sysent.c freebsd32_syscalls.c freebsd32_syscall.h freebsd32_proto.h rm -f freebsd32_systrace_args.c Index: projects/clang700-import/sys/compat/freebsd32/syscalls.conf =================================================================== --- projects/clang700-import/sys/compat/freebsd32/syscalls.conf (revision 340426) +++ projects/clang700-import/sys/compat/freebsd32/syscalls.conf (revision 340427) @@ -1,11 +1,13 @@ # $FreeBSD$ sysnames="freebsd32_syscalls.c" sysproto="freebsd32_proto.h" sysproto_h=_FREEBSD32_SYSPROTO_H_ syshdr="freebsd32_syscall.h" syssw="freebsd32_sysent.c" sysmk="/dev/null" syscallprefix="FREEBSD32_SYS_" switchname="freebsd32_sysent" namesname="freebsd32_syscallnames" systrace="freebsd32_systrace_args.c" +abi_func_prefix="freebsd32_" +capabilities_conf="../../kern/capabilities.conf" Index: projects/clang700-import/sys/conf/dtb.mk =================================================================== --- projects/clang700-import/sys/conf/dtb.mk (revision 340426) +++ projects/clang700-import/sys/conf/dtb.mk (revision 340427) @@ -1,106 +1,106 @@ # $FreeBSD$ # # The include file handles building and installing dtb files. # # +++ variables +++ # # DTC The Device Tree Compiler to use # # DTS List of the dts files to build and install. # # DTBDIR Base path for dtb modules [/boot/dtb] # # DTBOWN .dtb file owner. [${BINOWN}] # # DTBGRP .dtb file group. [${BINGRP}] # # DTBMODE Module file mode. [${BINMODE}] # # DESTDIR The tree where the module gets installed. [not set] # # +++ targets +++ # # install: # install the kernel module; if the Makefile # does not itself define the target install, the targets # beforeinstall and afterinstall may also be used to cause # actions immediately before and after the install target # is executed. # .include # Grab all the options for a kernel build. For backwards compat, we need to # do this after bsd.own.mk. 
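# (Editorial note, not part of the original change.)  Further down,
# DTB=${DTS:R:S/$/.dtb/} turns each source name into its output name by
# stripping the suffix with :R and appending ".dtb" with :S (foo.dts ->
# foo.dtb); DTBO does the same for the .dtbo overlays.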
.include "kern.opts.mk" DTC?= dtc # Search for kernel source tree in standard places. .for _dir in ${.CURDIR}/../.. ${.CURDIR}/../../.. /sys /usr/src/sys .if !defined(SYSDIR) && exists(${_dir}/kern/) SYSDIR= ${_dir:tA} .endif .endfor .if !defined(SYSDIR) || !exists(${SYSDIR}/kern/) .error "can't find kernel source tree" .endif .SUFFIXES: .dtb .dts .dtbo .dtso .PATH: ${SYSDIR}/gnu/dts/${MACHINE} ${SYSDIR}/dts/${MACHINE} ${SYSDIR}/dts/${MACHINE}/overlays DTB=${DTS:R:S/$/.dtb/} DTBO=${DTSO:R:S/$/.dtbo/} all: ${DTB} ${DTBO} .if defined(DTS) -.export DTC +.export DTC ECHO .for _dts in ${DTS} ${_dts:R:S/$/.dtb/}: ${_dts} ${OP_META} @${ECHO} Generating ${.TARGET} from ${_dts} - @env ECHO=${ECHO} ${SYSDIR}/tools/fdt/make_dtb.sh ${SYSDIR} ${_dts} ${.OBJDIR} + @${SYSDIR}/tools/fdt/make_dtb.sh ${SYSDIR} ${_dts} ${.OBJDIR} CLEANFILES+=${_dts:R:S/$/.dtb/} .endfor .endif .if defined(DTSO) -.export DTC +.export DTC ECHO .for _dtso in ${DTSO} ${_dtso:R:S/$/.dtbo/}: ${_dtso} ${OP_META} @${ECHO} Generating ${.TARGET} from ${_dtso} - @env ECHO=${ECHO} ${SYSDIR}/tools/fdt/make_dtbo.sh ${SYSDIR} overlays/${_dtso} ${.OBJDIR} + @${SYSDIR}/tools/fdt/make_dtbo.sh ${SYSDIR} overlays/${_dtso} ${.OBJDIR} CLEANFILES+=${_dtso:R:S/$/.dtbo/} .endfor .endif .if !target(install) .if !target(realinstall) realinstall: _dtbinstall .ORDER: beforeinstall _kmodinstall _dtbinstall: # Need to create this because installkernel doesn't invoke mtree with BSD.root.mtree # to make sure the tree is setup properly. We don't recreate it to avoid duplicate # entries in the NO_ROOT case. test -d ${DESTDIR}${DTBDIR} || ${INSTALL} -d -o ${DTBOWN} -g ${DTBGRP} ${DESTDIR}${DTBDIR} .for _dtb in ${DTB} .if ${MACHINE_CPUARCH} == "aarch64" test -d ${DESTDIR}${DTBDIR}/${_dtb:H} || ${INSTALL} -d -o ${DTBOWN} -g ${DTBGRP} ${DESTDIR}${DTBDIR}/${_dtb:H} ${INSTALL} -o ${DTBOWN} -g ${DTBGRP} -m ${DTBMODE} \ ${_INSTALLFLAGS} ${_dtb:T} ${DESTDIR}${DTBDIR}/${_dtb:H} .else ${INSTALL} -o ${DTBOWN} -g ${DTBGRP} -m ${DTBMODE} \ ${_INSTALLFLAGS} ${_dtb} ${DESTDIR}${DTBDIR}/ .endif .endfor test -d ${DESTDIR}${DTBODIR} || ${INSTALL} -d -o ${DTBOWN} -g ${DTBGRP} ${DESTDIR}${DTBODIR} .for _dtbo in ${DTBO} ${INSTALL} -o ${DTBOWN} -g ${DTBGRP} -m ${DTBMODE} \ ${_INSTALLFLAGS} ${_dtbo} ${DESTDIR}${DTBODIR}/ .endfor .endif # !target(realinstall) .endif # !target(install) .include .include .include Index: projects/clang700-import/sys/contrib/libnv/nvpair.c =================================================================== --- projects/clang700-import/sys/contrib/libnv/nvpair.c (revision 340426) +++ projects/clang700-import/sys/contrib/libnv/nvpair.c (revision 340427) @@ -1,2148 +1,2134 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009-2013 The FreeBSD Foundation * Copyright (c) 2013-2015 Mariusz Zaborski * All rights reserved. * * This software was developed by Pawel Jakub Dawidek under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #ifdef _KERNEL #include #include #include #include #include #else #include #include #include #include #include #include #include #include #include "common_impl.h" #endif #ifdef HAVE_PJDLOG #include #endif #include #include "nv_impl.h" #include "nvlist_impl.h" #include "nvpair_impl.h" #ifndef HAVE_PJDLOG #ifdef _KERNEL #define PJDLOG_ASSERT(...) MPASS(__VA_ARGS__) #define PJDLOG_RASSERT(expr, ...) KASSERT(expr, (__VA_ARGS__)) #define PJDLOG_ABORT(...) panic(__VA_ARGS__) #else #include #define PJDLOG_ASSERT(...) assert(__VA_ARGS__) #define PJDLOG_RASSERT(expr, ...) assert(expr) #define PJDLOG_ABORT(...) abort() #endif #endif #define NVPAIR_MAGIC 0x6e7670 /* "nvp" */ struct nvpair { int nvp_magic; char *nvp_name; int nvp_type; uint64_t nvp_data; size_t nvp_datasize; size_t nvp_nitems; /* Used only for array types. */ nvlist_t *nvp_list; TAILQ_ENTRY(nvpair) nvp_next; }; #define NVPAIR_ASSERT(nvp) do { \ PJDLOG_ASSERT((nvp) != NULL); \ PJDLOG_ASSERT((nvp)->nvp_magic == NVPAIR_MAGIC); \ } while (0) struct nvpair_header { uint8_t nvph_type; uint16_t nvph_namesize; uint64_t nvph_datasize; uint64_t nvph_nitems; } __packed; void nvpair_assert(const nvpair_t *nvp) { NVPAIR_ASSERT(nvp); } static nvpair_t * nvpair_allocv(const char *name, int type, uint64_t data, size_t datasize, size_t nitems) { nvpair_t *nvp; size_t namelen; PJDLOG_ASSERT(type >= NV_TYPE_FIRST && type <= NV_TYPE_LAST); namelen = strlen(name); if (namelen >= NV_NAME_MAX) { ERRNO_SET(ENAMETOOLONG); return (NULL); } nvp = nv_calloc(1, sizeof(*nvp) + namelen + 1); if (nvp != NULL) { nvp->nvp_name = (char *)(nvp + 1); memcpy(nvp->nvp_name, name, namelen); nvp->nvp_name[namelen] = '\0'; nvp->nvp_type = type; nvp->nvp_data = data; nvp->nvp_datasize = datasize; nvp->nvp_nitems = nitems; nvp->nvp_magic = NVPAIR_MAGIC; } return (nvp); } static int nvpair_append(nvpair_t *nvp, const void *value, size_t valsize, size_t datasize) { void *olddata, *data, *valp; size_t oldlen; oldlen = nvp->nvp_nitems * valsize; olddata = (void *)(uintptr_t)nvp->nvp_data; data = nv_realloc(olddata, oldlen + valsize); if (data == NULL) { ERRNO_SET(ENOMEM); return (-1); } valp = (unsigned char *)data + oldlen; memcpy(valp, value, valsize); nvp->nvp_data = (uint64_t)(uintptr_t)data; nvp->nvp_datasize += datasize; nvp->nvp_nitems++; return (0); } nvlist_t * nvpair_nvlist(const nvpair_t *nvp) { NVPAIR_ASSERT(nvp); return (nvp->nvp_list); } nvpair_t * nvpair_next(const nvpair_t *nvp) { NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_list != NULL); return (TAILQ_NEXT(nvp, nvp_next)); } nvpair_t * nvpair_prev(const nvpair_t *nvp) { NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_list != NULL); return (TAILQ_PREV(nvp, nvl_head, nvp_next)); } void nvpair_insert(struct 
nvl_head *head, nvpair_t *nvp, nvlist_t *nvl) { NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_list == NULL); PJDLOG_ASSERT((nvlist_flags(nvl) & NV_FLAG_NO_UNIQUE) != 0 || !nvlist_exists(nvl, nvpair_name(nvp))); TAILQ_INSERT_TAIL(head, nvp, nvp_next); nvp->nvp_list = nvl; } static void nvpair_remove_nvlist(nvpair_t *nvp) { nvlist_t *nvl; /* XXX: DECONST is bad, mkay? */ nvl = __DECONST(nvlist_t *, nvpair_get_nvlist(nvp)); PJDLOG_ASSERT(nvl != NULL); nvlist_set_parent(nvl, NULL); } static void nvpair_remove_nvlist_array(nvpair_t *nvp) { nvlist_t **nvlarray; size_t count, i; /* XXX: DECONST is bad, mkay? */ nvlarray = __DECONST(nvlist_t **, nvpair_get_nvlist_array(nvp, &count)); for (i = 0; i < count; i++) { nvlist_set_array_next(nvlarray[i], NULL); nvlist_set_parent(nvlarray[i], NULL); } } void nvpair_remove(struct nvl_head *head, nvpair_t *nvp, const nvlist_t *nvl) { NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_list == nvl); if (nvpair_type(nvp) == NV_TYPE_NVLIST) nvpair_remove_nvlist(nvp); else if (nvpair_type(nvp) == NV_TYPE_NVLIST_ARRAY) nvpair_remove_nvlist_array(nvp); TAILQ_REMOVE(head, nvp, nvp_next); nvp->nvp_list = NULL; } nvpair_t * nvpair_clone(const nvpair_t *nvp) { nvpair_t *newnvp; const char *name; const void *data; size_t datasize; NVPAIR_ASSERT(nvp); name = nvpair_name(nvp); switch (nvpair_type(nvp)) { case NV_TYPE_NULL: newnvp = nvpair_create_null(name); break; case NV_TYPE_BOOL: newnvp = nvpair_create_bool(name, nvpair_get_bool(nvp)); break; case NV_TYPE_NUMBER: newnvp = nvpair_create_number(name, nvpair_get_number(nvp)); break; case NV_TYPE_STRING: newnvp = nvpair_create_string(name, nvpair_get_string(nvp)); break; case NV_TYPE_NVLIST: newnvp = nvpair_create_nvlist(name, nvpair_get_nvlist(nvp)); break; case NV_TYPE_BINARY: data = nvpair_get_binary(nvp, &datasize); newnvp = nvpair_create_binary(name, data, datasize); break; case NV_TYPE_BOOL_ARRAY: data = nvpair_get_bool_array(nvp, &datasize); newnvp = nvpair_create_bool_array(name, data, datasize); break; case NV_TYPE_NUMBER_ARRAY: data = nvpair_get_number_array(nvp, &datasize); newnvp = nvpair_create_number_array(name, data, datasize); break; case NV_TYPE_STRING_ARRAY: data = nvpair_get_string_array(nvp, &datasize); newnvp = nvpair_create_string_array(name, data, datasize); break; case NV_TYPE_NVLIST_ARRAY: data = nvpair_get_nvlist_array(nvp, &datasize); newnvp = nvpair_create_nvlist_array(name, data, datasize); break; #ifndef _KERNEL case NV_TYPE_DESCRIPTOR: newnvp = nvpair_create_descriptor(name, nvpair_get_descriptor(nvp)); break; case NV_TYPE_DESCRIPTOR_ARRAY: data = nvpair_get_descriptor_array(nvp, &datasize); newnvp = nvpair_create_descriptor_array(name, data, datasize); break; #endif default: PJDLOG_ABORT("Unknown type: %d.", nvpair_type(nvp)); } return (newnvp); } size_t nvpair_header_size(void) { return (sizeof(struct nvpair_header)); } size_t nvpair_size(const nvpair_t *nvp) { NVPAIR_ASSERT(nvp); return (nvp->nvp_datasize); } unsigned char * nvpair_pack_header(const nvpair_t *nvp, unsigned char *ptr, size_t *leftp) { struct nvpair_header nvphdr; size_t namesize; NVPAIR_ASSERT(nvp); nvphdr.nvph_type = nvp->nvp_type; namesize = strlen(nvp->nvp_name) + 1; PJDLOG_ASSERT(namesize > 0 && namesize <= UINT16_MAX); nvphdr.nvph_namesize = namesize; nvphdr.nvph_datasize = nvp->nvp_datasize; nvphdr.nvph_nitems = nvp->nvp_nitems; PJDLOG_ASSERT(*leftp >= sizeof(nvphdr)); memcpy(ptr, &nvphdr, sizeof(nvphdr)); ptr += sizeof(nvphdr); *leftp -= sizeof(nvphdr); PJDLOG_ASSERT(*leftp >= namesize); memcpy(ptr, nvp->nvp_name, namesize); 
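	/*
	 * Illustrative sketch of the wire format (inferred from this function
	 * and the per-type pack routines, not normative): at this point the
	 * output buffer holds the fixed header followed by the NUL-terminated
	 * name, and the type-specific pack routine run by the caller appends
	 * the payload:
	 *
	 *   +----------------------+------------------------+----------------------+
	 *   | struct nvpair_header | name (nvph_namesize    | data                 |
	 *   | (__packed)           | bytes, incl. '\0')     | (nvph_datasize bytes)|
	 *   +----------------------+------------------------+----------------------+
	 *
	 * so one packed pair occupies roughly
	 *   sizeof(struct nvpair_header) + nvph_namesize + nvph_datasize
	 * bytes; nvpair_header_size() reports the first term and nvpair_size()
	 * the last (nested nvlists are packed recursively after their header).
	 */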
ptr += namesize; *leftp -= namesize; return (ptr); } unsigned char * nvpair_pack_null(const nvpair_t *nvp, unsigned char *ptr, size_t *leftp __unused) { NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_NULL); return (ptr); } unsigned char * nvpair_pack_bool(const nvpair_t *nvp, unsigned char *ptr, size_t *leftp) { uint8_t value; NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_BOOL); value = (uint8_t)nvp->nvp_data; PJDLOG_ASSERT(*leftp >= sizeof(value)); memcpy(ptr, &value, sizeof(value)); ptr += sizeof(value); *leftp -= sizeof(value); return (ptr); } unsigned char * nvpair_pack_number(const nvpair_t *nvp, unsigned char *ptr, size_t *leftp) { uint64_t value; NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_NUMBER); value = (uint64_t)nvp->nvp_data; PJDLOG_ASSERT(*leftp >= sizeof(value)); memcpy(ptr, &value, sizeof(value)); ptr += sizeof(value); *leftp -= sizeof(value); return (ptr); } unsigned char * nvpair_pack_string(const nvpair_t *nvp, unsigned char *ptr, size_t *leftp) { NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_STRING); PJDLOG_ASSERT(*leftp >= nvp->nvp_datasize); memcpy(ptr, (const void *)(intptr_t)nvp->nvp_data, nvp->nvp_datasize); ptr += nvp->nvp_datasize; *leftp -= nvp->nvp_datasize; return (ptr); } unsigned char * nvpair_pack_nvlist_up(unsigned char *ptr, size_t *leftp) { struct nvpair_header nvphdr; size_t namesize; const char *name = ""; namesize = 1; nvphdr.nvph_type = NV_TYPE_NVLIST_UP; nvphdr.nvph_namesize = namesize; nvphdr.nvph_datasize = 0; nvphdr.nvph_nitems = 0; PJDLOG_ASSERT(*leftp >= sizeof(nvphdr)); memcpy(ptr, &nvphdr, sizeof(nvphdr)); ptr += sizeof(nvphdr); *leftp -= sizeof(nvphdr); PJDLOG_ASSERT(*leftp >= namesize); memcpy(ptr, name, namesize); ptr += namesize; *leftp -= namesize; return (ptr); } unsigned char * nvpair_pack_nvlist_array_next(unsigned char *ptr, size_t *leftp) { struct nvpair_header nvphdr; size_t namesize; const char *name = ""; namesize = 1; nvphdr.nvph_type = NV_TYPE_NVLIST_ARRAY_NEXT; nvphdr.nvph_namesize = namesize; nvphdr.nvph_datasize = 0; nvphdr.nvph_nitems = 0; PJDLOG_ASSERT(*leftp >= sizeof(nvphdr)); memcpy(ptr, &nvphdr, sizeof(nvphdr)); ptr += sizeof(nvphdr); *leftp -= sizeof(nvphdr); PJDLOG_ASSERT(*leftp >= namesize); memcpy(ptr, name, namesize); ptr += namesize; *leftp -= namesize; return (ptr); } #ifndef _KERNEL unsigned char * nvpair_pack_descriptor(const nvpair_t *nvp, unsigned char *ptr, int64_t *fdidxp, size_t *leftp) { int64_t value; NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_DESCRIPTOR); value = (int64_t)nvp->nvp_data; if (value != -1) { /* * If there is a real descriptor here, we change its number * to position in the array of descriptors send via control * message. 
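	 * For example, with *fdidxp starting at zero, the first real
	 * descriptor packed for this nvlist is written as index 0, the next
	 * as 1, and so on, while a value of -1 is written out unchanged.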
*/ PJDLOG_ASSERT(fdidxp != NULL); value = *fdidxp; (*fdidxp)++; } PJDLOG_ASSERT(*leftp >= sizeof(value)); memcpy(ptr, &value, sizeof(value)); ptr += sizeof(value); *leftp -= sizeof(value); return (ptr); } #endif unsigned char * nvpair_pack_binary(const nvpair_t *nvp, unsigned char *ptr, size_t *leftp) { NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_BINARY); PJDLOG_ASSERT(*leftp >= nvp->nvp_datasize); memcpy(ptr, (const void *)(intptr_t)nvp->nvp_data, nvp->nvp_datasize); ptr += nvp->nvp_datasize; *leftp -= nvp->nvp_datasize; return (ptr); } unsigned char * nvpair_pack_bool_array(const nvpair_t *nvp, unsigned char *ptr, size_t *leftp) { NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_BOOL_ARRAY); PJDLOG_ASSERT(*leftp >= nvp->nvp_datasize); memcpy(ptr, (const void *)(intptr_t)nvp->nvp_data, nvp->nvp_datasize); ptr += nvp->nvp_datasize; *leftp -= nvp->nvp_datasize; return (ptr); } unsigned char * nvpair_pack_number_array(const nvpair_t *nvp, unsigned char *ptr, size_t *leftp) { NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_NUMBER_ARRAY); PJDLOG_ASSERT(*leftp >= nvp->nvp_datasize); memcpy(ptr, (const void *)(intptr_t)nvp->nvp_data, nvp->nvp_datasize); ptr += nvp->nvp_datasize; *leftp -= nvp->nvp_datasize; return (ptr); } unsigned char * nvpair_pack_string_array(const nvpair_t *nvp, unsigned char *ptr, size_t *leftp) { unsigned int ii; size_t size, len; const char * const *array; NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_STRING_ARRAY); PJDLOG_ASSERT(*leftp >= nvp->nvp_datasize); size = 0; array = nvpair_get_string_array(nvp, NULL); PJDLOG_ASSERT(array != NULL); for (ii = 0; ii < nvp->nvp_nitems; ii++) { len = strlen(array[ii]) + 1; PJDLOG_ASSERT(*leftp >= len); memcpy(ptr, (const void *)array[ii], len); size += len; ptr += len; *leftp -= len; } PJDLOG_ASSERT(size == nvp->nvp_datasize); return (ptr); } #ifndef _KERNEL unsigned char * nvpair_pack_descriptor_array(const nvpair_t *nvp, unsigned char *ptr, int64_t *fdidxp, size_t *leftp) { int64_t value; const int *array; unsigned int ii; NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_DESCRIPTOR_ARRAY); PJDLOG_ASSERT(*leftp >= nvp->nvp_datasize); array = nvpair_get_descriptor_array(nvp, NULL); PJDLOG_ASSERT(array != NULL); for (ii = 0; ii < nvp->nvp_nitems; ii++) { PJDLOG_ASSERT(*leftp >= sizeof(value)); value = array[ii]; if (value != -1) { /* * If there is a real descriptor here, we change its * number to position in the array of descriptors send * via control message. 
*/ PJDLOG_ASSERT(fdidxp != NULL); value = *fdidxp; (*fdidxp)++; } memcpy(ptr, &value, sizeof(value)); ptr += sizeof(value); *leftp -= sizeof(value); } return (ptr); } #endif void nvpair_init_datasize(nvpair_t *nvp) { NVPAIR_ASSERT(nvp); if (nvp->nvp_type == NV_TYPE_NVLIST) { if (nvp->nvp_data == 0) { nvp->nvp_datasize = 0; } else { nvp->nvp_datasize = nvlist_size((const nvlist_t *)(intptr_t)nvp->nvp_data); } } } const unsigned char * nvpair_unpack_header(bool isbe, nvpair_t *nvp, const unsigned char *ptr, size_t *leftp) { struct nvpair_header nvphdr; if (*leftp < sizeof(nvphdr)) goto fail; memcpy(&nvphdr, ptr, sizeof(nvphdr)); ptr += sizeof(nvphdr); *leftp -= sizeof(nvphdr); #if NV_TYPE_FIRST > 0 if (nvphdr.nvph_type < NV_TYPE_FIRST) goto fail; #endif if (nvphdr.nvph_type > NV_TYPE_LAST && nvphdr.nvph_type != NV_TYPE_NVLIST_UP && nvphdr.nvph_type != NV_TYPE_NVLIST_ARRAY_NEXT) { goto fail; } #if BYTE_ORDER == BIG_ENDIAN if (!isbe) { nvphdr.nvph_namesize = le16toh(nvphdr.nvph_namesize); nvphdr.nvph_datasize = le64toh(nvphdr.nvph_datasize); } #else if (isbe) { nvphdr.nvph_namesize = be16toh(nvphdr.nvph_namesize); nvphdr.nvph_datasize = be64toh(nvphdr.nvph_datasize); } #endif if (nvphdr.nvph_namesize > NV_NAME_MAX) goto fail; if (*leftp < nvphdr.nvph_namesize) goto fail; if (nvphdr.nvph_namesize < 1) goto fail; if (strnlen((const char *)ptr, nvphdr.nvph_namesize) != (size_t)(nvphdr.nvph_namesize - 1)) { goto fail; } memcpy(nvp->nvp_name, ptr, nvphdr.nvph_namesize); ptr += nvphdr.nvph_namesize; *leftp -= nvphdr.nvph_namesize; if (*leftp < nvphdr.nvph_datasize) goto fail; nvp->nvp_type = nvphdr.nvph_type; nvp->nvp_data = 0; nvp->nvp_datasize = nvphdr.nvph_datasize; nvp->nvp_nitems = nvphdr.nvph_nitems; return (ptr); fail: ERRNO_SET(EINVAL); return (NULL); } const unsigned char * nvpair_unpack_null(bool isbe __unused, nvpair_t *nvp, const unsigned char *ptr, size_t *leftp __unused) { PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_NULL); if (nvp->nvp_datasize != 0) { ERRNO_SET(EINVAL); return (NULL); } return (ptr); } const unsigned char * nvpair_unpack_bool(bool isbe __unused, nvpair_t *nvp, const unsigned char *ptr, size_t *leftp) { uint8_t value; PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_BOOL); if (nvp->nvp_datasize != sizeof(value)) { ERRNO_SET(EINVAL); return (NULL); } if (*leftp < sizeof(value)) { ERRNO_SET(EINVAL); return (NULL); } memcpy(&value, ptr, sizeof(value)); ptr += sizeof(value); *leftp -= sizeof(value); if (value != 0 && value != 1) { ERRNO_SET(EINVAL); return (NULL); } nvp->nvp_data = (uint64_t)value; return (ptr); } const unsigned char * nvpair_unpack_number(bool isbe, nvpair_t *nvp, const unsigned char *ptr, size_t *leftp) { PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_NUMBER); if (nvp->nvp_datasize != sizeof(uint64_t)) { ERRNO_SET(EINVAL); return (NULL); } if (*leftp < sizeof(uint64_t)) { ERRNO_SET(EINVAL); return (NULL); } if (isbe) nvp->nvp_data = be64dec(ptr); else nvp->nvp_data = le64dec(ptr); ptr += sizeof(uint64_t); *leftp -= sizeof(uint64_t); return (ptr); } const unsigned char * nvpair_unpack_string(bool isbe __unused, nvpair_t *nvp, const unsigned char *ptr, size_t *leftp) { PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_STRING); if (*leftp < nvp->nvp_datasize || nvp->nvp_datasize == 0) { ERRNO_SET(EINVAL); return (NULL); } if (strnlen((const char *)ptr, nvp->nvp_datasize) != nvp->nvp_datasize - 1) { ERRNO_SET(EINVAL); return (NULL); } nvp->nvp_data = (uint64_t)(uintptr_t)nv_strdup((const char *)ptr); if (nvp->nvp_data == 0) return (NULL); ptr += nvp->nvp_datasize; *leftp -= 
nvp->nvp_datasize; return (ptr); } const unsigned char * nvpair_unpack_nvlist(bool isbe __unused, nvpair_t *nvp, const unsigned char *ptr, size_t *leftp, size_t nfds, nvlist_t **child) { nvlist_t *value; PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_NVLIST); if (*leftp < nvp->nvp_datasize || nvp->nvp_datasize == 0) { ERRNO_SET(EINVAL); return (NULL); } value = nvlist_create(0); if (value == NULL) return (NULL); ptr = nvlist_unpack_header(value, ptr, nfds, NULL, leftp); if (ptr == NULL) return (NULL); nvp->nvp_data = (uint64_t)(uintptr_t)value; *child = value; return (ptr); } #ifndef _KERNEL const unsigned char * nvpair_unpack_descriptor(bool isbe, nvpair_t *nvp, const unsigned char *ptr, size_t *leftp, const int *fds, size_t nfds) { int64_t idx; PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_DESCRIPTOR); if (nvp->nvp_datasize != sizeof(idx)) { ERRNO_SET(EINVAL); return (NULL); } if (*leftp < sizeof(idx)) { ERRNO_SET(EINVAL); return (NULL); } if (isbe) idx = be64dec(ptr); else idx = le64dec(ptr); if (idx < 0) { ERRNO_SET(EINVAL); return (NULL); } if ((size_t)idx >= nfds) { ERRNO_SET(EINVAL); return (NULL); } nvp->nvp_data = (uint64_t)fds[idx]; ptr += sizeof(idx); *leftp -= sizeof(idx); return (ptr); } #endif const unsigned char * nvpair_unpack_binary(bool isbe __unused, nvpair_t *nvp, const unsigned char *ptr, size_t *leftp) { void *value; PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_BINARY); if (*leftp < nvp->nvp_datasize || nvp->nvp_datasize == 0) { ERRNO_SET(EINVAL); return (NULL); } value = nv_malloc(nvp->nvp_datasize); if (value == NULL) return (NULL); memcpy(value, ptr, nvp->nvp_datasize); ptr += nvp->nvp_datasize; *leftp -= nvp->nvp_datasize; nvp->nvp_data = (uint64_t)(uintptr_t)value; return (ptr); } const unsigned char * nvpair_unpack_bool_array(bool isbe __unused, nvpair_t *nvp, const unsigned char *ptr, size_t *leftp) { uint8_t *value; size_t size; unsigned int i; PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_BOOL_ARRAY); size = sizeof(*value) * nvp->nvp_nitems; if (nvp->nvp_datasize != size || *leftp < size || nvp->nvp_nitems == 0 || size < nvp->nvp_nitems) { ERRNO_SET(EINVAL); return (NULL); } value = nv_malloc(size); if (value == NULL) return (NULL); for (i = 0; i < nvp->nvp_nitems; i++) { value[i] = *(const uint8_t *)ptr; ptr += sizeof(*value); *leftp -= sizeof(*value); } nvp->nvp_data = (uint64_t)(uintptr_t)value; return (ptr); } const unsigned char * nvpair_unpack_number_array(bool isbe, nvpair_t *nvp, const unsigned char *ptr, size_t *leftp) { uint64_t *value; size_t size; unsigned int i; PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_NUMBER_ARRAY); size = sizeof(*value) * nvp->nvp_nitems; if (nvp->nvp_datasize != size || *leftp < size || nvp->nvp_nitems == 0 || size < nvp->nvp_nitems) { ERRNO_SET(EINVAL); return (NULL); } value = nv_malloc(size); if (value == NULL) return (NULL); for (i = 0; i < nvp->nvp_nitems; i++) { if (isbe) value[i] = be64dec(ptr); else value[i] = le64dec(ptr); ptr += sizeof(*value); *leftp -= sizeof(*value); } nvp->nvp_data = (uint64_t)(uintptr_t)value; return (ptr); } const unsigned char * nvpair_unpack_string_array(bool isbe __unused, nvpair_t *nvp, const unsigned char *ptr, size_t *leftp) { ssize_t size; size_t len; const char *tmp; char **value; unsigned int ii, j; PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_STRING_ARRAY); if (*leftp < nvp->nvp_datasize || nvp->nvp_datasize == 0 || nvp->nvp_nitems == 0) { ERRNO_SET(EINVAL); return (NULL); } size = nvp->nvp_datasize; tmp = (const char *)ptr; for (ii = 0; ii < nvp->nvp_nitems; ii++) { len = strnlen(tmp, size - 1) + 1; size -= len; if 
(size < 0) { ERRNO_SET(EINVAL); return (NULL); } tmp += len; } if (size != 0) { ERRNO_SET(EINVAL); return (NULL); } value = nv_malloc(sizeof(*value) * nvp->nvp_nitems); if (value == NULL) return (NULL); for (ii = 0; ii < nvp->nvp_nitems; ii++) { value[ii] = nv_strdup((const char *)ptr); if (value[ii] == NULL) goto out; len = strlen(value[ii]) + 1; ptr += len; *leftp -= len; } nvp->nvp_data = (uint64_t)(uintptr_t)value; return (ptr); out: for (j = 0; j < ii; j++) nv_free(value[j]); nv_free(value); return (NULL); } #ifndef _KERNEL const unsigned char * nvpair_unpack_descriptor_array(bool isbe, nvpair_t *nvp, const unsigned char *ptr, size_t *leftp, const int *fds, size_t nfds) { int64_t idx; size_t size; unsigned int ii; int *array; PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_DESCRIPTOR_ARRAY); size = sizeof(idx) * nvp->nvp_nitems; if (nvp->nvp_datasize != size || *leftp < size || nvp->nvp_nitems == 0 || size < nvp->nvp_nitems) { ERRNO_SET(EINVAL); return (NULL); } array = (int *)nv_malloc(size); if (array == NULL) return (NULL); for (ii = 0; ii < nvp->nvp_nitems; ii++) { if (isbe) idx = be64dec(ptr); else idx = le64dec(ptr); if (idx < 0) { ERRNO_SET(EINVAL); nv_free(array); return (NULL); } if ((size_t)idx >= nfds) { ERRNO_SET(EINVAL); nv_free(array); return (NULL); } array[ii] = (uint64_t)fds[idx]; ptr += sizeof(idx); *leftp -= sizeof(idx); } nvp->nvp_data = (uint64_t)(uintptr_t)array; return (ptr); } #endif const unsigned char * nvpair_unpack_nvlist_array(bool isbe __unused, nvpair_t *nvp, const unsigned char *ptr, size_t *leftp, nvlist_t **firstel) { nvlist_t **value; nvpair_t *tmpnvp; unsigned int ii, j; size_t sizeup; PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_NVLIST_ARRAY); sizeup = sizeof(struct nvpair_header) * nvp->nvp_nitems; if (nvp->nvp_nitems == 0 || sizeup < nvp->nvp_nitems || sizeup > *leftp) { ERRNO_SET(EINVAL); return (NULL); } value = nv_malloc(nvp->nvp_nitems * sizeof(*value)); if (value == NULL) return (NULL); for (ii = 0; ii < nvp->nvp_nitems; ii++) { value[ii] = nvlist_create(0); if (value[ii] == NULL) goto fail; if (ii > 0) { tmpnvp = nvpair_allocv(" ", NV_TYPE_NVLIST, (uint64_t)(uintptr_t)value[ii], 0, 0); if (tmpnvp == NULL) goto fail; nvlist_set_array_next(value[ii - 1], tmpnvp); } } nvlist_set_flags(value[nvp->nvp_nitems - 1], NV_FLAG_IN_ARRAY); nvp->nvp_data = (uint64_t)(uintptr_t)value; *firstel = value[0]; return (ptr); fail: ERRNO_SAVE(); for (j = 0; j <= ii; j++) nvlist_destroy(value[j]); nv_free(value); ERRNO_RESTORE(); return (NULL); } const unsigned char * nvpair_unpack(bool isbe, const unsigned char *ptr, size_t *leftp, nvpair_t **nvpp) { nvpair_t *nvp, *tmp; nvp = nv_calloc(1, sizeof(*nvp) + NV_NAME_MAX); if (nvp == NULL) return (NULL); nvp->nvp_name = (char *)(nvp + 1); ptr = nvpair_unpack_header(isbe, nvp, ptr, leftp); if (ptr == NULL) goto fail; tmp = nv_realloc(nvp, sizeof(*nvp) + strlen(nvp->nvp_name) + 1); if (tmp == NULL) goto fail; nvp = tmp; /* Update nvp_name after realloc(). */ nvp->nvp_name = (char *)(nvp + 1); nvp->nvp_data = 0x00; nvp->nvp_magic = NVPAIR_MAGIC; *nvpp = nvp; return (ptr); fail: nv_free(nvp); return (NULL); } int nvpair_type(const nvpair_t *nvp) { NVPAIR_ASSERT(nvp); return (nvp->nvp_type); } const char * nvpair_name(const nvpair_t *nvp) { NVPAIR_ASSERT(nvp); return (nvp->nvp_name); } nvpair_t * nvpair_create_stringf(const char *name, const char *valuefmt, ...) 
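/*
 * Usage sketch (illustrative only; "dir" and "file" below are hypothetical
 * variables, not identifiers from this file): the value is formatted
 * printf(3)-style and stored as an ordinary NV_TYPE_STRING pair.  Userland
 * example; err(3) needs <err.h>.
 *
 *	nvpair_t *nvp;
 *
 *	nvp = nvpair_create_stringf("path", "%s/%s", dir, file);
 *	if (nvp == NULL)
 *		err(1, "nvpair_create_stringf");
 */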
{ va_list valueap; nvpair_t *nvp; va_start(valueap, valuefmt); nvp = nvpair_create_stringv(name, valuefmt, valueap); va_end(valueap); return (nvp); } nvpair_t * nvpair_create_stringv(const char *name, const char *valuefmt, va_list valueap) { nvpair_t *nvp; char *str; int len; len = nv_vasprintf(&str, valuefmt, valueap); if (len < 0) return (NULL); nvp = nvpair_create_string(name, str); if (nvp == NULL) nv_free(str); return (nvp); } nvpair_t * nvpair_create_null(const char *name) { return (nvpair_allocv(name, NV_TYPE_NULL, 0, 0, 0)); } nvpair_t * nvpair_create_bool(const char *name, bool value) { return (nvpair_allocv(name, NV_TYPE_BOOL, value ? 1 : 0, sizeof(uint8_t), 0)); } nvpair_t * nvpair_create_number(const char *name, uint64_t value) { return (nvpair_allocv(name, NV_TYPE_NUMBER, value, sizeof(value), 0)); } nvpair_t * nvpair_create_string(const char *name, const char *value) { nvpair_t *nvp; size_t size; char *data; if (value == NULL) { ERRNO_SET(EINVAL); return (NULL); } data = nv_strdup(value); if (data == NULL) return (NULL); size = strlen(value) + 1; nvp = nvpair_allocv(name, NV_TYPE_STRING, (uint64_t)(uintptr_t)data, size, 0); if (nvp == NULL) nv_free(data); return (nvp); } nvpair_t * nvpair_create_nvlist(const char *name, const nvlist_t *value) { nvlist_t *nvl; nvpair_t *nvp; if (value == NULL) { ERRNO_SET(EINVAL); return (NULL); } nvl = nvlist_clone(value); if (nvl == NULL) return (NULL); nvp = nvpair_allocv(name, NV_TYPE_NVLIST, (uint64_t)(uintptr_t)nvl, 0, 0); if (nvp == NULL) nvlist_destroy(nvl); else nvlist_set_parent(nvl, nvp); return (nvp); } #ifndef _KERNEL nvpair_t * nvpair_create_descriptor(const char *name, int value) { nvpair_t *nvp; - if (value < 0 || !fd_is_valid(value)) { - ERRNO_SET(EBADF); - return (NULL); - } - value = fcntl(value, F_DUPFD_CLOEXEC, 0); if (value < 0) return (NULL); nvp = nvpair_allocv(name, NV_TYPE_DESCRIPTOR, (uint64_t)value, sizeof(int64_t), 0); if (nvp == NULL) { ERRNO_SAVE(); close(value); ERRNO_RESTORE(); } return (nvp); } #endif nvpair_t * nvpair_create_binary(const char *name, const void *value, size_t size) { nvpair_t *nvp; void *data; if (value == NULL || size == 0) { ERRNO_SET(EINVAL); return (NULL); } data = nv_malloc(size); if (data == NULL) return (NULL); memcpy(data, value, size); nvp = nvpair_allocv(name, NV_TYPE_BINARY, (uint64_t)(uintptr_t)data, size, 0); if (nvp == NULL) nv_free(data); return (nvp); } nvpair_t * nvpair_create_bool_array(const char *name, const bool *value, size_t nitems) { nvpair_t *nvp; size_t size; void *data; if (value == NULL || nitems == 0) { ERRNO_SET(EINVAL); return (NULL); } size = sizeof(value[0]) * nitems; data = nv_malloc(size); if (data == NULL) return (NULL); memcpy(data, value, size); nvp = nvpair_allocv(name, NV_TYPE_BOOL_ARRAY, (uint64_t)(uintptr_t)data, size, nitems); if (nvp == NULL) { ERRNO_SAVE(); nv_free(data); ERRNO_RESTORE(); } return (nvp); } nvpair_t * nvpair_create_number_array(const char *name, const uint64_t *value, size_t nitems) { nvpair_t *nvp; size_t size; void *data; if (value == NULL || nitems == 0) { ERRNO_SET(EINVAL); return (NULL); } size = sizeof(value[0]) * nitems; data = nv_malloc(size); if (data == NULL) return (NULL); memcpy(data, value, size); nvp = nvpair_allocv(name, NV_TYPE_NUMBER_ARRAY, (uint64_t)(uintptr_t)data, size, nitems); if (nvp == NULL) { ERRNO_SAVE(); nv_free(data); ERRNO_RESTORE(); } return (nvp); } nvpair_t * nvpair_create_string_array(const char *name, const char * const *value, size_t nitems) { nvpair_t *nvp; unsigned int ii; size_t datasize, size; 
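	/*
	 * Each element of value[] is copied with nv_strdup(); nvp_datasize
	 * accumulates strlen(value[ii]) + 1 for every element, and on failure
	 * every string duplicated so far is freed before NULL is returned.
	 * The nvpair_move_*() variants later in the file store the caller's
	 * buffers directly instead of copying them (and free them if the
	 * nvpair cannot be allocated).
	 */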
char **data; if (value == NULL || nitems == 0) { ERRNO_SET(EINVAL); return (NULL); } nvp = NULL; datasize = 0; data = nv_malloc(sizeof(value[0]) * nitems); if (data == NULL) return (NULL); for (ii = 0; ii < nitems; ii++) { if (value[ii] == NULL) { ERRNO_SET(EINVAL); goto fail; } size = strlen(value[ii]) + 1; datasize += size; data[ii] = nv_strdup(value[ii]); if (data[ii] == NULL) goto fail; } nvp = nvpair_allocv(name, NV_TYPE_STRING_ARRAY, (uint64_t)(uintptr_t)data, datasize, nitems); fail: if (nvp == NULL) { ERRNO_SAVE(); for (; ii > 0; ii--) nv_free(data[ii - 1]); nv_free(data); ERRNO_RESTORE(); } return (nvp); } nvpair_t * nvpair_create_nvlist_array(const char *name, const nvlist_t * const *value, size_t nitems) { unsigned int ii; nvlist_t **nvls; nvpair_t *parent; int flags; nvls = NULL; if (value == NULL || nitems == 0) { ERRNO_SET(EINVAL); return (NULL); } nvls = nv_malloc(sizeof(value[0]) * nitems); if (nvls == NULL) return (NULL); for (ii = 0; ii < nitems; ii++) { if (value[ii] == NULL) { ERRNO_SET(EINVAL); goto fail; } nvls[ii] = nvlist_clone(value[ii]); if (nvls[ii] == NULL) goto fail; if (ii > 0) { nvpair_t *nvp; nvp = nvpair_allocv(" ", NV_TYPE_NVLIST, (uint64_t)(uintptr_t)nvls[ii], 0, 0); if (nvp == NULL) { ERRNO_SAVE(); nvlist_destroy(nvls[ii]); ERRNO_RESTORE(); goto fail; } nvlist_set_array_next(nvls[ii - 1], nvp); } } flags = nvlist_flags(nvls[nitems - 1]) | NV_FLAG_IN_ARRAY; nvlist_set_flags(nvls[nitems - 1], flags); parent = nvpair_allocv(name, NV_TYPE_NVLIST_ARRAY, (uint64_t)(uintptr_t)nvls, 0, nitems); if (parent == NULL) goto fail; for (ii = 0; ii < nitems; ii++) nvlist_set_parent(nvls[ii], parent); return (parent); fail: ERRNO_SAVE(); for (; ii > 0; ii--) nvlist_destroy(nvls[ii - 1]); nv_free(nvls); ERRNO_RESTORE(); return (NULL); } #ifndef _KERNEL nvpair_t * nvpair_create_descriptor_array(const char *name, const int *value, size_t nitems) { unsigned int ii; nvpair_t *nvp; int *fds; if (value == NULL) { ERRNO_SET(EINVAL); return (NULL); } nvp = NULL; fds = nv_malloc(sizeof(value[0]) * nitems); if (fds == NULL) return (NULL); for (ii = 0; ii < nitems; ii++) { if (value[ii] == -1) { fds[ii] = -1; } else { - if (!fd_is_valid(value[ii])) { - ERRNO_SET(EBADF); - goto fail; - } - fds[ii] = fcntl(value[ii], F_DUPFD_CLOEXEC, 0); if (fds[ii] == -1) goto fail; } } nvp = nvpair_allocv(name, NV_TYPE_DESCRIPTOR_ARRAY, (uint64_t)(uintptr_t)fds, sizeof(int64_t) * nitems, nitems); fail: if (nvp == NULL) { ERRNO_SAVE(); for (; ii > 0; ii--) { if (fds[ii - 1] != -1) close(fds[ii - 1]); } nv_free(fds); ERRNO_RESTORE(); } return (nvp); } #endif nvpair_t * nvpair_move_string(const char *name, char *value) { nvpair_t *nvp; if (value == NULL) { ERRNO_SET(EINVAL); return (NULL); } nvp = nvpair_allocv(name, NV_TYPE_STRING, (uint64_t)(uintptr_t)value, strlen(value) + 1, 0); if (nvp == NULL) { ERRNO_SAVE(); nv_free(value); ERRNO_RESTORE(); } return (nvp); } nvpair_t * nvpair_move_nvlist(const char *name, nvlist_t *value) { nvpair_t *nvp; if (value == NULL || nvlist_get_nvpair_parent(value) != NULL) { ERRNO_SET(EINVAL); return (NULL); } if (nvlist_error(value) != 0) { ERRNO_SET(nvlist_error(value)); nvlist_destroy(value); return (NULL); } nvp = nvpair_allocv(name, NV_TYPE_NVLIST, (uint64_t)(uintptr_t)value, 0, 0); if (nvp == NULL) nvlist_destroy(value); else nvlist_set_parent(value, nvp); return (nvp); } #ifndef _KERNEL nvpair_t * nvpair_move_descriptor(const char *name, int value) { nvpair_t *nvp; if (value < 0 || !fd_is_valid(value)) { ERRNO_SET(EBADF); return (NULL); } nvp = 
nvpair_allocv(name, NV_TYPE_DESCRIPTOR, (uint64_t)value, sizeof(int64_t), 0); if (nvp == NULL) { ERRNO_SAVE(); close(value); ERRNO_RESTORE(); } return (nvp); } #endif nvpair_t * nvpair_move_binary(const char *name, void *value, size_t size) { nvpair_t *nvp; if (value == NULL || size == 0) { ERRNO_SET(EINVAL); return (NULL); } nvp = nvpair_allocv(name, NV_TYPE_BINARY, (uint64_t)(uintptr_t)value, size, 0); if (nvp == NULL) { ERRNO_SAVE(); nv_free(value); ERRNO_RESTORE(); } return (nvp); } nvpair_t * nvpair_move_bool_array(const char *name, bool *value, size_t nitems) { nvpair_t *nvp; if (value == NULL || nitems == 0) { ERRNO_SET(EINVAL); return (NULL); } nvp = nvpair_allocv(name, NV_TYPE_BOOL_ARRAY, (uint64_t)(uintptr_t)value, sizeof(value[0]) * nitems, nitems); if (nvp == NULL) { ERRNO_SAVE(); nv_free(value); ERRNO_RESTORE(); } return (nvp); } nvpair_t * nvpair_move_string_array(const char *name, char **value, size_t nitems) { nvpair_t *nvp; size_t i, size; if (value == NULL || nitems == 0) { ERRNO_SET(EINVAL); return (NULL); } size = 0; for (i = 0; i < nitems; i++) { if (value[i] == NULL) { ERRNO_SET(EINVAL); return (NULL); } size += strlen(value[i]) + 1; } nvp = nvpair_allocv(name, NV_TYPE_STRING_ARRAY, (uint64_t)(uintptr_t)value, size, nitems); if (nvp == NULL) { ERRNO_SAVE(); for (i = 0; i < nitems; i++) nv_free(value[i]); nv_free(value); ERRNO_RESTORE(); } return (nvp); } nvpair_t * nvpair_move_number_array(const char *name, uint64_t *value, size_t nitems) { nvpair_t *nvp; if (value == NULL || nitems == 0) { ERRNO_SET(EINVAL); return (NULL); } nvp = nvpair_allocv(name, NV_TYPE_NUMBER_ARRAY, (uint64_t)(uintptr_t)value, sizeof(value[0]) * nitems, nitems); if (nvp == NULL) { ERRNO_SAVE(); nv_free(value); ERRNO_RESTORE(); } return (nvp); } nvpair_t * nvpair_move_nvlist_array(const char *name, nvlist_t **value, size_t nitems) { nvpair_t *parent; unsigned int ii; int flags; if (value == NULL || nitems == 0) { ERRNO_SET(EINVAL); return (NULL); } for (ii = 0; ii < nitems; ii++) { if (value == NULL || nvlist_error(value[ii]) != 0 || nvlist_get_pararr(value[ii], NULL) != NULL) { ERRNO_SET(EINVAL); goto fail; } if (ii > 0) { nvpair_t *nvp; nvp = nvpair_allocv(" ", NV_TYPE_NVLIST, (uint64_t)(uintptr_t)value[ii], 0, 0); if (nvp == NULL) goto fail; nvlist_set_array_next(value[ii - 1], nvp); } } flags = nvlist_flags(value[nitems - 1]) | NV_FLAG_IN_ARRAY; nvlist_set_flags(value[nitems - 1], flags); parent = nvpair_allocv(name, NV_TYPE_NVLIST_ARRAY, (uint64_t)(uintptr_t)value, 0, nitems); if (parent == NULL) goto fail; for (ii = 0; ii < nitems; ii++) nvlist_set_parent(value[ii], parent); return (parent); fail: ERRNO_SAVE(); for (ii = 0; ii < nitems; ii++) { if (value[ii] != NULL && nvlist_get_pararr(value[ii], NULL) != NULL) { nvlist_destroy(value[ii]); } } nv_free(value); ERRNO_RESTORE(); return (NULL); } #ifndef _KERNEL nvpair_t * nvpair_move_descriptor_array(const char *name, int *value, size_t nitems) { nvpair_t *nvp; size_t i; if (value == NULL || nitems == 0) { ERRNO_SET(EINVAL); return (NULL); } for (i = 0; i < nitems; i++) { if (value[i] != -1 && !fd_is_valid(value[i])) { ERRNO_SET(EBADF); goto fail; } } nvp = nvpair_allocv(name, NV_TYPE_DESCRIPTOR_ARRAY, (uint64_t)(uintptr_t)value, sizeof(value[0]) * nitems, nitems); if (nvp == NULL) goto fail; return (nvp); fail: ERRNO_SAVE(); for (i = 0; i < nitems; i++) { if (fd_is_valid(value[i])) close(value[i]); } nv_free(value); ERRNO_RESTORE(); return (NULL); } #endif bool nvpair_get_bool(const nvpair_t *nvp) { NVPAIR_ASSERT(nvp); return (nvp->nvp_data 
== 1); } uint64_t nvpair_get_number(const nvpair_t *nvp) { NVPAIR_ASSERT(nvp); return (nvp->nvp_data); } const char * nvpair_get_string(const nvpair_t *nvp) { NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_STRING); return ((const char *)(intptr_t)nvp->nvp_data); } const nvlist_t * nvpair_get_nvlist(const nvpair_t *nvp) { NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_NVLIST); return ((const nvlist_t *)(intptr_t)nvp->nvp_data); } #ifndef _KERNEL int nvpair_get_descriptor(const nvpair_t *nvp) { NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_DESCRIPTOR); return ((int)nvp->nvp_data); } #endif const void * nvpair_get_binary(const nvpair_t *nvp, size_t *sizep) { NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_BINARY); if (sizep != NULL) *sizep = nvp->nvp_datasize; return ((const void *)(intptr_t)nvp->nvp_data); } const bool * nvpair_get_bool_array(const nvpair_t *nvp, size_t *nitems) { NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_BOOL_ARRAY); if (nitems != NULL) *nitems = nvp->nvp_nitems; return ((const bool *)(intptr_t)nvp->nvp_data); } const uint64_t * nvpair_get_number_array(const nvpair_t *nvp, size_t *nitems) { NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_NUMBER_ARRAY); if (nitems != NULL) *nitems = nvp->nvp_nitems; return ((const uint64_t *)(intptr_t)nvp->nvp_data); } const char * const * nvpair_get_string_array(const nvpair_t *nvp, size_t *nitems) { NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_STRING_ARRAY); if (nitems != NULL) *nitems = nvp->nvp_nitems; return ((const char * const *)(intptr_t)nvp->nvp_data); } const nvlist_t * const * nvpair_get_nvlist_array(const nvpair_t *nvp, size_t *nitems) { NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_NVLIST_ARRAY); if (nitems != NULL) *nitems = nvp->nvp_nitems; return ((const nvlist_t * const *)((intptr_t)nvp->nvp_data)); } #ifndef _KERNEL const int * nvpair_get_descriptor_array(const nvpair_t *nvp, size_t *nitems) { NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_DESCRIPTOR_ARRAY); if (nitems != NULL) *nitems = nvp->nvp_nitems; return ((const int *)(intptr_t)nvp->nvp_data); } #endif int nvpair_append_bool_array(nvpair_t *nvp, const bool value) { NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_BOOL_ARRAY); return (nvpair_append(nvp, &value, sizeof(value), sizeof(value))); } int nvpair_append_number_array(nvpair_t *nvp, const uint64_t value) { NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_NUMBER_ARRAY); return (nvpair_append(nvp, &value, sizeof(value), sizeof(value))); } int nvpair_append_string_array(nvpair_t *nvp, const char *value) { char *str; NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_STRING_ARRAY); if (value == NULL) { ERRNO_SET(EINVAL); return (-1); } str = nv_strdup(value); if (str == NULL) { return (-1); } if (nvpair_append(nvp, &str, sizeof(str), strlen(str) + 1) == -1) { nv_free(str); return (-1); } return (0); } int nvpair_append_nvlist_array(nvpair_t *nvp, const nvlist_t *value) { nvpair_t *tmpnvp; nvlist_t *nvl, *prev; int flags; NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_NVLIST_ARRAY); if (value == NULL || nvlist_error(value) != 0 || nvlist_get_pararr(value, NULL) != NULL) { ERRNO_SET(EINVAL); return (-1); } nvl = nvlist_clone(value); if (nvl == NULL) { return (-1); } flags = nvlist_flags(nvl) | NV_FLAG_IN_ARRAY; nvlist_set_flags(nvl, flags); tmpnvp = NULL; prev = NULL; if (nvp->nvp_nitems > 0) { nvlist_t **nvls = (void *)(uintptr_t)nvp->nvp_data; prev = 
nvls[nvp->nvp_nitems - 1]; PJDLOG_ASSERT(prev != NULL); tmpnvp = nvpair_allocv(" ", NV_TYPE_NVLIST, (uint64_t)(uintptr_t)nvl, 0, 0); if (tmpnvp == NULL) { goto fail; } } if (nvpair_append(nvp, &nvl, sizeof(nvl), 0) == -1) { goto fail; } if (tmpnvp) { NVPAIR_ASSERT(tmpnvp); nvlist_set_array_next(prev, tmpnvp); } nvlist_set_parent(nvl, nvp); return (0); fail: if (tmpnvp) { nvpair_free(tmpnvp); } nvlist_destroy(nvl); return (-1); } #ifndef _KERNEL int nvpair_append_descriptor_array(nvpair_t *nvp, const int value) { int fd; NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_type == NV_TYPE_DESCRIPTOR_ARRAY); - if (value < 0 || !fd_is_valid(value)) { - ERRNO_SET(EBADF); - return -1; - } fd = fcntl(value, F_DUPFD_CLOEXEC, 0); if (fd == -1) { return (-1); } if (nvpair_append(nvp, &fd, sizeof(fd), sizeof(fd)) == -1) { close(fd); return (-1); } return (0); } #endif void nvpair_free(nvpair_t *nvp) { size_t i; NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_list == NULL); nvp->nvp_magic = 0; switch (nvp->nvp_type) { #ifndef _KERNEL case NV_TYPE_DESCRIPTOR: close((int)nvp->nvp_data); break; case NV_TYPE_DESCRIPTOR_ARRAY: for (i = 0; i < nvp->nvp_nitems; i++) close(((int *)(intptr_t)nvp->nvp_data)[i]); break; #endif case NV_TYPE_NVLIST: nvlist_destroy((nvlist_t *)(intptr_t)nvp->nvp_data); break; case NV_TYPE_STRING: nv_free((char *)(intptr_t)nvp->nvp_data); break; case NV_TYPE_BINARY: nv_free((void *)(intptr_t)nvp->nvp_data); break; case NV_TYPE_NVLIST_ARRAY: for (i = 0; i < nvp->nvp_nitems; i++) { nvlist_destroy( ((nvlist_t **)(intptr_t)nvp->nvp_data)[i]); } nv_free(((nvlist_t **)(intptr_t)nvp->nvp_data)); break; case NV_TYPE_NUMBER_ARRAY: nv_free((uint64_t *)(intptr_t)nvp->nvp_data); break; case NV_TYPE_BOOL_ARRAY: nv_free((bool *)(intptr_t)nvp->nvp_data); break; case NV_TYPE_STRING_ARRAY: for (i = 0; i < nvp->nvp_nitems; i++) nv_free(((char **)(intptr_t)nvp->nvp_data)[i]); nv_free((char **)(intptr_t)nvp->nvp_data); break; } nv_free(nvp); } void nvpair_free_structure(nvpair_t *nvp) { NVPAIR_ASSERT(nvp); PJDLOG_ASSERT(nvp->nvp_list == NULL); nvp->nvp_magic = 0; nv_free(nvp); } const char * nvpair_type_string(int type) { switch (type) { case NV_TYPE_NULL: return ("NULL"); case NV_TYPE_BOOL: return ("BOOL"); case NV_TYPE_NUMBER: return ("NUMBER"); case NV_TYPE_STRING: return ("STRING"); case NV_TYPE_NVLIST: return ("NVLIST"); case NV_TYPE_DESCRIPTOR: return ("DESCRIPTOR"); case NV_TYPE_BINARY: return ("BINARY"); case NV_TYPE_BOOL_ARRAY: return ("BOOL ARRAY"); case NV_TYPE_NUMBER_ARRAY: return ("NUMBER ARRAY"); case NV_TYPE_STRING_ARRAY: return ("STRING ARRAY"); case NV_TYPE_NVLIST_ARRAY: return ("NVLIST ARRAY"); case NV_TYPE_DESCRIPTOR_ARRAY: return ("DESCRIPTOR ARRAY"); default: return (""); } } Index: projects/clang700-import/sys/dev/amdsmn/amdsmn.c =================================================================== --- projects/clang700-import/sys/dev/amdsmn/amdsmn.c (revision 340426) +++ projects/clang700-import/sys/dev/amdsmn/amdsmn.c (revision 340427) @@ -1,195 +1,206 @@ /*- * Copyright (c) 2017 Conrad Meyer * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Driver for the AMD Family 17h CPU System Management Network. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #define SMN_ADDR_REG 0x60 #define SMN_DATA_REG 0x64 +#define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450 +#define PCI_DEVICE_ID_AMD_17H_ROOT_DF_F3 0x1463 +#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0 +#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT_DF_F3 0x15eb + struct amdsmn_softc { struct mtx smn_lock; }; static struct pciid { - uint32_t device_id; + uint16_t amdsmn_vendorid; + uint16_t amdsmn_deviceid; } amdsmn_ids[] = { - { 0x14501022 }, + { CPU_VENDOR_AMD, PCI_DEVICE_ID_AMD_17H_ROOT }, + { CPU_VENDOR_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT }, }; /* * Device methods. */ static void amdsmn_identify(driver_t *driver, device_t parent); static int amdsmn_probe(device_t dev); static int amdsmn_attach(device_t dev); static int amdsmn_detach(device_t dev); static device_method_t amdsmn_methods[] = { /* Device interface */ DEVMETHOD(device_identify, amdsmn_identify), DEVMETHOD(device_probe, amdsmn_probe), DEVMETHOD(device_attach, amdsmn_attach), DEVMETHOD(device_detach, amdsmn_detach), DEVMETHOD_END }; static driver_t amdsmn_driver = { "amdsmn", amdsmn_methods, sizeof(struct amdsmn_softc), }; static devclass_t amdsmn_devclass; DRIVER_MODULE(amdsmn, hostb, amdsmn_driver, amdsmn_devclass, NULL, NULL); MODULE_VERSION(amdsmn, 1); -MODULE_PNP_INFO("W32:vendor/device", pci, amdsmn, amdsmn_ids, +MODULE_PNP_INFO("U16:vendor;U16:device", pci, amdsmn, amdsmn_ids, nitems(amdsmn_ids)); static bool amdsmn_match(device_t parent) { - uint32_t devid; + uint16_t vendor, device; size_t i; - devid = pci_get_devid(parent); + vendor = pci_get_vendor(parent); + device = pci_get_device(parent); + for (i = 0; i < nitems(amdsmn_ids); i++) - if (amdsmn_ids[i].device_id == devid) + if (vendor == amdsmn_ids[i].amdsmn_vendorid && + device == amdsmn_ids[i].amdsmn_deviceid) return (true); return (false); } static void amdsmn_identify(driver_t *driver, device_t parent) { device_t child; /* Make sure we're not being doubly invoked. 
*/ if (device_find_child(parent, "amdsmn", -1) != NULL) return; if (!amdsmn_match(parent)) return; child = device_add_child(parent, "amdsmn", -1); if (child == NULL) device_printf(parent, "add amdsmn child failed\n"); } static int amdsmn_probe(device_t dev) { uint32_t family; if (resource_disabled("amdsmn", 0)) return (ENXIO); if (!amdsmn_match(device_get_parent(dev))) return (ENXIO); family = CPUID_TO_FAMILY(cpu_id); switch (family) { case 0x17: break; default: return (ENXIO); } device_set_desc(dev, "AMD Family 17h System Management Network"); return (BUS_PROBE_GENERIC); } static int amdsmn_attach(device_t dev) { struct amdsmn_softc *sc = device_get_softc(dev); mtx_init(&sc->smn_lock, "SMN mtx", "SMN", MTX_DEF); return (0); } int amdsmn_detach(device_t dev) { struct amdsmn_softc *sc = device_get_softc(dev); mtx_destroy(&sc->smn_lock); return (0); } int amdsmn_read(device_t dev, uint32_t addr, uint32_t *value) { struct amdsmn_softc *sc = device_get_softc(dev); device_t parent; parent = device_get_parent(dev); mtx_lock(&sc->smn_lock); pci_write_config(parent, SMN_ADDR_REG, addr, 4); *value = pci_read_config(parent, SMN_DATA_REG, 4); mtx_unlock(&sc->smn_lock); return (0); } int amdsmn_write(device_t dev, uint32_t addr, uint32_t value) { struct amdsmn_softc *sc = device_get_softc(dev); device_t parent; parent = device_get_parent(dev); mtx_lock(&sc->smn_lock); pci_write_config(parent, SMN_ADDR_REG, addr, 4); pci_write_config(parent, SMN_DATA_REG, value, 4); mtx_unlock(&sc->smn_lock); return (0); } Index: projects/clang700-import/sys/dev/amdtemp/amdtemp.c =================================================================== --- projects/clang700-import/sys/dev/amdtemp/amdtemp.c (revision 340426) +++ projects/clang700-import/sys/dev/amdtemp/amdtemp.c (revision 340427) @@ -1,604 +1,617 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008, 2009 Rui Paulo * Copyright (c) 2009 Norikatsu Shigemura * Copyright (c) 2009-2012 Jung-uk Kim * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Driver for the AMD CPU on-die thermal sensors. * Initially based on the k8temp Linux driver. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include typedef enum { CORE0_SENSOR0, CORE0_SENSOR1, CORE1_SENSOR0, CORE1_SENSOR1, CORE0, CORE1 } amdsensor_t; struct amdtemp_softc { int sc_ncores; int sc_ntemps; int sc_flags; #define AMDTEMP_FLAG_CS_SWAP 0x01 /* ThermSenseCoreSel is inverted. */ #define AMDTEMP_FLAG_CT_10BIT 0x02 /* CurTmp is 10-bit wide. */ #define AMDTEMP_FLAG_ALT_OFFSET 0x04 /* CurTmp starts at -28C. */ int32_t sc_offset; int32_t (*sc_gettemp)(device_t, amdsensor_t); struct sysctl_oid *sc_sysctl_cpu[MAXCPU]; struct intr_config_hook sc_ich; device_t sc_smn; }; #define VENDORID_AMD 0x1022 #define DEVICEID_AMD_MISC0F 0x1103 #define DEVICEID_AMD_MISC10 0x1203 #define DEVICEID_AMD_MISC11 0x1303 #define DEVICEID_AMD_MISC12 0x1403 #define DEVICEID_AMD_MISC14 0x1703 #define DEVICEID_AMD_MISC15 0x1603 #define DEVICEID_AMD_MISC16 0x1533 #define DEVICEID_AMD_MISC16_M30H 0x1583 #define DEVICEID_AMD_MISC17 0x141d -#define DEVICEID_AMD_HOSTB17H 0x1450 +#define DEVICEID_AMD_HOSTB17H_ROOT 0x1450 +#define DEVICEID_AMD_HOSTB17H_DF_F3 0x1463 +#define DEVICEID_AMD_HOSTB17H_M10H_ROOT 0x15d0 +#define DEVICEID_AMD_HOSTB17H_M10H_DF_F3 0x15eb static struct amdtemp_product { uint16_t amdtemp_vendorid; uint16_t amdtemp_deviceid; } amdtemp_products[] = { { VENDORID_AMD, DEVICEID_AMD_MISC0F }, { VENDORID_AMD, DEVICEID_AMD_MISC10 }, { VENDORID_AMD, DEVICEID_AMD_MISC11 }, { VENDORID_AMD, DEVICEID_AMD_MISC12 }, { VENDORID_AMD, DEVICEID_AMD_MISC14 }, { VENDORID_AMD, DEVICEID_AMD_MISC15 }, { VENDORID_AMD, DEVICEID_AMD_MISC16 }, { VENDORID_AMD, DEVICEID_AMD_MISC16_M30H }, { VENDORID_AMD, DEVICEID_AMD_MISC17 }, - { VENDORID_AMD, DEVICEID_AMD_HOSTB17H }, + { VENDORID_AMD, DEVICEID_AMD_HOSTB17H_ROOT }, + { VENDORID_AMD, DEVICEID_AMD_HOSTB17H_M10H_ROOT }, }; /* * Reported Temperature Control Register */ #define AMDTEMP_REPTMP_CTRL 0xa4 /* * Reported Temperature, Family 17h + * + * According to AMD OSRR for 17H, section 4.2.1, bits 31-21 of this register + * provide the current temp. bit 19, when clear, means the temp is reported in + * a range 0.."225C" (probable typo for 255C), and when set changes the range + * to -49..206C. */ -#define AMDTEMP_17H_CUR_TMP 0x59800 +#define AMDTEMP_17H_CUR_TMP 0x59800 +#define AMDTEMP_17H_CUR_TMP_RANGE_SEL (1 << 19) +#define AMDTEMP_17H_CUR_TMP_RANGE_OFF 490 /* * Thermaltrip Status Register (Family 0Fh only) */ #define AMDTEMP_THERMTP_STAT 0xe4 #define AMDTEMP_TTSR_SELCORE 0x04 #define AMDTEMP_TTSR_SELSENSOR 0x40 /* * DRAM Configuration High Register */ #define AMDTEMP_DRAM_CONF_HIGH 0x94 /* Function 2 */ #define AMDTEMP_DRAM_MODE_DDR3 0x0100 /* * CPU Family/Model Register */ #define AMDTEMP_CPUID 0xfc /* * Device methods. 
*/ static void amdtemp_identify(driver_t *driver, device_t parent); static int amdtemp_probe(device_t dev); static int amdtemp_attach(device_t dev); static void amdtemp_intrhook(void *arg); static int amdtemp_detach(device_t dev); static int amdtemp_match(device_t dev); static int32_t amdtemp_gettemp0f(device_t dev, amdsensor_t sensor); static int32_t amdtemp_gettemp(device_t dev, amdsensor_t sensor); static int32_t amdtemp_gettemp17h(device_t dev, amdsensor_t sensor); static int amdtemp_sysctl(SYSCTL_HANDLER_ARGS); static device_method_t amdtemp_methods[] = { /* Device interface */ DEVMETHOD(device_identify, amdtemp_identify), DEVMETHOD(device_probe, amdtemp_probe), DEVMETHOD(device_attach, amdtemp_attach), DEVMETHOD(device_detach, amdtemp_detach), DEVMETHOD_END }; static driver_t amdtemp_driver = { "amdtemp", amdtemp_methods, sizeof(struct amdtemp_softc), }; static devclass_t amdtemp_devclass; DRIVER_MODULE(amdtemp, hostb, amdtemp_driver, amdtemp_devclass, NULL, NULL); MODULE_VERSION(amdtemp, 1); MODULE_DEPEND(amdtemp, amdsmn, 1, 1, 1); MODULE_PNP_INFO("U16:vendor;U16:device", pci, amdtemp, amdtemp_products, nitems(amdtemp_products)); static int amdtemp_match(device_t dev) { int i; uint16_t vendor, devid; vendor = pci_get_vendor(dev); devid = pci_get_device(dev); for (i = 0; i < nitems(amdtemp_products); i++) { if (vendor == amdtemp_products[i].amdtemp_vendorid && devid == amdtemp_products[i].amdtemp_deviceid) return (1); } return (0); } static void amdtemp_identify(driver_t *driver, device_t parent) { device_t child; /* Make sure we're not being doubly invoked. */ if (device_find_child(parent, "amdtemp", -1) != NULL) return; if (amdtemp_match(parent)) { child = device_add_child(parent, "amdtemp", -1); if (child == NULL) device_printf(parent, "add amdtemp child failed\n"); } } static int amdtemp_probe(device_t dev) { uint32_t family, model; if (resource_disabled("amdtemp", 0)) return (ENXIO); if (!amdtemp_match(device_get_parent(dev))) return (ENXIO); family = CPUID_TO_FAMILY(cpu_id); model = CPUID_TO_MODEL(cpu_id); switch (family) { case 0x0f: if ((model == 0x04 && (cpu_id & CPUID_STEPPING) == 0) || (model == 0x05 && (cpu_id & CPUID_STEPPING) <= 1)) return (ENXIO); break; case 0x10: case 0x11: case 0x12: case 0x14: case 0x15: case 0x16: case 0x17: break; default: return (ENXIO); } device_set_desc(dev, "AMD CPU On-Die Thermal Sensors"); return (BUS_PROBE_GENERIC); } static int amdtemp_attach(device_t dev) { char tn[32]; u_int regs[4]; struct amdtemp_softc *sc = device_get_softc(dev); struct sysctl_ctx_list *sysctlctx; struct sysctl_oid *sysctlnode; uint32_t cpuid, family, model; u_int bid; int erratum319, unit; erratum319 = 0; /* * CPUID Register is available from Revision F. */ cpuid = cpu_id; family = CPUID_TO_FAMILY(cpuid); model = CPUID_TO_MODEL(cpuid); if ((family != 0x0f || model >= 0x40) && family != 0x17) { cpuid = pci_read_config(dev, AMDTEMP_CPUID, 4); family = CPUID_TO_FAMILY(cpuid); model = CPUID_TO_MODEL(cpuid); } switch (family) { case 0x0f: /* * Thermaltrip Status Register * * - ThermSenseCoreSel * * Revision F & G: 0 - Core1, 1 - Core0 * Other: 0 - Core0, 1 - Core1 * * - CurTmp * * Revision G: bits 23-14 * Other: bits 23-16 * * XXX According to the BKDG, CurTmp, ThermSenseSel and * ThermSenseCoreSel bits were introduced in Revision F * but CurTmp seems working fine as early as Revision C. * However, it is not clear whether ThermSenseSel and/or * ThermSenseCoreSel work in undocumented cases as well. 
* In fact, the Linux driver suggests it may not work but * we just assume it does until we find otherwise. * * XXX According to Linux, CurTmp starts at -28C on * Socket AM2 Revision G processors, which is not * documented anywhere. */ if (model >= 0x40) sc->sc_flags |= AMDTEMP_FLAG_CS_SWAP; if (model >= 0x60 && model != 0xc1) { do_cpuid(0x80000001, regs); bid = (regs[1] >> 9) & 0x1f; switch (model) { case 0x68: /* Socket S1g1 */ case 0x6c: case 0x7c: break; case 0x6b: /* Socket AM2 and ASB1 (2 cores) */ if (bid != 0x0b && bid != 0x0c) sc->sc_flags |= AMDTEMP_FLAG_ALT_OFFSET; break; case 0x6f: /* Socket AM2 and ASB1 (1 core) */ case 0x7f: if (bid != 0x07 && bid != 0x09 && bid != 0x0c) sc->sc_flags |= AMDTEMP_FLAG_ALT_OFFSET; break; default: sc->sc_flags |= AMDTEMP_FLAG_ALT_OFFSET; } sc->sc_flags |= AMDTEMP_FLAG_CT_10BIT; } /* * There are two sensors per core. */ sc->sc_ntemps = 2; sc->sc_gettemp = amdtemp_gettemp0f; break; case 0x10: /* * Erratum 319 Inaccurate Temperature Measurement * * http://support.amd.com/us/Processor_TechDocs/41322.pdf */ do_cpuid(0x80000001, regs); switch ((regs[1] >> 28) & 0xf) { case 0: /* Socket F */ erratum319 = 1; break; case 1: /* Socket AM2+ or AM3 */ if ((pci_cfgregread(pci_get_bus(dev), pci_get_slot(dev), 2, AMDTEMP_DRAM_CONF_HIGH, 2) & AMDTEMP_DRAM_MODE_DDR3) != 0 || model > 0x04 || (model == 0x04 && (cpuid & CPUID_STEPPING) >= 3)) break; /* XXX 00100F42h (RB-C2) exists in both formats. */ erratum319 = 1; break; } /* FALLTHROUGH */ case 0x11: case 0x12: case 0x14: case 0x15: case 0x16: /* * There is only one sensor per package. */ sc->sc_ntemps = 1; sc->sc_gettemp = amdtemp_gettemp; break; case 0x17: sc->sc_ntemps = 1; sc->sc_gettemp = amdtemp_gettemp17h; sc->sc_smn = device_find_child( device_get_parent(dev), "amdsmn", -1); if (sc->sc_smn == NULL) { if (bootverbose) device_printf(dev, "No SMN device found\n"); return (ENXIO); } break; } /* Find number of cores per package. */ sc->sc_ncores = (amd_feature2 & AMDID2_CMP) != 0 ? (cpu_procinfo2 & AMDID_CMP_CORES) + 1 : 1; if (sc->sc_ncores > MAXCPU) return (ENXIO); if (erratum319) device_printf(dev, "Erratum 319: temperature measurement may be inaccurate\n"); if (bootverbose) device_printf(dev, "Found %d cores and %d sensors.\n", sc->sc_ncores, sc->sc_ntemps > 1 ? sc->sc_ntemps * sc->sc_ncores : 1); /* * dev.amdtemp.N tree. 
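	 * Children created below: sensor_offset (also fetched as a tunable of
	 * the same name), core0.sensor0 and, when the part reports two sensors
	 * per core, core0.sensor1 plus a matching core1 subtree.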
*/ unit = device_get_unit(dev); snprintf(tn, sizeof(tn), "dev.amdtemp.%d.sensor_offset", unit); TUNABLE_INT_FETCH(tn, &sc->sc_offset); sysctlctx = device_get_sysctl_ctx(dev); SYSCTL_ADD_INT(sysctlctx, SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "sensor_offset", CTLFLAG_RW, &sc->sc_offset, 0, "Temperature sensor offset"); sysctlnode = SYSCTL_ADD_NODE(sysctlctx, SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "core0", CTLFLAG_RD, 0, "Core 0"); SYSCTL_ADD_PROC(sysctlctx, SYSCTL_CHILDREN(sysctlnode), OID_AUTO, "sensor0", CTLTYPE_INT | CTLFLAG_RD, dev, CORE0_SENSOR0, amdtemp_sysctl, "IK", "Core 0 / Sensor 0 temperature"); if (sc->sc_ntemps > 1) { SYSCTL_ADD_PROC(sysctlctx, SYSCTL_CHILDREN(sysctlnode), OID_AUTO, "sensor1", CTLTYPE_INT | CTLFLAG_RD, dev, CORE0_SENSOR1, amdtemp_sysctl, "IK", "Core 0 / Sensor 1 temperature"); if (sc->sc_ncores > 1) { sysctlnode = SYSCTL_ADD_NODE(sysctlctx, SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "core1", CTLFLAG_RD, 0, "Core 1"); SYSCTL_ADD_PROC(sysctlctx, SYSCTL_CHILDREN(sysctlnode), OID_AUTO, "sensor0", CTLTYPE_INT | CTLFLAG_RD, dev, CORE1_SENSOR0, amdtemp_sysctl, "IK", "Core 1 / Sensor 0 temperature"); SYSCTL_ADD_PROC(sysctlctx, SYSCTL_CHILDREN(sysctlnode), OID_AUTO, "sensor1", CTLTYPE_INT | CTLFLAG_RD, dev, CORE1_SENSOR1, amdtemp_sysctl, "IK", "Core 1 / Sensor 1 temperature"); } } /* * Try to create dev.cpu sysctl entries and setup intrhook function. * This is needed because the cpu driver may be loaded late on boot, * after us. */ amdtemp_intrhook(dev); sc->sc_ich.ich_func = amdtemp_intrhook; sc->sc_ich.ich_arg = dev; if (config_intrhook_establish(&sc->sc_ich) != 0) { device_printf(dev, "config_intrhook_establish failed!\n"); return (ENXIO); } return (0); } void amdtemp_intrhook(void *arg) { struct amdtemp_softc *sc; struct sysctl_ctx_list *sysctlctx; device_t dev = (device_t)arg; device_t acpi, cpu, nexus; amdsensor_t sensor; int i; sc = device_get_softc(dev); /* * dev.cpu.N.temperature. */ nexus = device_find_child(root_bus, "nexus", 0); acpi = device_find_child(nexus, "acpi", 0); for (i = 0; i < sc->sc_ncores; i++) { if (sc->sc_sysctl_cpu[i] != NULL) continue; cpu = device_find_child(acpi, "cpu", device_get_unit(dev) * sc->sc_ncores + i); if (cpu != NULL) { sysctlctx = device_get_sysctl_ctx(cpu); sensor = sc->sc_ntemps > 1 ? (i == 0 ? CORE0 : CORE1) : CORE0_SENSOR0; sc->sc_sysctl_cpu[i] = SYSCTL_ADD_PROC(sysctlctx, SYSCTL_CHILDREN(device_get_sysctl_tree(cpu)), OID_AUTO, "temperature", CTLTYPE_INT | CTLFLAG_RD, dev, sensor, amdtemp_sysctl, "IK", "Current temparature"); } } if (sc->sc_ich.ich_arg != NULL) config_intrhook_disestablish(&sc->sc_ich); } int amdtemp_detach(device_t dev) { struct amdtemp_softc *sc = device_get_softc(dev); int i; for (i = 0; i < sc->sc_ncores; i++) if (sc->sc_sysctl_cpu[i] != NULL) sysctl_remove_oid(sc->sc_sysctl_cpu[i], 1, 0); /* NewBus removes the dev.amdtemp.N tree by itself. 
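	 * Only the dev.cpu.N.temperature handlers registered in
	 * amdtemp_intrhook() need to be removed explicitly, which is what the
	 * loop above does.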
*/ return (0); } static int amdtemp_sysctl(SYSCTL_HANDLER_ARGS) { device_t dev = (device_t)arg1; struct amdtemp_softc *sc = device_get_softc(dev); amdsensor_t sensor = (amdsensor_t)arg2; int32_t auxtemp[2], temp; int error; switch (sensor) { case CORE0: auxtemp[0] = sc->sc_gettemp(dev, CORE0_SENSOR0); auxtemp[1] = sc->sc_gettemp(dev, CORE0_SENSOR1); temp = imax(auxtemp[0], auxtemp[1]); break; case CORE1: auxtemp[0] = sc->sc_gettemp(dev, CORE1_SENSOR0); auxtemp[1] = sc->sc_gettemp(dev, CORE1_SENSOR1); temp = imax(auxtemp[0], auxtemp[1]); break; default: temp = sc->sc_gettemp(dev, sensor); break; } error = sysctl_handle_int(oidp, &temp, 0, req); return (error); } #define AMDTEMP_ZERO_C_TO_K 2731 static int32_t amdtemp_gettemp0f(device_t dev, amdsensor_t sensor) { struct amdtemp_softc *sc = device_get_softc(dev); uint32_t mask, offset, temp; /* Set Sensor/Core selector. */ temp = pci_read_config(dev, AMDTEMP_THERMTP_STAT, 1); temp &= ~(AMDTEMP_TTSR_SELCORE | AMDTEMP_TTSR_SELSENSOR); switch (sensor) { case CORE0_SENSOR1: temp |= AMDTEMP_TTSR_SELSENSOR; /* FALLTHROUGH */ case CORE0_SENSOR0: case CORE0: if ((sc->sc_flags & AMDTEMP_FLAG_CS_SWAP) != 0) temp |= AMDTEMP_TTSR_SELCORE; break; case CORE1_SENSOR1: temp |= AMDTEMP_TTSR_SELSENSOR; /* FALLTHROUGH */ case CORE1_SENSOR0: case CORE1: if ((sc->sc_flags & AMDTEMP_FLAG_CS_SWAP) == 0) temp |= AMDTEMP_TTSR_SELCORE; break; } pci_write_config(dev, AMDTEMP_THERMTP_STAT, temp, 1); mask = (sc->sc_flags & AMDTEMP_FLAG_CT_10BIT) != 0 ? 0x3ff : 0x3fc; offset = (sc->sc_flags & AMDTEMP_FLAG_ALT_OFFSET) != 0 ? 28 : 49; temp = pci_read_config(dev, AMDTEMP_THERMTP_STAT, 4); temp = ((temp >> 14) & mask) * 5 / 2; temp += AMDTEMP_ZERO_C_TO_K + (sc->sc_offset - offset) * 10; return (temp); } static int32_t amdtemp_gettemp(device_t dev, amdsensor_t sensor) { struct amdtemp_softc *sc = device_get_softc(dev); uint32_t temp; temp = pci_read_config(dev, AMDTEMP_REPTMP_CTRL, 4); temp = ((temp >> 21) & 0x7ff) * 5 / 4; temp += AMDTEMP_ZERO_C_TO_K + sc->sc_offset * 10; return (temp); } static int32_t amdtemp_gettemp17h(device_t dev, amdsensor_t sensor) { struct amdtemp_softc *sc = device_get_softc(dev); - uint32_t temp; + uint32_t temp, val; int error; - error = amdsmn_read(sc->sc_smn, AMDTEMP_17H_CUR_TMP, &temp); + error = amdsmn_read(sc->sc_smn, AMDTEMP_17H_CUR_TMP, &val); KASSERT(error == 0, ("amdsmn_read")); - temp = ((temp >> 21) & 0x7ff) * 5 / 4; + temp = ((val >> 21) & 0x7ff) * 5 / 4; + if ((val & AMDTEMP_17H_CUR_TMP_RANGE_SEL) != 0) + temp -= AMDTEMP_17H_CUR_TMP_RANGE_OFF; temp += AMDTEMP_ZERO_C_TO_K + sc->sc_offset * 10; return (temp); } Index: projects/clang700-import/sys/dev/cxgbe/t4_netmap.c =================================================================== --- projects/clang700-import/sys/dev/cxgbe/t4_netmap.c (revision 340426) +++ projects/clang700-import/sys/dev/cxgbe/t4_netmap.c (revision 340427) @@ -1,1134 +1,1137 @@ /*- * Copyright (c) 2014 Chelsio Communications, Inc. * All rights reserved. * Written by: Navdeep Parhar * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #ifdef DEV_NETMAP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common/common.h" #include "common/t4_regs.h" #include "common/t4_regs_values.h" extern int fl_pad; /* XXXNM */ SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0, "cxgbe netmap parameters"); /* * 0 = normal netmap rx * 1 = black hole * 2 = supermassive black hole (buffer packing enabled) */ int black_hole = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_black_hole, CTLFLAG_RDTUN, &black_hole, 0, "Sink incoming packets."); int rx_ndesc = 256; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_ndesc, CTLFLAG_RWTUN, &rx_ndesc, 0, "# of rx descriptors after which the hw cidx is updated."); int rx_nframes = 64; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_nframes, CTLFLAG_RWTUN, &rx_nframes, 0, "max # of frames received before waking up netmap rx."); int holdoff_tmr_idx = 2; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_holdoff_tmr_idx, CTLFLAG_RWTUN, &holdoff_tmr_idx, 0, "Holdoff timer index for netmap rx queues."); /* * Congestion drops. * -1: no congestion feedback (not recommended). * 0: backpressure the channel instead of dropping packets right away. * 1: no backpressure, drop packets for the congested queue immediately. */ static int nm_cong_drop = 1; TUNABLE_INT("hw.cxgbe.nm_cong_drop", &nm_cong_drop); int starve_fl = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, starve_fl, CTLFLAG_RWTUN, &starve_fl, 0, "Don't ring fl db for netmap rx queues."); /* * Try to process tx credits in bulk. This may cause a delay in the return of * tx credits and is suitable for bursty or non-stop tx only. */ int lazy_tx_credit_flush = 1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, lazy_tx_credit_flush, CTLFLAG_RWTUN, &lazy_tx_credit_flush, 0, "lazy credit flush for netmap tx queues."); /* * Split the netmap rx queues into two groups that populate separate halves of * the RSS indirection table. This allows filters with hashmask to steer to a * particular group of queues. 
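 *
 * Rough sketch of the resulting layout (sizes illustrative): with 4 netmap
 * rx queues and a 64-entry indirection table, entries 0..31 cycle through
 * rxq0/rxq1 and entries 32..63 through rxq2/rxq3, so a hashmask filter can
 * confine its traffic to either pair.  The knob is a loader tunable and a
 * runtime sysctl:
 *
 *   sysctl hw.cxgbe.nm_split_rss=1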
*/ static int nm_split_rss = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_split_rss, CTLFLAG_RWTUN, &nm_split_rss, 0, "Split the netmap rx queues into two groups."); static int alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong) { int rc, cntxt_id, i; __be32 v; struct adapter *sc = vi->pi->adapter; struct sge_params *sp = &sc->params.sge; struct netmap_adapter *na = NA(vi->ifp); struct fw_iq_cmd c; MPASS(na != NULL); MPASS(nm_rxq->iq_desc != NULL); MPASS(nm_rxq->fl_desc != NULL); bzero(nm_rxq->iq_desc, vi->qsize_rxq * IQ_ESIZE); bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + sp->spg_len); bzero(&c, sizeof(c)); c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | V_FW_IQ_CMD_VFN(0)); c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | FW_LEN16(c)); MPASS(!forwarding_intr_to_fwq(sc)); KASSERT(nm_rxq->intr_idx < sc->intr_count, ("%s: invalid direct intr_idx %d", __func__, nm_rxq->intr_idx)); v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx); c.type_to_iqandstindex = htobe32(v | V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | V_FW_IQ_CMD_VIID(vi->viid) | V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT)); c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(vi->pi->tx_chan) | F_FW_IQ_CMD_IQGTSMODE | V_FW_IQ_CMD_IQINTCNTTHRESH(0) | V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4)); c.iqsize = htobe16(vi->qsize_rxq); c.iqaddr = htobe64(nm_rxq->iq_ba); if (cong >= 0) { c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN | V_FW_IQ_CMD_FL0CNGCHMAP(cong) | F_FW_IQ_CMD_FL0CONGCIF | F_FW_IQ_CMD_FL0CONGEN); } c.iqns_to_fl0congen |= htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) | F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) | (black_hole == 2 ? F_FW_IQ_CMD_FL0PACKEN : 0)); c.fl0dcaen_to_fl0cidxfthresh = htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ? X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B) | V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ? 
X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B)); c.fl0size = htobe16(na->num_rx_desc / 8 + sp->spg_len / EQ_ESIZE); c.fl0addr = htobe64(nm_rxq->fl_ba); rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); if (rc != 0) { device_printf(sc->dev, "failed to create netmap ingress queue: %d\n", rc); return (rc); } nm_rxq->iq_cidx = 0; MPASS(nm_rxq->iq_sidx == vi->qsize_rxq - sp->spg_len / IQ_ESIZE); nm_rxq->iq_gen = F_RSPD_GEN; nm_rxq->iq_cntxt_id = be16toh(c.iqid); nm_rxq->iq_abs_id = be16toh(c.physiqid); cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start; if (cntxt_id >= sc->sge.niq) { panic ("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)", __func__, cntxt_id, sc->sge.niq - 1); } sc->sge.iqmap[cntxt_id] = (void *)nm_rxq; nm_rxq->fl_cntxt_id = be16toh(c.fl0id); nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0; MPASS(nm_rxq->fl_sidx == na->num_rx_desc); cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start; if (cntxt_id >= sc->sge.neq) { panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)", __func__, cntxt_id, sc->sge.neq - 1); } sc->sge.eqmap[cntxt_id] = (void *)nm_rxq; nm_rxq->fl_db_val = V_QID(nm_rxq->fl_cntxt_id) | sc->chip_params->sge_fl_db; if (chip_id(sc) >= CHELSIO_T5 && cong >= 0) { uint32_t param, val; param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | V_FW_PARAMS_PARAM_YZ(nm_rxq->iq_cntxt_id); param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | V_FW_PARAMS_PARAM_YZ(nm_rxq->iq_cntxt_id); if (cong == 0) val = 1 << 19; else { val = 2 << 19; for (i = 0; i < 4; i++) { if (cong & (1 << i)) val |= 1 << (i << 2); } } rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); if (rc != 0) { /* report error but carry on */ device_printf(sc->dev, "failed to set congestion manager context for " "ingress queue %d: %d\n", nm_rxq->iq_cntxt_id, rc); } } t4_write_reg(sc, sc->sge_gts_reg, V_INGRESSQID(nm_rxq->iq_cntxt_id) | V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx))); return (rc); } static int free_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq) { struct adapter *sc = vi->pi->adapter; int rc; rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff); if (rc != 0) device_printf(sc->dev, "%s: failed for iq %d, fl %d: %d\n", __func__, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, rc); nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID; return (rc); } static int alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq) { int rc, cntxt_id; size_t len; struct adapter *sc = vi->pi->adapter; struct netmap_adapter *na = NA(vi->ifp); struct fw_eq_eth_cmd c; MPASS(na != NULL); MPASS(nm_txq->desc != NULL); len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len; bzero(nm_txq->desc, len); bzero(&c, sizeof(c)); c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | V_FW_EQ_ETH_CMD_VFN(0)); c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC | F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE | F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid)); c.fetchszm_to_iqid = htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id)); c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) | V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE)); c.eqaddr = 
htobe64(nm_txq->ba); rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); if (rc != 0) { device_printf(vi->dev, "failed to create netmap egress queue: %d\n", rc); return (rc); } nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start; if (cntxt_id >= sc->sge.neq) panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)", __func__, cntxt_id, sc->sge.neq - 1); sc->sge.eqmap[cntxt_id] = (void *)nm_txq; nm_txq->pidx = nm_txq->cidx = 0; MPASS(nm_txq->sidx == na->num_tx_desc); nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0; nm_txq->doorbells = sc->doorbells; if (isset(&nm_txq->doorbells, DOORBELL_UDB) || isset(&nm_txq->doorbells, DOORBELL_UDBWC) || isset(&nm_txq->doorbells, DOORBELL_WCWR)) { uint32_t s_qpp = sc->params.sge.eq_s_qpp; uint32_t mask = (1 << s_qpp) - 1; volatile uint8_t *udb; udb = sc->udbs_base + UDBS_DB_OFFSET; udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT; nm_txq->udb_qid = nm_txq->cntxt_id & mask; if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE) clrbit(&nm_txq->doorbells, DOORBELL_WCWR); else { udb += nm_txq->udb_qid << UDBS_SEG_SHIFT; nm_txq->udb_qid = 0; } nm_txq->udb = (volatile void *)udb; } return (rc); } static int free_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq) { struct adapter *sc = vi->pi->adapter; int rc; rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id); if (rc != 0) device_printf(sc->dev, "%s: failed for eq %d: %d\n", __func__, nm_txq->cntxt_id, rc); nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID; return (rc); } static int cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp, struct netmap_adapter *na) { struct netmap_slot *slot; struct netmap_kring *kring; struct sge_nm_rxq *nm_rxq; struct sge_nm_txq *nm_txq; int rc, i, j, hwidx, defq, nrssq; struct hw_buf_info *hwb; ASSERT_SYNCHRONIZED_OP(sc); if ((vi->flags & VI_INIT_DONE) == 0 || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return (EAGAIN); hwb = &sc->sge.hw_buf_info[0]; for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) { if (hwb->size == NETMAP_BUF_SIZE(na)) break; } if (i >= SGE_FLBUF_SIZES) { if_printf(ifp, "no hwidx for netmap buffer size %d.\n", NETMAP_BUF_SIZE(na)); return (ENXIO); } hwidx = i; /* Must set caps before calling netmap_reset */ nm_set_native_flags(na); for_each_nm_rxq(vi, i, nm_rxq) { kring = na->rx_rings[nm_rxq->nid]; if (!nm_kring_pending_on(kring) || nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID) continue; alloc_nm_rxq_hwq(vi, nm_rxq, tnl_cong(vi->pi, nm_cong_drop)); nm_rxq->fl_hwidx = hwidx; slot = netmap_reset(na, NR_RX, i, 0); MPASS(slot != NULL); /* XXXNM: error check, not assert */ /* We deal with 8 bufs at a time */ MPASS((na->num_rx_desc & 7) == 0); MPASS(na->num_rx_desc == nm_rxq->fl_sidx); for (j = 0; j < nm_rxq->fl_sidx; j++) { uint64_t ba; PNMB(na, &slot[j], &ba); MPASS(ba != 0); nm_rxq->fl_desc[j] = htobe64(ba | hwidx); } j = nm_rxq->fl_pidx = nm_rxq->fl_sidx - 8; MPASS((j & 7) == 0); j /= 8; /* driver pidx to hardware pidx */ wmb(); t4_write_reg(sc, sc->sge_kdoorbell_reg, nm_rxq->fl_db_val | V_PIDX(j)); (void) atomic_cmpset_int(&nm_rxq->nm_state, NM_OFF, NM_ON); } for_each_nm_txq(vi, i, nm_txq) { kring = na->tx_rings[nm_txq->nid]; if (!nm_kring_pending_on(kring) || nm_txq->cntxt_id != INVALID_NM_TXQ_CNTXT_ID) continue; alloc_nm_txq_hwq(vi, nm_txq); slot = netmap_reset(na, NR_TX, i, 0); MPASS(slot != NULL); /* XXXNM: error check, not assert */ } if (vi->nm_rss == NULL) { vi->nm_rss = malloc(vi->rss_size * sizeof(uint16_t), M_CXGBE, M_ZERO | M_WAITOK); } MPASS(vi->nnmrxq > 0); 
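	/*
	 * The block below fills vi->nm_rss.  By default every netmap rxq is
	 * round-robined across the whole indirection table; with
	 * hw.cxgbe.nm_split_rss set (and more than one rxq) the first half of
	 * the queues fills the first half of the table and the remaining
	 * queues fill the second half.  Worked example (sizes illustrative):
	 * rss_size = 8 and 4 rxqs give [q0 q1 q0 q1 | q2 q3 q2 q3], with q0
	 * as the default queue.
	 */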
if (nm_split_rss == 0 || vi->nnmrxq == 1) { for (i = 0; i < vi->rss_size;) { for_each_nm_rxq(vi, j, nm_rxq) { vi->nm_rss[i++] = nm_rxq->iq_abs_id; if (i == vi->rss_size) break; } } defq = vi->nm_rss[0]; } else { /* We have multiple queues and we want to split the table. */ MPASS(nm_split_rss != 0); MPASS(vi->nnmrxq > 1); nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq]; nrssq = vi->nnmrxq; if (vi->nnmrxq & 1) { /* * Odd number of queues. The first rxq is designated the * default queue, the rest are split evenly. */ defq = nm_rxq->iq_abs_id; nm_rxq++; nrssq--; } else { /* * Even number of queues split into two halves. The * first rxq in one of the halves is designated the * default queue. */ #if 1 /* First rxq in the first half. */ defq = nm_rxq->iq_abs_id; #else /* First rxq in the second half. */ defq = nm_rxq[vi->nnmrxq / 2].iq_abs_id; #endif } i = 0; while (i < vi->rss_size / 2) { for (j = 0; j < nrssq / 2; j++) { vi->nm_rss[i++] = nm_rxq[j].iq_abs_id; if (i == vi->rss_size / 2) break; } } while (i < vi->rss_size) { for (j = nrssq / 2; j < nrssq; j++) { vi->nm_rss[i++] = nm_rxq[j].iq_abs_id; if (i == vi->rss_size) break; } } } rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, vi->nm_rss, vi->rss_size); if (rc != 0) if_printf(ifp, "netmap rss_config failed: %d\n", rc); rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, defq, 0, 0); if (rc != 0) if_printf(ifp, "netmap rss hash/defaultq config failed: %d\n", rc); return (rc); } static int cxgbe_netmap_off(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp, struct netmap_adapter *na) { struct netmap_kring *kring; int rc, i; struct sge_nm_txq *nm_txq; struct sge_nm_rxq *nm_rxq; ASSERT_SYNCHRONIZED_OP(sc); + if (!nm_netmap_on(na)) + return (0); + if ((vi->flags & VI_INIT_DONE) == 0) return (0); rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, vi->rss, vi->rss_size); if (rc != 0) if_printf(ifp, "failed to restore RSS config: %d\n", rc); rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, vi->rss[0], 0, 0); if (rc != 0) if_printf(ifp, "failed to restore RSS hash/defaultq: %d\n", rc); nm_clear_native_flags(na); for_each_nm_txq(vi, i, nm_txq) { struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx]; kring = na->tx_rings[nm_txq->nid]; if (!nm_kring_pending_off(kring) || nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID) continue; /* Wait for hw pidx to catch up ... */ while (be16toh(nm_txq->pidx) != spg->pidx) pause("nmpidx", 1); /* ... and then for the cidx. 
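 * That is, keep polling the status page until the hardware's consumer
 * index has caught up with its producer index and nothing queued on this
 * ring is still in flight, so free_nm_txq_hwq() below can tear the queue
 * down safely.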
*/ while (spg->pidx != spg->cidx) pause("nmcidx", 1); free_nm_txq_hwq(vi, nm_txq); } for_each_nm_rxq(vi, i, nm_rxq) { kring = na->rx_rings[nm_rxq->nid]; if (!nm_kring_pending_off(kring) || nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID) continue; while (!atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_OFF)) pause("nmst", 1); free_nm_rxq_hwq(vi, nm_rxq); } return (rc); } static int cxgbe_netmap_reg(struct netmap_adapter *na, int on) { struct ifnet *ifp = na->ifp; struct vi_info *vi = ifp->if_softc; struct adapter *sc = vi->pi->adapter; int rc; rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nmreg"); if (rc != 0) return (rc); if (on) rc = cxgbe_netmap_on(sc, vi, ifp, na); else rc = cxgbe_netmap_off(sc, vi, ifp, na); end_synchronized_op(sc, 0); return (rc); } /* How many packets can a single type1 WR carry in n descriptors */ static inline int ndesc_to_npkt(const int n) { MPASS(n > 0 && n <= SGE_MAX_WR_NDESC); return (n * 2 - 1); } #define MAX_NPKT_IN_TYPE1_WR (ndesc_to_npkt(SGE_MAX_WR_NDESC)) /* Space (in descriptors) needed for a type1 WR that carries n packets */ static inline int npkt_to_ndesc(const int n) { MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR); return ((n + 2) / 2); } /* Space (in 16B units) needed for a type1 WR that carries n packets */ static inline int npkt_to_len16(const int n) { MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR); return (n * 2 + 1); } #define NMIDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->sidx) static void ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq) { int n; u_int db = nm_txq->doorbells; MPASS(nm_txq->pidx != nm_txq->dbidx); n = NMIDXDIFF(nm_txq, dbidx); if (n > 1) clrbit(&db, DOORBELL_WCWR); wmb(); switch (ffs(db) - 1) { case DOORBELL_UDB: *nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n)); break; case DOORBELL_WCWR: { volatile uint64_t *dst, *src; /* * Queues whose 128B doorbell segment fits in the page do not * use relative qid (udb_qid is always 0). Only queues with * doorbell segments can do WCWR. */ KASSERT(nm_txq->udb_qid == 0 && n == 1, ("%s: inappropriate doorbell (0x%x, %d, %d) for nm_txq %p", __func__, nm_txq->doorbells, n, nm_txq->pidx, nm_txq)); dst = (volatile void *)((uintptr_t)nm_txq->udb + UDBS_WR_OFFSET - UDBS_DB_OFFSET); src = (void *)&nm_txq->desc[nm_txq->dbidx]; while (src != (void *)&nm_txq->desc[nm_txq->dbidx + 1]) *dst++ = *src++; wmb(); break; } case DOORBELL_UDBWC: *nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n)); wmb(); break; case DOORBELL_KDB: t4_write_reg(sc, sc->sge_kdoorbell_reg, V_QID(nm_txq->cntxt_id) | V_PIDX(n)); break; } nm_txq->dbidx = nm_txq->pidx; } /* * Write work requests to send 'npkt' frames and ring the doorbell to send them * on their way. No need to check for wraparound. 
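 *
 * Packing arithmetic for reference (derived from the helpers above): a
 * type1 WR spanning n descriptors carries at most 2n - 1 packets, so for
 * example a WR with 7 packets needs npkt_to_ndesc(7) = 4 descriptors and
 * npkt_to_len16(7) = 15 sixteen-byte units, and a full WR of
 * SGE_MAX_WR_NDESC descriptors carries MAX_NPKT_IN_TYPE1_WR packets.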
*/ static void cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq, struct netmap_kring *kring, int npkt, int npkt_remaining, int txcsum) { struct netmap_ring *ring = kring->ring; struct netmap_slot *slot; const u_int lim = kring->nkr_num_slots - 1; struct fw_eth_tx_pkts_wr *wr = (void *)&nm_txq->desc[nm_txq->pidx]; uint16_t len; uint64_t ba; struct cpl_tx_pkt_core *cpl; struct ulptx_sgl *usgl; int i, n; while (npkt) { n = min(npkt, MAX_NPKT_IN_TYPE1_WR); len = 0; wr = (void *)&nm_txq->desc[nm_txq->pidx]; wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(npkt_to_len16(n))); wr->npkt = n; wr->r3 = 0; wr->type = 1; cpl = (void *)(wr + 1); for (i = 0; i < n; i++) { slot = &ring->slot[kring->nr_hwcur]; PNMB(kring->na, slot, &ba); MPASS(ba != 0); cpl->ctrl0 = nm_txq->cpl_ctrl0; cpl->pack = 0; cpl->len = htobe16(slot->len); /* * netmap(4) says "netmap does not use features such as * checksum offloading, TCP segmentation offloading, * encryption, VLAN encapsulation/decapsulation, etc." * * So the ncxl interfaces have tx hardware checksumming * disabled by default. But you can override netmap by * enabling IFCAP_TXCSUM on the interface manully. */ cpl->ctrl1 = txcsum ? 0 : htobe64(F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS); usgl = (void *)(cpl + 1); usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | V_ULPTX_NSGE(1)); usgl->len0 = htobe32(slot->len); usgl->addr0 = htobe64(ba); slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED); cpl = (void *)(usgl + 1); MPASS(slot->len + len <= UINT16_MAX); len += slot->len; kring->nr_hwcur = nm_next(kring->nr_hwcur, lim); } wr->plen = htobe16(len); npkt -= n; nm_txq->pidx += npkt_to_ndesc(n); MPASS(nm_txq->pidx <= nm_txq->sidx); if (__predict_false(nm_txq->pidx == nm_txq->sidx)) { /* * This routine doesn't know how to write WRs that wrap * around. Make sure it wasn't asked to. */ MPASS(npkt == 0); nm_txq->pidx = 0; } if (npkt == 0 && npkt_remaining == 0) { /* All done. */ if (lazy_tx_credit_flush == 0) { wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ | F_FW_WR_EQUIQ); nm_txq->equeqidx = nm_txq->pidx; nm_txq->equiqidx = nm_txq->pidx; } ring_nm_txq_db(sc, nm_txq); return; } if (NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) { wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ | F_FW_WR_EQUIQ); nm_txq->equeqidx = nm_txq->pidx; nm_txq->equiqidx = nm_txq->pidx; } else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) { wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); nm_txq->equeqidx = nm_txq->pidx; } if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC) ring_nm_txq_db(sc, nm_txq); } /* Will get called again. 
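 * cxgbe_netmap_txsync() keeps calling this routine until npkt_remaining
 * reaches zero, and only that final call is guaranteed to ring the
 * doorbell (and, unless lazy_tx_credit_flush is set, to ask for an
 * egress-queue update) for whatever is still pending here.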
*/ MPASS(npkt_remaining); } /* How many contiguous free descriptors starting at pidx */ static inline int contiguous_ndesc_available(struct sge_nm_txq *nm_txq) { if (nm_txq->cidx > nm_txq->pidx) return (nm_txq->cidx - nm_txq->pidx - 1); else if (nm_txq->cidx > 0) return (nm_txq->sidx - nm_txq->pidx); else return (nm_txq->sidx - nm_txq->pidx - 1); } static int reclaim_nm_tx_desc(struct sge_nm_txq *nm_txq) { struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx]; uint16_t hw_cidx = spg->cidx; /* snapshot */ struct fw_eth_tx_pkts_wr *wr; int n = 0; hw_cidx = be16toh(hw_cidx); while (nm_txq->cidx != hw_cidx) { wr = (void *)&nm_txq->desc[nm_txq->cidx]; MPASS(wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR))); MPASS(wr->type == 1); MPASS(wr->npkt > 0 && wr->npkt <= MAX_NPKT_IN_TYPE1_WR); n += wr->npkt; nm_txq->cidx += npkt_to_ndesc(wr->npkt); /* * We never sent a WR that wrapped around so the credits coming * back, WR by WR, should never cause the cidx to wrap around * either. */ MPASS(nm_txq->cidx <= nm_txq->sidx); if (__predict_false(nm_txq->cidx == nm_txq->sidx)) nm_txq->cidx = 0; } return (n); } static int cxgbe_netmap_txsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct ifnet *ifp = na->ifp; struct vi_info *vi = ifp->if_softc; struct adapter *sc = vi->pi->adapter; struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[vi->first_nm_txq + kring->ring_id]; const u_int head = kring->rhead; u_int reclaimed = 0; int n, d, npkt_remaining, ndesc_remaining, txcsum; /* * Tx was at kring->nr_hwcur last time around and now we need to advance * to kring->rhead. Note that the driver's pidx moves independent of * netmap's kring->nr_hwcur (pidx counts descriptors and the relation * between descriptors and frames isn't 1:1). */ npkt_remaining = head >= kring->nr_hwcur ? head - kring->nr_hwcur : kring->nkr_num_slots - kring->nr_hwcur + head; txcsum = ifp->if_capenable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6); while (npkt_remaining) { reclaimed += reclaim_nm_tx_desc(nm_txq); ndesc_remaining = contiguous_ndesc_available(nm_txq); /* Can't run out of descriptors with packets still remaining */ MPASS(ndesc_remaining > 0); /* # of desc needed to tx all remaining packets */ d = (npkt_remaining / MAX_NPKT_IN_TYPE1_WR) * SGE_MAX_WR_NDESC; if (npkt_remaining % MAX_NPKT_IN_TYPE1_WR) d += npkt_to_ndesc(npkt_remaining % MAX_NPKT_IN_TYPE1_WR); if (d <= ndesc_remaining) n = npkt_remaining; else { /* Can't send all, calculate how many can be sent */ n = (ndesc_remaining / SGE_MAX_WR_NDESC) * MAX_NPKT_IN_TYPE1_WR; if (ndesc_remaining % SGE_MAX_WR_NDESC) n += ndesc_to_npkt(ndesc_remaining % SGE_MAX_WR_NDESC); } /* Send n packets and update nm_txq->pidx and kring->nr_hwcur */ npkt_remaining -= n; cxgbe_nm_tx(sc, nm_txq, kring, n, npkt_remaining, txcsum); } MPASS(npkt_remaining == 0); MPASS(kring->nr_hwcur == head); MPASS(nm_txq->dbidx == nm_txq->pidx); /* * Second part: reclaim buffers for completed transmissions. 
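 *
 * reclaim_nm_tx_desc() walks the work requests between our cidx and the
 * cidx the hardware reports in the status page, summing wr->npkt for each,
 * and that count advances kring->nr_hwtail below so netmap can hand the
 * corresponding slots back to the application.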
*/ if (reclaimed || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) { reclaimed += reclaim_nm_tx_desc(nm_txq); kring->nr_hwtail += reclaimed; if (kring->nr_hwtail >= kring->nkr_num_slots) kring->nr_hwtail -= kring->nkr_num_slots; } return (0); } static int cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct netmap_ring *ring = kring->ring; struct ifnet *ifp = na->ifp; struct vi_info *vi = ifp->if_softc; struct adapter *sc = vi->pi->adapter; struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq + kring->ring_id]; u_int const head = kring->rhead; u_int n; int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; if (black_hole) return (0); /* No updates ever. */ if (netmap_no_pendintr || force_update) { kring->nr_hwtail = atomic_load_acq_32(&nm_rxq->fl_cidx); kring->nr_kflags &= ~NKR_PENDINTR; } if (nm_rxq->fl_db_saved > 0 && starve_fl == 0) { wmb(); t4_write_reg(sc, sc->sge_kdoorbell_reg, nm_rxq->fl_db_val | V_PIDX(nm_rxq->fl_db_saved)); nm_rxq->fl_db_saved = 0; } /* Userspace done with buffers from kring->nr_hwcur to head */ n = head >= kring->nr_hwcur ? head - kring->nr_hwcur : kring->nkr_num_slots - kring->nr_hwcur + head; n &= ~7U; if (n > 0) { u_int fl_pidx = nm_rxq->fl_pidx; struct netmap_slot *slot = &ring->slot[fl_pidx]; uint64_t ba; int i, dbinc = 0, hwidx = nm_rxq->fl_hwidx; /* * We always deal with 8 buffers at a time. We must have * stopped at an 8B boundary (fl_pidx) last time around and we * must have a multiple of 8B buffers to give to the freelist. */ MPASS((fl_pidx & 7) == 0); MPASS((n & 7) == 0); IDXINCR(kring->nr_hwcur, n, kring->nkr_num_slots); IDXINCR(nm_rxq->fl_pidx, n, nm_rxq->fl_sidx); while (n > 0) { for (i = 0; i < 8; i++, fl_pidx++, slot++) { PNMB(na, slot, &ba); MPASS(ba != 0); nm_rxq->fl_desc[fl_pidx] = htobe64(ba | hwidx); slot->flags &= ~NS_BUF_CHANGED; MPASS(fl_pidx <= nm_rxq->fl_sidx); } n -= 8; if (fl_pidx == nm_rxq->fl_sidx) { fl_pidx = 0; slot = &ring->slot[0]; } if (++dbinc == 8 && n >= 32) { wmb(); if (starve_fl) nm_rxq->fl_db_saved += dbinc; else { t4_write_reg(sc, sc->sge_kdoorbell_reg, nm_rxq->fl_db_val | V_PIDX(dbinc)); } dbinc = 0; } } MPASS(nm_rxq->fl_pidx == fl_pidx); if (dbinc > 0) { wmb(); if (starve_fl) nm_rxq->fl_db_saved += dbinc; else { t4_write_reg(sc, sc->sge_kdoorbell_reg, nm_rxq->fl_db_val | V_PIDX(dbinc)); } } } return (0); } void cxgbe_nm_attach(struct vi_info *vi) { struct port_info *pi; struct adapter *sc; struct netmap_adapter na; MPASS(vi->nnmrxq > 0); MPASS(vi->ifp != NULL); pi = vi->pi; sc = pi->adapter; bzero(&na, sizeof(na)); na.ifp = vi->ifp; na.na_flags = NAF_BDG_MAYSLEEP; /* Netmap doesn't know about the space reserved for the status page. */ na.num_tx_desc = vi->qsize_txq - sc->params.sge.spg_len / EQ_ESIZE; /* * The freelist's cidx/pidx drives netmap's rx cidx/pidx. So * num_rx_desc is based on the number of buffers that can be held in the * freelist, and not the number of entries in the iq. (These two are * not exactly the same due to the space taken up by the status page). 
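 * num_rx_desc is also kept a multiple of 8 (rounddown() below) because the
 * freelist is always replenished eight buffers at a time in
 * cxgbe_netmap_rxsync() and service_nm_rxq().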
*/ na.num_rx_desc = rounddown(vi->qsize_rxq, 8); na.nm_txsync = cxgbe_netmap_txsync; na.nm_rxsync = cxgbe_netmap_rxsync; na.nm_register = cxgbe_netmap_reg; na.num_tx_rings = vi->nnmtxq; na.num_rx_rings = vi->nnmrxq; netmap_attach(&na); } void cxgbe_nm_detach(struct vi_info *vi) { MPASS(vi->nnmrxq > 0); MPASS(vi->ifp != NULL); netmap_detach(vi->ifp); } static inline const void * unwrap_nm_fw6_msg(const struct cpl_fw6_msg *cpl) { MPASS(cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL); /* data[0] is RSS header */ return (&cpl->data[1]); } static void handle_nm_sge_egr_update(struct adapter *sc, struct ifnet *ifp, const struct cpl_sge_egr_update *egr) { uint32_t oq; struct sge_nm_txq *nm_txq; oq = be32toh(egr->opcode_qid); MPASS(G_CPL_OPCODE(oq) == CPL_SGE_EGR_UPDATE); nm_txq = (void *)sc->sge.eqmap[G_EGR_QID(oq) - sc->sge.eq_start]; netmap_tx_irq(ifp, nm_txq->nid); } void service_nm_rxq(struct sge_nm_rxq *nm_rxq) { struct vi_info *vi = nm_rxq->vi; struct adapter *sc = vi->pi->adapter; struct ifnet *ifp = vi->ifp; struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = na->rx_rings[nm_rxq->nid]; struct netmap_ring *ring = kring->ring; struct iq_desc *d = &nm_rxq->iq_desc[nm_rxq->iq_cidx]; const void *cpl; uint32_t lq; u_int work = 0; uint8_t opcode; uint32_t fl_cidx = atomic_load_acq_32(&nm_rxq->fl_cidx); u_int fl_credits = fl_cidx & 7; u_int ndesc = 0; /* desc processed since last cidx update */ u_int nframes = 0; /* frames processed since last netmap wakeup */ while ((d->rsp.u.type_gen & F_RSPD_GEN) == nm_rxq->iq_gen) { rmb(); lq = be32toh(d->rsp.pldbuflen_qid); opcode = d->rss.opcode; cpl = &d->cpl[0]; switch (G_RSPD_TYPE(d->rsp.u.type_gen)) { case X_RSPD_TYPE_FLBUF: /* fall through */ case X_RSPD_TYPE_CPL: MPASS(opcode < NUM_CPL_CMDS); switch (opcode) { case CPL_FW4_MSG: case CPL_FW6_MSG: cpl = unwrap_nm_fw6_msg(cpl); /* fall through */ case CPL_SGE_EGR_UPDATE: handle_nm_sge_egr_update(sc, ifp, cpl); break; case CPL_RX_PKT: ring->slot[fl_cidx].len = G_RSPD_LEN(lq) - sc->params.sge.fl_pktshift; ring->slot[fl_cidx].flags = 0; nframes++; if (!(lq & F_RSPD_NEWBUF)) { MPASS(black_hole == 2); break; } fl_credits++; if (__predict_false(++fl_cidx == nm_rxq->fl_sidx)) fl_cidx = 0; break; default: panic("%s: unexpected opcode 0x%x on nm_rxq %p", __func__, opcode, nm_rxq); } break; case X_RSPD_TYPE_INTR: /* Not equipped to handle forwarded interrupts. 
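 * Netmap rx queues are always set up with a direct interrupt (see the
 * forwarding_intr_to_fwq() assertion in alloc_nm_rxq_hwq()), so an
 * X_RSPD_TYPE_INTR entry landing here would indicate a driver bug and we
 * panic rather than guess.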
*/ panic("%s: netmap queue received interrupt for iq %u\n", __func__, lq); default: panic("%s: illegal response type %d on nm_rxq %p", __func__, G_RSPD_TYPE(d->rsp.u.type_gen), nm_rxq); } d++; if (__predict_false(++nm_rxq->iq_cidx == nm_rxq->iq_sidx)) { nm_rxq->iq_cidx = 0; d = &nm_rxq->iq_desc[0]; nm_rxq->iq_gen ^= F_RSPD_GEN; } if (__predict_false(++nframes == rx_nframes) && !black_hole) { atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx); netmap_rx_irq(ifp, nm_rxq->nid, &work); nframes = 0; } if (__predict_false(++ndesc == rx_ndesc)) { if (black_hole && fl_credits >= 8) { fl_credits /= 8; IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, nm_rxq->fl_sidx); t4_write_reg(sc, sc->sge_kdoorbell_reg, nm_rxq->fl_db_val | V_PIDX(fl_credits)); fl_credits = fl_cidx & 7; } t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndesc) | V_INGRESSQID(nm_rxq->iq_cntxt_id) | V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); ndesc = 0; } } atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx); if (black_hole) { fl_credits /= 8; IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, nm_rxq->fl_sidx); t4_write_reg(sc, sc->sge_kdoorbell_reg, nm_rxq->fl_db_val | V_PIDX(fl_credits)); } else if (nframes > 0) netmap_rx_irq(ifp, nm_rxq->nid, &work); t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndesc) | V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) | V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx))); } #endif Index: projects/clang700-import/sys/dev/nvme/nvme_ns.c =================================================================== --- projects/clang700-import/sys/dev/nvme/nvme_ns.c (revision 340426) +++ projects/clang700-import/sys/dev/nvme/nvme_ns.c (revision 340427) @@ -1,617 +1,617 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2012-2013 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include "nvme_private.h" static void nvme_bio_child_inbed(struct bio *parent, int bio_error); static void nvme_bio_child_done(void *arg, const struct nvme_completion *cpl); static uint32_t nvme_get_num_segments(uint64_t addr, uint64_t size, uint32_t alignment); static void nvme_free_child_bios(int num_bios, struct bio **child_bios); static struct bio ** nvme_allocate_child_bios(int num_bios); static struct bio ** nvme_construct_child_bios(struct bio *bp, uint32_t alignment, int *num_bios); static int nvme_ns_split_bio(struct nvme_namespace *ns, struct bio *bp, uint32_t alignment); static int nvme_ns_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag, struct thread *td) { struct nvme_namespace *ns; struct nvme_controller *ctrlr; struct nvme_pt_command *pt; ns = cdev->si_drv1; ctrlr = ns->ctrlr; switch (cmd) { case NVME_IO_TEST: case NVME_BIO_TEST: nvme_ns_test(ns, cmd, arg); break; case NVME_PASSTHROUGH_CMD: pt = (struct nvme_pt_command *)arg; return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, ns->id, 1 /* is_user_buffer */, 0 /* is_admin_cmd */)); case DIOCGMEDIASIZE: *(off_t *)arg = (off_t)nvme_ns_get_size(ns); break; case DIOCGSECTORSIZE: *(u_int *)arg = nvme_ns_get_sector_size(ns); break; default: return (ENOTTY); } return (0); } static int nvme_ns_open(struct cdev *dev __unused, int flags, int fmt __unused, struct thread *td) { int error = 0; if (flags & FWRITE) error = securelevel_gt(td->td_ucred, 0); return (error); } static int nvme_ns_close(struct cdev *dev __unused, int flags, int fmt __unused, struct thread *td) { return (0); } static void nvme_ns_strategy_done(void *arg, const struct nvme_completion *cpl) { struct bio *bp = arg; /* * TODO: add more extensive translation of NVMe status codes * to different bio error codes (i.e. EIO, EINVAL, etc.) 
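 * One possible refinement, sketched here only and not implemented by this
 * change: map LBA-out-of-range and invalid-field statuses to EINVAL and
 * keep EIO for media and transport errors, instead of the blanket EIO
 * below.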
*/ if (nvme_completion_is_error(cpl)) { bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; bp->bio_resid = bp->bio_bcount; } else bp->bio_resid = 0; biodone(bp); } static void nvme_ns_strategy(struct bio *bp) { struct nvme_namespace *ns; int err; ns = bp->bio_dev->si_drv1; err = nvme_ns_bio_process(ns, bp, nvme_ns_strategy_done); if (err) { bp->bio_error = err; bp->bio_flags |= BIO_ERROR; bp->bio_resid = bp->bio_bcount; biodone(bp); } } static struct cdevsw nvme_ns_cdevsw = { .d_version = D_VERSION, .d_flags = D_DISK, .d_read = physread, .d_write = physwrite, .d_open = nvme_ns_open, .d_close = nvme_ns_close, .d_strategy = nvme_ns_strategy, .d_ioctl = nvme_ns_ioctl }; uint32_t nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns) { return ns->ctrlr->max_xfer_size; } uint32_t nvme_ns_get_sector_size(struct nvme_namespace *ns) { uint8_t flbas_fmt, lbads; flbas_fmt = (ns->data.flbas >> NVME_NS_DATA_FLBAS_FORMAT_SHIFT) & NVME_NS_DATA_FLBAS_FORMAT_MASK; lbads = (ns->data.lbaf[flbas_fmt] >> NVME_NS_DATA_LBAF_LBADS_SHIFT) & NVME_NS_DATA_LBAF_LBADS_MASK; return (1 << lbads); } uint64_t nvme_ns_get_num_sectors(struct nvme_namespace *ns) { return (ns->data.nsze); } uint64_t nvme_ns_get_size(struct nvme_namespace *ns) { return (nvme_ns_get_num_sectors(ns) * nvme_ns_get_sector_size(ns)); } uint32_t nvme_ns_get_flags(struct nvme_namespace *ns) { return (ns->flags); } const char * nvme_ns_get_serial_number(struct nvme_namespace *ns) { return ((const char *)ns->ctrlr->cdata.sn); } const char * nvme_ns_get_model_number(struct nvme_namespace *ns) { return ((const char *)ns->ctrlr->cdata.mn); } const struct nvme_namespace_data * nvme_ns_get_data(struct nvme_namespace *ns) { return (&ns->data); } uint32_t nvme_ns_get_stripesize(struct nvme_namespace *ns) { return (ns->stripesize); } static void nvme_ns_bio_done(void *arg, const struct nvme_completion *status) { struct bio *bp = arg; nvme_cb_fn_t bp_cb_fn; bp_cb_fn = bp->bio_driver1; if (bp->bio_driver2) free(bp->bio_driver2, M_NVME); if (nvme_completion_is_error(status)) { bp->bio_flags |= BIO_ERROR; if (bp->bio_error == 0) bp->bio_error = EIO; } if ((bp->bio_flags & BIO_ERROR) == 0) bp->bio_resid = 0; else bp->bio_resid = bp->bio_bcount; bp_cb_fn(bp, status); } static void nvme_bio_child_inbed(struct bio *parent, int bio_error) { struct nvme_completion parent_cpl; int children, inbed; if (bio_error != 0) { parent->bio_flags |= BIO_ERROR; parent->bio_error = bio_error; } /* * atomic_fetchadd will return value before adding 1, so we still * must add 1 to get the updated inbed number. Save bio_children * before incrementing to guard against race conditions when * two children bios complete on different queues. */ children = atomic_load_acq_int(&parent->bio_children); inbed = atomic_fetchadd_int(&parent->bio_inbed, 1) + 1; if (inbed == children) { bzero(&parent_cpl, sizeof(parent_cpl)); if (parent->bio_flags & BIO_ERROR) { parent_cpl.status &= ~(NVME_STATUS_SC_MASK << NVME_STATUS_SC_SHIFT); parent_cpl.status |= (NVME_SC_DATA_TRANSFER_ERROR) << NVME_STATUS_SC_SHIFT; } nvme_ns_bio_done(parent, &parent_cpl); } } static void nvme_bio_child_done(void *arg, const struct nvme_completion *cpl) { struct bio *child = arg; struct bio *parent; int bio_error; parent = child->bio_parent; g_destroy_bio(child); bio_error = nvme_completion_is_error(cpl) ? 
EIO : 0; nvme_bio_child_inbed(parent, bio_error); } static uint32_t nvme_get_num_segments(uint64_t addr, uint64_t size, uint32_t align) { uint32_t num_segs, offset, remainder; if (align == 0) return (1); KASSERT((align & (align - 1)) == 0, ("alignment not power of 2\n")); num_segs = size / align; remainder = size & (align - 1); offset = addr & (align - 1); if (remainder > 0 || offset > 0) num_segs += 1 + (remainder + offset - 1) / align; return (num_segs); } static void nvme_free_child_bios(int num_bios, struct bio **child_bios) { int i; for (i = 0; i < num_bios; i++) { if (child_bios[i] != NULL) g_destroy_bio(child_bios[i]); } free(child_bios, M_NVME); } static struct bio ** nvme_allocate_child_bios(int num_bios) { struct bio **child_bios; int err = 0, i; child_bios = malloc(num_bios * sizeof(struct bio *), M_NVME, M_NOWAIT); if (child_bios == NULL) return (NULL); for (i = 0; i < num_bios; i++) { child_bios[i] = g_new_bio(); if (child_bios[i] == NULL) err = ENOMEM; } if (err == ENOMEM) { nvme_free_child_bios(num_bios, child_bios); return (NULL); } return (child_bios); } static struct bio ** nvme_construct_child_bios(struct bio *bp, uint32_t alignment, int *num_bios) { struct bio **child_bios; struct bio *child; uint64_t cur_offset; caddr_t data; uint32_t rem_bcount; int i; #ifdef NVME_UNMAPPED_BIO_SUPPORT struct vm_page **ma; uint32_t ma_offset; #endif *num_bios = nvme_get_num_segments(bp->bio_offset, bp->bio_bcount, alignment); child_bios = nvme_allocate_child_bios(*num_bios); if (child_bios == NULL) return (NULL); bp->bio_children = *num_bios; bp->bio_inbed = 0; cur_offset = bp->bio_offset; rem_bcount = bp->bio_bcount; data = bp->bio_data; #ifdef NVME_UNMAPPED_BIO_SUPPORT ma_offset = bp->bio_ma_offset; ma = bp->bio_ma; #endif for (i = 0; i < *num_bios; i++) { child = child_bios[i]; child->bio_parent = bp; child->bio_cmd = bp->bio_cmd; child->bio_offset = cur_offset; child->bio_bcount = min(rem_bcount, alignment - (cur_offset & (alignment - 1))); child->bio_flags = bp->bio_flags; #ifdef NVME_UNMAPPED_BIO_SUPPORT if (bp->bio_flags & BIO_UNMAPPED) { child->bio_ma_offset = ma_offset; child->bio_ma = ma; child->bio_ma_n = nvme_get_num_segments(child->bio_ma_offset, child->bio_bcount, PAGE_SIZE); ma_offset = (ma_offset + child->bio_bcount) & PAGE_MASK; ma += child->bio_ma_n; if (ma_offset != 0) ma -= 1; } else #endif { child->bio_data = data; data += child->bio_bcount; } cur_offset += child->bio_bcount; rem_bcount -= child->bio_bcount; } return (child_bios); } static int nvme_ns_split_bio(struct nvme_namespace *ns, struct bio *bp, uint32_t alignment) { struct bio *child; struct bio **child_bios; int err, i, num_bios; child_bios = nvme_construct_child_bios(bp, alignment, &num_bios); if (child_bios == NULL) return (ENOMEM); for (i = 0; i < num_bios; i++) { child = child_bios[i]; err = nvme_ns_bio_process(ns, child, nvme_bio_child_done); if (err != 0) { nvme_bio_child_inbed(bp, err); g_destroy_bio(child); } } free(child_bios, M_NVME); return (0); } int nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp, nvme_cb_fn_t cb_fn) { struct nvme_dsm_range *dsm_range; uint32_t num_bios; int err; bp->bio_driver1 = cb_fn; if (ns->stripesize > 0 && (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) { num_bios = nvme_get_num_segments(bp->bio_offset, bp->bio_bcount, ns->stripesize); if (num_bios > 1) return (nvme_ns_split_bio(ns, bp, ns->stripesize)); } switch (bp->bio_cmd) { case BIO_READ: err = nvme_ns_cmd_read_bio(ns, bp, nvme_ns_bio_done, bp); break; case BIO_WRITE: err = 
nvme_ns_cmd_write_bio(ns, bp, nvme_ns_bio_done, bp); break; case BIO_FLUSH: err = nvme_ns_cmd_flush(ns, nvme_ns_bio_done, bp); break; case BIO_DELETE: dsm_range = malloc(sizeof(struct nvme_dsm_range), M_NVME, M_ZERO | M_WAITOK); if (!dsm_range) { err = ENOMEM; break; } dsm_range->length = htole32(bp->bio_bcount/nvme_ns_get_sector_size(ns)); dsm_range->starting_lba = htole64(bp->bio_offset/nvme_ns_get_sector_size(ns)); bp->bio_driver2 = dsm_range; err = nvme_ns_cmd_deallocate(ns, dsm_range, 1, nvme_ns_bio_done, bp); if (err != 0) free(dsm_range, M_NVME); break; default: err = EIO; break; } return (err); } int nvme_ns_construct(struct nvme_namespace *ns, uint32_t id, struct nvme_controller *ctrlr) { struct make_dev_args md_args; struct nvme_completion_poll_status status; int res; int unit; uint16_t oncs; uint8_t dsm; uint8_t flbas_fmt; uint8_t vwc_present; ns->ctrlr = ctrlr; ns->id = id; ns->stripesize = 0; /* * Older Intel devices advertise in vendor specific space an alignment * that improves performance. If present use for the stripe size. NVMe * 1.3 standardized this as NOIOB, and newer Intel drives use that. */ switch (pci_get_devid(ctrlr->dev)) { case 0x09538086: /* Intel DC PC3500 */ case 0x0a538086: /* Intel DC PC3520 */ case 0x0a548086: /* Intel DC PC4500 */ if (ctrlr->cdata.vs[3] != 0) ns->stripesize = (1 << ctrlr->cdata.vs[3]) * ctrlr->min_page_size; break; default: break; } /* * Namespaces are reconstructed after a controller reset, so check * to make sure we only call mtx_init once on each mtx. * * TODO: Move this somewhere where it gets called at controller * construction time, which is not invoked as part of each * controller reset. */ if (!mtx_initialized(&ns->lock)) mtx_init(&ns->lock, "nvme ns lock", NULL, MTX_DEF); - status.done = FALSE; + status.done = 0; nvme_ctrlr_cmd_identify_namespace(ctrlr, id, &ns->data, nvme_completion_poll_cb, &status); - while (status.done == FALSE) - DELAY(5); + while (!atomic_load_acq_int(&status.done)) + pause("nvme", 1); if (nvme_completion_is_error(&status.cpl)) { nvme_printf(ctrlr, "nvme_identify_namespace failed\n"); return (ENXIO); } /* Convert data to host endian */ nvme_namespace_data_swapbytes(&ns->data); /* * If the size of is zero, chances are this isn't a valid * namespace (eg one that's not been configured yet). The * standard says the entire id will be zeros, so this is a * cheap way to test for that. */ if (ns->data.nsze == 0) return (ENXIO); flbas_fmt = (ns->data.flbas >> NVME_NS_DATA_FLBAS_FORMAT_SHIFT) & NVME_NS_DATA_FLBAS_FORMAT_MASK; /* * Note: format is a 0-based value, so > is appropriate here, * not >=. */ if (flbas_fmt > ns->data.nlbaf) { printf("lba format %d exceeds number supported (%d)\n", flbas_fmt, ns->data.nlbaf + 1); return (ENXIO); } oncs = ctrlr->cdata.oncs; dsm = (oncs >> NVME_CTRLR_DATA_ONCS_DSM_SHIFT) & NVME_CTRLR_DATA_ONCS_DSM_MASK; if (dsm) ns->flags |= NVME_NS_DEALLOCATE_SUPPORTED; vwc_present = (ctrlr->cdata.vwc >> NVME_CTRLR_DATA_VWC_PRESENT_SHIFT) & NVME_CTRLR_DATA_VWC_PRESENT_MASK; if (vwc_present) ns->flags |= NVME_NS_FLUSH_SUPPORTED; /* * cdev may have already been created, if we are reconstructing the * namespace after a controller-level reset. */ if (ns->cdev != NULL) return (0); /* * Namespace IDs start at 1, so we need to subtract 1 to create a * correct unit number. 
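 * Worked example (values illustrative): namespace 1 on controller nvme0
 * gets unit 0 * NVME_MAX_NAMESPACES + 1 - 1 = 0, and the cdev created
 * below is named from the controller unit and namespace id, i.e.
 * /dev/nvme0ns1.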
*/ unit = device_get_unit(ctrlr->dev) * NVME_MAX_NAMESPACES + ns->id - 1; make_dev_args_init(&md_args); md_args.mda_devsw = &nvme_ns_cdevsw; md_args.mda_unit = unit; md_args.mda_mode = 0600; md_args.mda_si_drv1 = ns; res = make_dev_s(&md_args, &ns->cdev, "nvme%dns%d", device_get_unit(ctrlr->dev), ns->id); if (res != 0) return (ENXIO); #ifdef NVME_UNMAPPED_BIO_SUPPORT ns->cdev->si_flags |= SI_UNMAPPED; #endif return (0); } void nvme_ns_destruct(struct nvme_namespace *ns) { if (ns->cdev != NULL) destroy_dev(ns->cdev); } Index: projects/clang700-import/sys/dev/tws/tws.c =================================================================== --- projects/clang700-import/sys/dev/tws/tws.c (revision 340426) +++ projects/clang700-import/sys/dev/tws/tws.c (revision 340427) @@ -1,908 +1,906 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2010, LSI Corp. * All rights reserved. * Author : Manjunath Ranganathaiah * Support: freebsdraid@lsi.com * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3. Neither the name of the nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include MALLOC_DEFINE(M_TWS, "twsbuf", "buffers used by tws driver"); int tws_queue_depth = TWS_MAX_REQS; int tws_enable_msi = 0; int tws_enable_msix = 0; /* externs */ extern int tws_cam_attach(struct tws_softc *sc); extern void tws_cam_detach(struct tws_softc *sc); extern int tws_init_ctlr(struct tws_softc *sc); extern boolean tws_ctlr_ready(struct tws_softc *sc); extern void tws_turn_off_interrupts(struct tws_softc *sc); extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req, u_int8_t q_type ); extern struct tws_request *tws_q_remove_request(struct tws_softc *sc, struct tws_request *req, u_int8_t q_type ); extern struct tws_request *tws_q_remove_head(struct tws_softc *sc, u_int8_t q_type ); extern boolean tws_get_response(struct tws_softc *sc, u_int16_t *req_id); extern boolean tws_ctlr_reset(struct tws_softc *sc); extern void tws_intr(void *arg); extern int tws_use_32bit_sgls; struct tws_request *tws_get_request(struct tws_softc *sc, u_int16_t type); int tws_init_connect(struct tws_softc *sc, u_int16_t mc); void tws_send_event(struct tws_softc *sc, u_int8_t event); uint8_t tws_get_state(struct tws_softc *sc); void tws_release_request(struct tws_request *req); /* Function prototypes */ static d_open_t tws_open; static d_close_t tws_close; static d_read_t tws_read; static d_write_t tws_write; extern d_ioctl_t tws_ioctl; static int tws_init(struct tws_softc *sc); static void tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size); static int tws_init_aen_q(struct tws_softc *sc); static int tws_init_trace_q(struct tws_softc *sc); static int tws_setup_irq(struct tws_softc *sc); int tws_setup_intr(struct tws_softc *sc, int irqs); int tws_teardown_intr(struct tws_softc *sc); /* Character device entry points */ static struct cdevsw tws_cdevsw = { .d_version = D_VERSION, .d_open = tws_open, .d_close = tws_close, .d_read = tws_read, .d_write = tws_write, .d_ioctl = tws_ioctl, .d_name = "tws", }; /* * In the cdevsw routines, we find our softc by using the si_drv1 member * of struct cdev. We set this variable to point to our softc in our * attach routine when we create the /dev entry. */ int tws_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { struct tws_softc *sc = dev->si_drv1; if ( sc ) TWS_TRACE_DEBUG(sc, "entry", dev, oflags); return (0); } int tws_close(struct cdev *dev, int fflag, int devtype, struct thread *td) { struct tws_softc *sc = dev->si_drv1; if ( sc ) TWS_TRACE_DEBUG(sc, "entry", dev, fflag); return (0); } int tws_read(struct cdev *dev, struct uio *uio, int ioflag) { struct tws_softc *sc = dev->si_drv1; if ( sc ) TWS_TRACE_DEBUG(sc, "entry", dev, ioflag); return (0); } int tws_write(struct cdev *dev, struct uio *uio, int ioflag) { struct tws_softc *sc = dev->si_drv1; if ( sc ) TWS_TRACE_DEBUG(sc, "entry", dev, ioflag); return (0); } /* PCI Support Functions */ /* * Compare the device ID of this device against the IDs that this driver * supports. If there is a match, set the description and return success. 
*/ static int tws_probe(device_t dev) { static u_int8_t first_ctlr = 1; if ((pci_get_vendor(dev) == TWS_VENDOR_ID) && (pci_get_device(dev) == TWS_DEVICE_ID)) { device_set_desc(dev, "LSI 3ware SAS/SATA Storage Controller"); if (first_ctlr) { printf("LSI 3ware device driver for SAS/SATA storage " "controllers, version: %s\n", TWS_DRIVER_VERSION_STRING); first_ctlr = 0; } return(BUS_PROBE_DEFAULT); } return (ENXIO); } /* Attach function is only called if the probe is successful. */ static int tws_attach(device_t dev) { struct tws_softc *sc = device_get_softc(dev); u_int32_t bar; int error=0,i; /* no tracing yet */ /* Look up our softc and initialize its fields. */ sc->tws_dev = dev; sc->device_id = pci_get_device(dev); sc->subvendor_id = pci_get_subvendor(dev); sc->subdevice_id = pci_get_subdevice(dev); /* Intialize mutexes */ mtx_init( &sc->q_lock, "tws_q_lock", NULL, MTX_DEF); mtx_init( &sc->sim_lock, "tws_sim_lock", NULL, MTX_DEF); mtx_init( &sc->gen_lock, "tws_gen_lock", NULL, MTX_DEF); mtx_init( &sc->io_lock, "tws_io_lock", NULL, MTX_DEF | MTX_RECURSE); callout_init(&sc->stats_timer, 1); if ( tws_init_trace_q(sc) == FAILURE ) printf("trace init failure\n"); /* send init event */ mtx_lock(&sc->gen_lock); tws_send_event(sc, TWS_INIT_START); mtx_unlock(&sc->gen_lock); #if _BYTE_ORDER == _BIG_ENDIAN TWS_TRACE(sc, "BIG endian", 0, 0); #endif /* sysctl context setup */ sysctl_ctx_init(&sc->tws_clist); sc->tws_oidp = SYSCTL_ADD_NODE(&sc->tws_clist, SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, device_get_nameunit(dev), CTLFLAG_RD, 0, ""); if ( sc->tws_oidp == NULL ) { tws_log(sc, SYSCTL_TREE_NODE_ADD); goto attach_fail_1; } SYSCTL_ADD_STRING(&sc->tws_clist, SYSCTL_CHILDREN(sc->tws_oidp), OID_AUTO, "driver_version", CTLFLAG_RD, TWS_DRIVER_VERSION_STRING, 0, "TWS driver version"); pci_enable_busmaster(dev); bar = pci_read_config(dev, TWS_PCI_BAR0, 4); TWS_TRACE_DEBUG(sc, "bar0 ", bar, 0); bar = pci_read_config(dev, TWS_PCI_BAR1, 4); bar = bar & ~TWS_BIT2; TWS_TRACE_DEBUG(sc, "bar1 ", bar, 0); /* MFA base address is BAR2 register used for * push mode. Firmware will evatualy move to * pull mode during witch this needs to change */ #ifndef TWS_PULL_MODE_ENABLE sc->mfa_base = (u_int64_t)pci_read_config(dev, TWS_PCI_BAR2, 4); sc->mfa_base = sc->mfa_base & ~TWS_BIT2; TWS_TRACE_DEBUG(sc, "bar2 ", sc->mfa_base, 0); #endif /* allocate MMIO register space */ sc->reg_res_id = TWS_PCI_BAR1; /* BAR1 offset */ if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &(sc->reg_res_id), RF_ACTIVE)) == NULL) { tws_log(sc, ALLOC_MEMORY_RES); goto attach_fail_1; } sc->bus_tag = rman_get_bustag(sc->reg_res); sc->bus_handle = rman_get_bushandle(sc->reg_res); #ifndef TWS_PULL_MODE_ENABLE /* Allocate bus space for inbound mfa */ sc->mfa_res_id = TWS_PCI_BAR2; /* BAR2 offset */ if ((sc->mfa_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &(sc->mfa_res_id), RF_ACTIVE)) == NULL) { tws_log(sc, ALLOC_MEMORY_RES); goto attach_fail_2; } sc->bus_mfa_tag = rman_get_bustag(sc->mfa_res); sc->bus_mfa_handle = rman_get_bushandle(sc->mfa_res); #endif /* Allocate and register our interrupt. */ sc->intr_type = TWS_INTx; /* default */ if ( tws_enable_msi ) sc->intr_type = TWS_MSI; if ( tws_setup_irq(sc) == FAILURE ) { tws_log(sc, ALLOC_MEMORY_RES); goto attach_fail_3; } /* * Create a /dev entry for this device. The kernel will assign us * a major number automatically. We use the unit number of this * device as the minor number and name the character device * "tws". 
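 * The first controller in the system therefore shows up as /dev/tws0,
 * since make_dev() below uses the "tws%u" template with
 * device_get_unit(dev).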
*/ sc->tws_cdev = make_dev(&tws_cdevsw, device_get_unit(dev), UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "tws%u", device_get_unit(dev)); sc->tws_cdev->si_drv1 = sc; if ( tws_init(sc) == FAILURE ) { tws_log(sc, TWS_INIT_FAILURE); goto attach_fail_4; } if ( tws_init_ctlr(sc) == FAILURE ) { tws_log(sc, TWS_CTLR_INIT_FAILURE); goto attach_fail_4; } if ((error = tws_cam_attach(sc))) { tws_log(sc, TWS_CAM_ATTACH); goto attach_fail_4; } /* send init complete event */ mtx_lock(&sc->gen_lock); tws_send_event(sc, TWS_INIT_COMPLETE); mtx_unlock(&sc->gen_lock); TWS_TRACE_DEBUG(sc, "attached successfully", 0, sc->device_id); return(0); attach_fail_4: tws_teardown_intr(sc); destroy_dev(sc->tws_cdev); if (sc->dma_mem_phys) bus_dmamap_unload(sc->cmd_tag, sc->cmd_map); if (sc->dma_mem) bus_dmamem_free(sc->cmd_tag, sc->dma_mem, sc->cmd_map); if (sc->cmd_tag) bus_dma_tag_destroy(sc->cmd_tag); attach_fail_3: for(i=0;iirqs;i++) { if ( sc->irq_res[i] ){ if (bus_release_resource(sc->tws_dev, SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i])) TWS_TRACE(sc, "bus irq res", 0, 0); } } #ifndef TWS_PULL_MODE_ENABLE attach_fail_2: #endif if ( sc->mfa_res ){ if (bus_release_resource(sc->tws_dev, SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res)) TWS_TRACE(sc, "bus release ", 0, sc->mfa_res_id); } if ( sc->reg_res ){ if (bus_release_resource(sc->tws_dev, SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)) TWS_TRACE(sc, "bus release2 ", 0, sc->reg_res_id); } attach_fail_1: mtx_destroy(&sc->q_lock); mtx_destroy(&sc->sim_lock); mtx_destroy(&sc->gen_lock); mtx_destroy(&sc->io_lock); sysctl_ctx_free(&sc->tws_clist); return (ENXIO); } /* Detach device. */ static int tws_detach(device_t dev) { struct tws_softc *sc = device_get_softc(dev); int i; u_int32_t reg; TWS_TRACE_DEBUG(sc, "entry", 0, 0); mtx_lock(&sc->gen_lock); tws_send_event(sc, TWS_UNINIT_START); mtx_unlock(&sc->gen_lock); /* needs to disable interrupt before detaching from cam */ tws_turn_off_interrupts(sc); /* clear door bell */ tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4); reg = tws_read_reg(sc, TWS_I2O0_HIMASK, 4); TWS_TRACE_DEBUG(sc, "turn-off-intr", reg, 0); sc->obfl_q_overrun = false; tws_init_connect(sc, 1); /* Teardown the state in our softc created in our attach routine. */ /* Disconnect the interrupt handler. 
*/ tws_teardown_intr(sc); /* Release irq resource */ for(i=0;iirqs;i++) { if ( sc->irq_res[i] ){ if (bus_release_resource(sc->tws_dev, SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i])) TWS_TRACE(sc, "bus release irq resource", i, sc->irq_res_id[i]); } } if ( sc->intr_type == TWS_MSI ) { pci_release_msi(sc->tws_dev); } tws_cam_detach(sc); if (sc->dma_mem_phys) bus_dmamap_unload(sc->cmd_tag, sc->cmd_map); if (sc->dma_mem) bus_dmamem_free(sc->cmd_tag, sc->dma_mem, sc->cmd_map); if (sc->cmd_tag) bus_dma_tag_destroy(sc->cmd_tag); /* Release memory resource */ if ( sc->mfa_res ){ if (bus_release_resource(sc->tws_dev, SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res)) TWS_TRACE(sc, "bus release mem resource", 0, sc->mfa_res_id); } if ( sc->reg_res ){ if (bus_release_resource(sc->tws_dev, SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)) TWS_TRACE(sc, "bus release mem resource", 0, sc->reg_res_id); } for ( i=0; i< tws_queue_depth; i++) { if (sc->reqs[i].dma_map) bus_dmamap_destroy(sc->data_tag, sc->reqs[i].dma_map); callout_drain(&sc->reqs[i].timeout); } callout_drain(&sc->stats_timer); free(sc->reqs, M_TWS); free(sc->sense_bufs, M_TWS); free(sc->scan_ccb, M_TWS); if (sc->ioctl_data_mem) bus_dmamem_free(sc->data_tag, sc->ioctl_data_mem, sc->ioctl_data_map); if (sc->data_tag) bus_dma_tag_destroy(sc->data_tag); free(sc->aen_q.q, M_TWS); free(sc->trace_q.q, M_TWS); mtx_destroy(&sc->q_lock); mtx_destroy(&sc->sim_lock); mtx_destroy(&sc->gen_lock); mtx_destroy(&sc->io_lock); destroy_dev(sc->tws_cdev); sysctl_ctx_free(&sc->tws_clist); return (0); } int tws_setup_intr(struct tws_softc *sc, int irqs) { int i, error; for(i=0;iintr_handle[i])) { if ((error = bus_setup_intr(sc->tws_dev, sc->irq_res[i], INTR_TYPE_CAM | INTR_MPSAFE, -#if (__FreeBSD_version >= 700000) NULL, -#endif tws_intr, sc, &sc->intr_handle[i]))) { tws_log(sc, SETUP_INTR_RES); return(FAILURE); } } } return(SUCCESS); } int tws_teardown_intr(struct tws_softc *sc) { int i, error; for(i=0;iirqs;i++) { if (sc->intr_handle[i]) { error = bus_teardown_intr(sc->tws_dev, sc->irq_res[i], sc->intr_handle[i]); sc->intr_handle[i] = NULL; } } return(SUCCESS); } static int tws_setup_irq(struct tws_softc *sc) { int messages; switch(sc->intr_type) { case TWS_INTx : sc->irqs = 1; sc->irq_res_id[0] = 0; sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ, &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE); if ( ! sc->irq_res[0] ) return(FAILURE); if ( tws_setup_intr(sc, sc->irqs) == FAILURE ) return(FAILURE); device_printf(sc->tws_dev, "Using legacy INTx\n"); break; case TWS_MSI : sc->irqs = 1; sc->irq_res_id[0] = 1; messages = 1; if (pci_alloc_msi(sc->tws_dev, &messages) != 0 ) { TWS_TRACE(sc, "pci alloc msi fail", 0, messages); return(FAILURE); } sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ, &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE); if ( !sc->irq_res[0] ) return(FAILURE); if ( tws_setup_intr(sc, sc->irqs) == FAILURE ) return(FAILURE); device_printf(sc->tws_dev, "Using MSI\n"); break; } return(SUCCESS); } static int tws_init(struct tws_softc *sc) { u_int32_t max_sg_elements; u_int32_t dma_mem_size; int error; u_int32_t reg; sc->seq_id = 0; if ( tws_queue_depth > TWS_MAX_REQS ) tws_queue_depth = TWS_MAX_REQS; if (tws_queue_depth < TWS_RESERVED_REQS+1) tws_queue_depth = TWS_RESERVED_REQS+1; sc->is64bit = (sizeof(bus_addr_t) == 8) ? true : false; max_sg_elements = (sc->is64bit && !tws_use_32bit_sgls) ? 
TWS_MAX_64BIT_SG_ELEMENTS : TWS_MAX_32BIT_SG_ELEMENTS; dma_mem_size = (sizeof(struct tws_command_packet) * tws_queue_depth) + (TWS_SECTOR_SIZE) ; if ( bus_dma_tag_create(bus_get_dma_tag(sc->tws_dev), /* PCI parent */ TWS_ALIGNMENT, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE, /* maxsize */ max_sg_elements, /* numsegs */ BUS_SPACE_MAXSIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->parent_tag /* tag */ )) { TWS_TRACE_DEBUG(sc, "DMA parent tag Create fail", max_sg_elements, sc->is64bit); return(ENOMEM); } /* In bound message frame requires 16byte alignment. * Outbound MF's can live with 4byte alignment - for now just * use 16 for both. */ if ( bus_dma_tag_create(sc->parent_tag, /* parent */ TWS_IN_MF_ALIGNMENT, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dma_mem_size, /* maxsize */ 1, /* numsegs */ BUS_SPACE_MAXSIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->cmd_tag /* tag */ )) { TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit); return(ENOMEM); } if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem, BUS_DMA_NOWAIT, &sc->cmd_map)) { TWS_TRACE_DEBUG(sc, "DMA mem alloc fail", max_sg_elements, sc->is64bit); return(ENOMEM); } /* if bus_dmamem_alloc succeeds then bus_dmamap_load will succeed */ sc->dma_mem_phys=0; error = bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem, dma_mem_size, tws_dmamap_cmds_load_cbfn, &sc->dma_mem_phys, 0); /* * Create a dma tag for data buffers; size will be the maximum * possible I/O size (128kB). */ if (bus_dma_tag_create(sc->parent_tag, /* parent */ TWS_ALIGNMENT, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ TWS_MAX_IO_SIZE, /* maxsize */ max_sg_elements, /* nsegments */ TWS_MAX_IO_SIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, /* lockfunc */ &sc->io_lock, /* lockfuncarg */ &sc->data_tag /* tag */)) { TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit); return(ENOMEM); } sc->reqs = malloc(sizeof(struct tws_request) * tws_queue_depth, M_TWS, M_WAITOK | M_ZERO); sc->sense_bufs = malloc(sizeof(struct tws_sense) * tws_queue_depth, M_TWS, M_WAITOK | M_ZERO); sc->scan_ccb = malloc(sizeof(union ccb), M_TWS, M_WAITOK | M_ZERO); if (bus_dmamem_alloc(sc->data_tag, (void **)&sc->ioctl_data_mem, (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &sc->ioctl_data_map)) { device_printf(sc->tws_dev, "Cannot allocate ioctl data mem\n"); return(ENOMEM); } if ( !tws_ctlr_ready(sc) ) if( !tws_ctlr_reset(sc) ) return(FAILURE); bzero(&sc->stats, sizeof(struct tws_stats)); tws_init_qs(sc); tws_turn_off_interrupts(sc); /* * enable pull mode by setting bit1 . * setting bit0 to 1 will enable interrupt coalesing * will revisit. 
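 *
 * Illustrative sketch, not actual driver code: if interrupt coalescing is
 * enabled later as noted above, it would presumably reuse the same
 * read-modify-write of TWS_I2O0_CTL that the pull-mode code below uses.
 * TWS_BIT0 is an assumed bit name here, not a verified register definition:
 *
 *     reg = tws_read_reg(sc, TWS_I2O0_CTL, 4);
 *     tws_write_reg(sc, TWS_I2O0_CTL, reg | TWS_BIT1 | TWS_BIT0, 4);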
*/ #ifdef TWS_PULL_MODE_ENABLE reg = tws_read_reg(sc, TWS_I2O0_CTL, 4); TWS_TRACE_DEBUG(sc, "i20 ctl", reg, TWS_I2O0_CTL); tws_write_reg(sc, TWS_I2O0_CTL, reg | TWS_BIT1, 4); #endif TWS_TRACE_DEBUG(sc, "dma_mem_phys", sc->dma_mem_phys, TWS_I2O0_CTL); if ( tws_init_reqs(sc, dma_mem_size) == FAILURE ) return(FAILURE); if ( tws_init_aen_q(sc) == FAILURE ) return(FAILURE); return(SUCCESS); } static int tws_init_aen_q(struct tws_softc *sc) { sc->aen_q.head=0; sc->aen_q.tail=0; sc->aen_q.depth=256; sc->aen_q.overflow=0; sc->aen_q.q = malloc(sizeof(struct tws_event_packet)*sc->aen_q.depth, M_TWS, M_WAITOK | M_ZERO); return(SUCCESS); } static int tws_init_trace_q(struct tws_softc *sc) { sc->trace_q.head=0; sc->trace_q.tail=0; sc->trace_q.depth=256; sc->trace_q.overflow=0; sc->trace_q.q = malloc(sizeof(struct tws_trace_rec)*sc->trace_q.depth, M_TWS, M_WAITOK | M_ZERO); return(SUCCESS); } static int tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size) { struct tws_command_packet *cmd_buf; cmd_buf = (struct tws_command_packet *)sc->dma_mem; int i; bzero(cmd_buf, dma_mem_size); TWS_TRACE_DEBUG(sc, "phy cmd", sc->dma_mem_phys, 0); mtx_lock(&sc->q_lock); for ( i=0; i< tws_queue_depth; i++) { if (bus_dmamap_create(sc->data_tag, 0, &sc->reqs[i].dma_map)) { /* log a ENOMEM failure msg here */ mtx_unlock(&sc->q_lock); return(FAILURE); } sc->reqs[i].cmd_pkt = &cmd_buf[i]; sc->sense_bufs[i].hdr = &cmd_buf[i].hdr ; sc->sense_bufs[i].hdr_pkt_phy = sc->dma_mem_phys + (i * sizeof(struct tws_command_packet)); sc->reqs[i].cmd_pkt_phy = sc->dma_mem_phys + sizeof(struct tws_command_header) + (i * sizeof(struct tws_command_packet)); sc->reqs[i].request_id = i; sc->reqs[i].sc = sc; sc->reqs[i].cmd_pkt->hdr.header_desc.size_header = 128; callout_init(&sc->reqs[i].timeout, 1); sc->reqs[i].state = TWS_REQ_STATE_FREE; if ( i >= TWS_RESERVED_REQS ) tws_q_insert_tail(sc, &sc->reqs[i], TWS_FREE_Q); } mtx_unlock(&sc->q_lock); return(SUCCESS); } static void tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs, int nseg, int error) { /* printf("command load done \n"); */ *((bus_addr_t *)arg) = segs[0].ds_addr; } void tws_send_event(struct tws_softc *sc, u_int8_t event) { mtx_assert(&sc->gen_lock, MA_OWNED); TWS_TRACE_DEBUG(sc, "received event ", 0, event); switch (event) { case TWS_INIT_START: sc->tws_state = TWS_INIT; break; case TWS_INIT_COMPLETE: if (sc->tws_state != TWS_INIT) { device_printf(sc->tws_dev, "invalid state transition %d => TWS_ONLINE\n", sc->tws_state); } else { sc->tws_state = TWS_ONLINE; } break; case TWS_RESET_START: /* We can transition to reset state from any state except reset*/ if (sc->tws_state != TWS_RESET) { sc->tws_prev_state = sc->tws_state; sc->tws_state = TWS_RESET; } break; case TWS_RESET_COMPLETE: if (sc->tws_state != TWS_RESET) { device_printf(sc->tws_dev, "invalid state transition %d => %d (previous state)\n", sc->tws_state, sc->tws_prev_state); } else { sc->tws_state = sc->tws_prev_state; } break; case TWS_SCAN_FAILURE: if (sc->tws_state != TWS_ONLINE) { device_printf(sc->tws_dev, "invalid state transition %d => TWS_OFFLINE\n", sc->tws_state); } else { sc->tws_state = TWS_OFFLINE; } break; case TWS_UNINIT_START: if ((sc->tws_state != TWS_ONLINE) && (sc->tws_state != TWS_OFFLINE)) { device_printf(sc->tws_dev, "invalid state transition %d => TWS_UNINIT\n", sc->tws_state); } else { sc->tws_state = TWS_UNINIT; } break; } } uint8_t tws_get_state(struct tws_softc *sc) { return((u_int8_t)sc->tws_state); } /* Called during system shutdown after sync. 
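 *
 * For reference, the adapter-state transitions accepted by tws_send_event()
 * above are:
 *
 *   TWS_INIT_START     : any state                  -> TWS_INIT
 *   TWS_INIT_COMPLETE  : TWS_INIT                   -> TWS_ONLINE
 *   TWS_RESET_START    : any state except TWS_RESET -> TWS_RESET (previous
 *                        state saved)
 *   TWS_RESET_COMPLETE : TWS_RESET                  -> previous state
 *   TWS_SCAN_FAILURE   : TWS_ONLINE                 -> TWS_OFFLINE
 *   TWS_UNINIT_START   : TWS_ONLINE or TWS_OFFLINE  -> TWS_UNINIT
 *
 * Other combinations are either ignored (a nested TWS_RESET_START) or
 * logged as an invalid state transition, so the shutdown and detach paths
 * expect to run from the ONLINE or OFFLINE state.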
*/ static int tws_shutdown(device_t dev) { struct tws_softc *sc = device_get_softc(dev); TWS_TRACE_DEBUG(sc, "entry", 0, 0); tws_turn_off_interrupts(sc); tws_init_connect(sc, 1); return (0); } /* * Device suspend routine. */ static int tws_suspend(device_t dev) { struct tws_softc *sc = device_get_softc(dev); if ( sc ) TWS_TRACE_DEBUG(sc, "entry", 0, 0); return (0); } /* * Device resume routine. */ static int tws_resume(device_t dev) { struct tws_softc *sc = device_get_softc(dev); if ( sc ) TWS_TRACE_DEBUG(sc, "entry", 0, 0); return (0); } struct tws_request * tws_get_request(struct tws_softc *sc, u_int16_t type) { struct mtx *my_mutex = ((type == TWS_REQ_TYPE_SCSI_IO) ? &sc->q_lock : &sc->gen_lock); struct tws_request *r = NULL; mtx_lock(my_mutex); if (type == TWS_REQ_TYPE_SCSI_IO) { r = tws_q_remove_head(sc, TWS_FREE_Q); } else { if ( sc->reqs[type].state == TWS_REQ_STATE_FREE ) { r = &sc->reqs[type]; } } if ( r ) { bzero(&r->cmd_pkt->cmd, sizeof(struct tws_command_apache)); r->data = NULL; r->length = 0; r->type = type; r->flags = TWS_DIR_UNKNOWN; r->error_code = TWS_REQ_RET_INVALID; r->cb = NULL; r->ccb_ptr = NULL; callout_stop(&r->timeout); r->next = r->prev = NULL; r->state = ((type == TWS_REQ_TYPE_SCSI_IO) ? TWS_REQ_STATE_TRAN : TWS_REQ_STATE_BUSY); } mtx_unlock(my_mutex); return(r); } void tws_release_request(struct tws_request *req) { struct tws_softc *sc = req->sc; TWS_TRACE_DEBUG(sc, "entry", sc, 0); mtx_lock(&sc->q_lock); tws_q_insert_tail(sc, req, TWS_FREE_Q); mtx_unlock(&sc->q_lock); } static device_method_t tws_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tws_probe), DEVMETHOD(device_attach, tws_attach), DEVMETHOD(device_detach, tws_detach), DEVMETHOD(device_shutdown, tws_shutdown), DEVMETHOD(device_suspend, tws_suspend), DEVMETHOD(device_resume, tws_resume), DEVMETHOD_END }; static driver_t tws_driver = { "tws", tws_methods, sizeof(struct tws_softc) }; static devclass_t tws_devclass; /* DEFINE_CLASS_0(tws, tws_driver, tws_methods, sizeof(struct tws_softc)); */ DRIVER_MODULE(tws, pci, tws_driver, tws_devclass, 0, 0); MODULE_DEPEND(tws, cam, 1, 1, 1); MODULE_DEPEND(tws, pci, 1, 1, 1); TUNABLE_INT("hw.tws.queue_depth", &tws_queue_depth); TUNABLE_INT("hw.tws.enable_msi", &tws_enable_msi); Index: projects/clang700-import/sys/dev/tws/tws_cam.c =================================================================== --- projects/clang700-import/sys/dev/tws/tws_cam.c (revision 340426) +++ projects/clang700-import/sys/dev/tws/tws_cam.c (revision 340427) @@ -1,1317 +1,1306 @@ /* * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010 LSI Corp. * All rights reserved. * Author : Manjunath Ranganathaiah * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include static int tws_cam_depth=(TWS_MAX_REQS - TWS_RESERVED_REQS); static char tws_sev_str[5][8]={"","ERROR","WARNING","INFO","DEBUG"}; static void tws_action(struct cam_sim *sim, union ccb *ccb); static void tws_poll(struct cam_sim *sim); static void tws_scsi_complete(struct tws_request *req); void tws_unmap_request(struct tws_softc *sc, struct tws_request *req); int32_t tws_map_request(struct tws_softc *sc, struct tws_request *req); int tws_bus_scan(struct tws_softc *sc); int tws_cam_attach(struct tws_softc *sc); void tws_cam_detach(struct tws_softc *sc); void tws_reset(void *arg); static void tws_reset_cb(void *arg); static void tws_reinit(void *arg); static int32_t tws_execute_scsi(struct tws_softc *sc, union ccb *ccb); static void tws_freeze_simq(struct tws_softc *sc, struct tws_request *req); static void tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs, int nseg, int error); static void tws_fill_sg_list(struct tws_softc *sc, void *sgl_src, void *sgl_dest, u_int16_t num_sgl_entries); static void tws_err_complete(struct tws_softc *sc, u_int64_t mfa); static void tws_scsi_err_complete(struct tws_request *req, struct tws_command_header *hdr); static void tws_passthru_err_complete(struct tws_request *req, struct tws_command_header *hdr); void tws_timeout(void *arg); static void tws_intr_attn_aen(struct tws_softc *sc); static void tws_intr_attn_error(struct tws_softc *sc); static void tws_intr_resp(struct tws_softc *sc); void tws_intr(void *arg); void tws_cmd_complete(struct tws_request *req); void tws_aen_complete(struct tws_request *req); int tws_send_scsi_cmd(struct tws_softc *sc, int cmd); void tws_getset_param_complete(struct tws_request *req); int tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id, u_int32_t param_size, void *data); int tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id, u_int32_t param_size, void *data); extern struct tws_request *tws_get_request(struct tws_softc *sc, u_int16_t type); extern void *tws_release_request(struct tws_request *req); extern int tws_submit_command(struct tws_softc *sc, struct tws_request *req); extern boolean tws_get_response(struct tws_softc *sc, u_int16_t *req_id, u_int64_t *mfa); extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req, u_int8_t q_type ); extern struct tws_request * tws_q_remove_request(struct tws_softc *sc, struct tws_request *req, u_int8_t q_type ); extern void tws_send_event(struct tws_softc *sc, u_int8_t event); extern struct tws_sense * tws_find_sense_from_mfa(struct tws_softc *sc, u_int64_t mfa); extern void tws_fetch_aen(void *arg); extern void tws_disable_db_intr(struct tws_softc *sc); extern void tws_enable_db_intr(struct tws_softc *sc); extern void tws_passthru_complete(struct tws_request *req); extern void tws_aen_synctime_with_host(struct tws_softc *sc); extern void tws_circular_aenq_insert(struct tws_softc 
*sc, struct tws_circular_q *cq, struct tws_event_packet *aen); extern int tws_use_32bit_sgls; extern boolean tws_ctlr_reset(struct tws_softc *sc); extern struct tws_request * tws_q_remove_tail(struct tws_softc *sc, u_int8_t q_type ); extern void tws_turn_off_interrupts(struct tws_softc *sc); extern void tws_turn_on_interrupts(struct tws_softc *sc); extern int tws_init_connect(struct tws_softc *sc, u_int16_t mc); extern void tws_init_obfl_q(struct tws_softc *sc); extern uint8_t tws_get_state(struct tws_softc *sc); extern void tws_assert_soft_reset(struct tws_softc *sc); extern boolean tws_ctlr_ready(struct tws_softc *sc); extern u_int16_t tws_poll4_response(struct tws_softc *sc, u_int64_t *mfa); extern int tws_setup_intr(struct tws_softc *sc, int irqs); extern int tws_teardown_intr(struct tws_softc *sc); int tws_cam_attach(struct tws_softc *sc) { struct cam_devq *devq; TWS_TRACE_DEBUG(sc, "entry", 0, sc); /* Create a device queue for sim */ /* * if the user sets cam depth to less than 1 * cam may get confused */ if ( tws_cam_depth < 1 ) tws_cam_depth = 1; if ( tws_cam_depth > (tws_queue_depth - TWS_RESERVED_REQS) ) tws_cam_depth = tws_queue_depth - TWS_RESERVED_REQS; TWS_TRACE_DEBUG(sc, "depths,ctlr,cam", tws_queue_depth, tws_cam_depth); if ((devq = cam_simq_alloc(tws_cam_depth)) == NULL) { tws_log(sc, CAM_SIMQ_ALLOC); return(ENOMEM); } /* * Create a SIM entry. Though we can support tws_cam_depth * simultaneous requests, we claim to be able to handle only * (tws_cam_depth), so that we always have reserved requests * packet available to service ioctls and internal commands. */ sc->sim = cam_sim_alloc(tws_action, tws_poll, "tws", sc, device_get_unit(sc->tws_dev), -#if (__FreeBSD_version >= 700000) &sc->sim_lock, -#endif tws_cam_depth, 1, devq); /* 1, 1, devq); */ if (sc->sim == NULL) { cam_simq_free(devq); tws_log(sc, CAM_SIM_ALLOC); } /* Register the bus. */ mtx_lock(&sc->sim_lock); if (xpt_bus_register(sc->sim, -#if (__FreeBSD_version >= 700000) sc->tws_dev, -#endif 0) != CAM_SUCCESS) { cam_sim_free(sc->sim, TRUE); /* passing true will free the devq */ sc->sim = NULL; /* so cam_detach will not try to free it */ mtx_unlock(&sc->sim_lock); tws_log(sc, TWS_XPT_BUS_REGISTER); return(ENXIO); } if (xpt_create_path(&sc->path, NULL, cam_sim_path(sc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(sc->sim)); /* Passing TRUE to cam_sim_free will free the devq as well. 
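 *
 * Worked example of the depth clamping above (numbers are illustrative,
 * not the header's actual values): with tws_queue_depth = 256 and
 * TWS_RESERVED_REQS = 4, tws_cam_depth is clamped to 252, leaving the
 * reserved request packets available for ioctls and internal commands
 * even when CAM keeps the SIM queue full.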
*/ cam_sim_free(sc->sim, TRUE); tws_log(sc, TWS_XPT_CREATE_PATH); mtx_unlock(&sc->sim_lock); return(ENXIO); } mtx_unlock(&sc->sim_lock); return(0); } void tws_cam_detach(struct tws_softc *sc) { TWS_TRACE_DEBUG(sc, "entry", 0, 0); mtx_lock(&sc->sim_lock); if (sc->path) xpt_free_path(sc->path); if (sc->sim) { xpt_bus_deregister(cam_sim_path(sc->sim)); cam_sim_free(sc->sim, TRUE); } mtx_unlock(&sc->sim_lock); } int tws_bus_scan(struct tws_softc *sc) { union ccb *ccb; TWS_TRACE_DEBUG(sc, "entry", sc, 0); if (!(sc->sim)) return(ENXIO); ccb = xpt_alloc_ccb(); mtx_lock(&sc->sim_lock); if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mtx_unlock(&sc->sim_lock); xpt_free_ccb(ccb); return(EIO); } xpt_rescan(ccb); mtx_unlock(&sc->sim_lock); return(0); } static void tws_action(struct cam_sim *sim, union ccb *ccb) { struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim); switch( ccb->ccb_h.func_code ) { case XPT_SCSI_IO: { if ( tws_execute_scsi(sc, ccb) ) TWS_TRACE_DEBUG(sc, "execute scsi failed", 0, 0); break; } case XPT_ABORT: { TWS_TRACE_DEBUG(sc, "abort i/o", 0, 0); ccb->ccb_h.status = CAM_UA_ABORT; xpt_done(ccb); break; } case XPT_RESET_BUS: { TWS_TRACE_DEBUG(sc, "reset bus", sim, ccb); break; } case XPT_SET_TRAN_SETTINGS: { TWS_TRACE_DEBUG(sc, "set tran settings", sim, ccb); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: { TWS_TRACE_DEBUG(sc, "get tran settings", sim, ccb); -#if (__FreeBSD_version >= 700000 ) ccb->cts.protocol = PROTO_SCSI; ccb->cts.protocol_version = SCSI_REV_2; ccb->cts.transport = XPORT_SPI; ccb->cts.transport_version = 2; ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC; ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB; ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ; ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB; -#else - ccb->cts.valid = (CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID); - ccb->cts.flags &= ~(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB); -#endif ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { TWS_TRACE_DEBUG(sc, "calc geometry(ccb,block-size)", ccb, ccb->ccg.block_size); cam_calc_geometry(&ccb->ccg, 1/* extended */); xpt_done(ccb); break; } case XPT_PATH_INQ: { TWS_TRACE_DEBUG(sc, "path inquiry", sim, ccb); ccb->cpi.version_num = 1; ccb->cpi.hba_inquiry = 0; ccb->cpi.target_sprt = 0; ccb->cpi.hba_misc = 0; ccb->cpi.hba_eng_cnt = 0; ccb->cpi.max_target = TWS_MAX_NUM_UNITS; ccb->cpi.max_lun = TWS_MAX_NUM_LUNS - 1; ccb->cpi.unit_number = cam_sim_unit(sim); ccb->cpi.bus_id = cam_sim_bus(sim); ccb->cpi.initiator_id = TWS_SCSI_INITIATOR_ID; ccb->cpi.base_transfer_speed = 6000000; strlcpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(ccb->cpi.hba_vid, "3ware", HBA_IDLEN); strlcpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN); -#if (__FreeBSD_version >= 700000 ) ccb->cpi.transport = XPORT_SPI; ccb->cpi.transport_version = 2; ccb->cpi.protocol = PROTO_SCSI; ccb->cpi.protocol_version = SCSI_REV_2; ccb->cpi.maxio = TWS_MAX_IO_SIZE; -#endif ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: TWS_TRACE_DEBUG(sc, "default", sim, ccb); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } static void tws_scsi_complete(struct tws_request *req) { struct tws_softc *sc = req->sc; mtx_lock(&sc->q_lock); tws_q_remove_request(sc, req, TWS_BUSY_Q); mtx_unlock(&sc->q_lock); callout_stop(&req->timeout); tws_unmap_request(req->sc, req); req->ccb_ptr->ccb_h.status = 
CAM_REQ_CMP; mtx_lock(&sc->sim_lock); xpt_done(req->ccb_ptr); mtx_unlock(&sc->sim_lock); mtx_lock(&sc->q_lock); tws_q_insert_tail(sc, req, TWS_FREE_Q); mtx_unlock(&sc->q_lock); } void tws_getset_param_complete(struct tws_request *req) { struct tws_softc *sc = req->sc; TWS_TRACE_DEBUG(sc, "getset complete", req, req->request_id); callout_stop(&req->timeout); tws_unmap_request(sc, req); free(req->data, M_TWS); req->state = TWS_REQ_STATE_FREE; } void tws_aen_complete(struct tws_request *req) { struct tws_softc *sc = req->sc; struct tws_command_header *sense; struct tws_event_packet event; u_int16_t aen_code=0; TWS_TRACE_DEBUG(sc, "aen complete", 0, req->request_id); callout_stop(&req->timeout); tws_unmap_request(sc, req); sense = (struct tws_command_header *)req->data; TWS_TRACE_DEBUG(sc,"sense code, key",sense->sense_data[0], sense->sense_data[2]); TWS_TRACE_DEBUG(sc,"sense rid, seve",sense->header_desc.request_id, sense->status_block.res__severity); TWS_TRACE_DEBUG(sc,"sense srcnum, error",sense->status_block.srcnum, sense->status_block.error); TWS_TRACE_DEBUG(sc,"sense shdr, ssense",sense->header_desc.size_header, sense->header_desc.size_sense); aen_code = sense->status_block.error; switch ( aen_code ) { case TWS_AEN_SYNC_TIME_WITH_HOST : tws_aen_synctime_with_host(sc); break; case TWS_AEN_QUEUE_EMPTY : break; default : bzero(&event, sizeof(struct tws_event_packet)); event.sequence_id = sc->seq_id; event.time_stamp_sec = (u_int32_t)TWS_LOCAL_TIME; event.aen_code = sense->status_block.error; event.severity = sense->status_block.res__severity & 0x7; event.event_src = TWS_SRC_CTRL_EVENT; strcpy(event.severity_str, tws_sev_str[event.severity]); event.retrieved = TWS_AEN_NOT_RETRIEVED; bcopy(sense->err_specific_desc, event.parameter_data, TWS_ERROR_SPECIFIC_DESC_LEN); event.parameter_data[TWS_ERROR_SPECIFIC_DESC_LEN - 1] = '\0'; event.parameter_len = (u_int8_t)strlen(event.parameter_data)+1; if ( event.parameter_len < TWS_ERROR_SPECIFIC_DESC_LEN ) { event.parameter_len += ((u_int8_t)strlen(event.parameter_data + event.parameter_len) + 1); } device_printf(sc->tws_dev, "%s: (0x%02X: 0x%04X): %s: %s\n", event.severity_str, event.event_src, event.aen_code, event.parameter_data + (strlen(event.parameter_data) + 1), event.parameter_data); mtx_lock(&sc->gen_lock); tws_circular_aenq_insert(sc, &sc->aen_q, &event); sc->seq_id++; mtx_unlock(&sc->gen_lock); break; } free(req->data, M_TWS); req->state = TWS_REQ_STATE_FREE; if ( aen_code != TWS_AEN_QUEUE_EMPTY ) { /* timeout(tws_fetch_aen, sc, 1);*/ sc->stats.num_aens++; tws_fetch_aen((void *)sc); } } void tws_cmd_complete(struct tws_request *req) { struct tws_softc *sc = req->sc; callout_stop(&req->timeout); tws_unmap_request(sc, req); } static void tws_err_complete(struct tws_softc *sc, u_int64_t mfa) { struct tws_command_header *hdr; struct tws_sense *sen; struct tws_request *req; u_int16_t req_id; u_int32_t reg, status; if ( !mfa ) { TWS_TRACE_DEBUG(sc, "null mfa", 0, mfa); return; } else { /* lookup the sense */ sen = tws_find_sense_from_mfa(sc, mfa); if ( sen == NULL ) { TWS_TRACE_DEBUG(sc, "found null req", 0, mfa); return; } hdr = sen->hdr; TWS_TRACE_DEBUG(sc, "sen, hdr", sen, hdr); req_id = hdr->header_desc.request_id; req = &sc->reqs[req_id]; TWS_TRACE_DEBUG(sc, "req, id", req, req_id); if ( req->error_code != TWS_REQ_RET_SUBMIT_SUCCESS ) TWS_TRACE_DEBUG(sc, "submit failure?", 0, req->error_code); } switch (req->type) { case TWS_REQ_TYPE_PASSTHRU : tws_passthru_err_complete(req, hdr); break; case TWS_REQ_TYPE_GETSET_PARAM : 
tws_getset_param_complete(req); break; case TWS_REQ_TYPE_SCSI_IO : tws_scsi_err_complete(req, hdr); break; } mtx_lock(&sc->io_lock); hdr->header_desc.size_header = 128; reg = (u_int32_t)( mfa>>32); tws_write_reg(sc, TWS_I2O0_HOBQPH, reg, 4); reg = (u_int32_t)(mfa); tws_write_reg(sc, TWS_I2O0_HOBQPL, reg, 4); status = tws_read_reg(sc, TWS_I2O0_STATUS, 4); if ( status & TWS_BIT13 ) { device_printf(sc->tws_dev, "OBFL Overrun\n"); sc->obfl_q_overrun = true; } mtx_unlock(&sc->io_lock); } static void tws_scsi_err_complete(struct tws_request *req, struct tws_command_header *hdr) { u_int8_t *sense_data; struct tws_softc *sc = req->sc; union ccb *ccb = req->ccb_ptr; TWS_TRACE_DEBUG(sc, "sbe, cmd_status", hdr->status_block.error, req->cmd_pkt->cmd.pkt_a.status); if ( hdr->status_block.error == TWS_ERROR_LOGICAL_UNIT_NOT_SUPPORTED || hdr->status_block.error == TWS_ERROR_UNIT_OFFLINE ) { if ( ccb->ccb_h.target_lun ) { TWS_TRACE_DEBUG(sc, "invalid lun error",0,0); ccb->ccb_h.status |= CAM_DEV_NOT_THERE; } else { TWS_TRACE_DEBUG(sc, "invalid target error",0,0); ccb->ccb_h.status |= CAM_SEL_TIMEOUT; } } else { TWS_TRACE_DEBUG(sc, "scsi status error",0,0); ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; if (((ccb->csio.cdb_io.cdb_bytes[0] == 0x1A) && (hdr->status_block.error == TWS_ERROR_NOT_SUPPORTED))) { ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; TWS_TRACE_DEBUG(sc, "page mode not supported",0,0); } } /* if there were no error simply mark complete error */ if (ccb->ccb_h.status == 0) ccb->ccb_h.status = CAM_REQ_CMP_ERR; sense_data = (u_int8_t *)&ccb->csio.sense_data; if (sense_data) { memcpy(sense_data, hdr->sense_data, TWS_SENSE_DATA_LENGTH ); ccb->csio.sense_len = TWS_SENSE_DATA_LENGTH; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; } ccb->csio.scsi_status = req->cmd_pkt->cmd.pkt_a.status; ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mtx_lock(&sc->sim_lock); xpt_done(ccb); mtx_unlock(&sc->sim_lock); callout_stop(&req->timeout); tws_unmap_request(req->sc, req); mtx_lock(&sc->q_lock); tws_q_remove_request(sc, req, TWS_BUSY_Q); tws_q_insert_tail(sc, req, TWS_FREE_Q); mtx_unlock(&sc->q_lock); } static void tws_passthru_err_complete(struct tws_request *req, struct tws_command_header *hdr) { TWS_TRACE_DEBUG(req->sc, "entry", hdr, req->request_id); req->error_code = hdr->status_block.error; memcpy(&(req->cmd_pkt->hdr), hdr, sizeof(struct tws_command_header)); tws_passthru_complete(req); } static void tws_drain_busy_queue(struct tws_softc *sc) { struct tws_request *req; union ccb *ccb; TWS_TRACE_DEBUG(sc, "entry", 0, 0); mtx_lock(&sc->q_lock); req = tws_q_remove_tail(sc, TWS_BUSY_Q); mtx_unlock(&sc->q_lock); while ( req ) { TWS_TRACE_DEBUG(sc, "moved to TWS_COMPLETE_Q", 0, req->request_id); callout_stop(&req->timeout); req->error_code = TWS_REQ_RET_RESET; ccb = (union ccb *)(req->ccb_ptr); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status |= CAM_REQUEUE_REQ; ccb->ccb_h.status |= CAM_SCSI_BUS_RESET; tws_unmap_request(req->sc, req); mtx_lock(&sc->sim_lock); xpt_done(req->ccb_ptr); mtx_unlock(&sc->sim_lock); mtx_lock(&sc->q_lock); tws_q_insert_tail(sc, req, TWS_FREE_Q); req = tws_q_remove_tail(sc, TWS_BUSY_Q); mtx_unlock(&sc->q_lock); } } static void tws_drain_reserved_reqs(struct tws_softc *sc) { struct tws_request *r; r = &sc->reqs[TWS_REQ_TYPE_AEN_FETCH]; if ( r->state != TWS_REQ_STATE_FREE ) { TWS_TRACE_DEBUG(sc, "reset aen req", 0, 0); callout_stop(&r->timeout); tws_unmap_request(sc, r); free(r->data, M_TWS); r->state = TWS_REQ_STATE_FREE; r->error_code = TWS_REQ_RET_RESET; } r = 
&sc->reqs[TWS_REQ_TYPE_PASSTHRU]; if ( r->state == TWS_REQ_STATE_BUSY ) { TWS_TRACE_DEBUG(sc, "reset passthru req", 0, 0); r->error_code = TWS_REQ_RET_RESET; } r = &sc->reqs[TWS_REQ_TYPE_GETSET_PARAM]; if ( r->state != TWS_REQ_STATE_FREE ) { TWS_TRACE_DEBUG(sc, "reset setparam req", 0, 0); callout_stop(&r->timeout); tws_unmap_request(sc, r); free(r->data, M_TWS); r->state = TWS_REQ_STATE_FREE; r->error_code = TWS_REQ_RET_RESET; } } static void tws_drain_response_queue(struct tws_softc *sc) { u_int16_t req_id; u_int64_t mfa; while ( tws_get_response(sc, &req_id, &mfa) ); } static int32_t tws_execute_scsi(struct tws_softc *sc, union ccb *ccb) { struct tws_command_packet *cmd_pkt; struct tws_request *req; struct ccb_hdr *ccb_h = &(ccb->ccb_h); struct ccb_scsiio *csio = &(ccb->csio); int error; u_int16_t lun; mtx_assert(&sc->sim_lock, MA_OWNED); if (ccb_h->target_id >= TWS_MAX_NUM_UNITS) { TWS_TRACE_DEBUG(sc, "traget id too big", ccb_h->target_id, ccb_h->target_lun); ccb_h->status |= CAM_TID_INVALID; xpt_done(ccb); return(0); } if (ccb_h->target_lun >= TWS_MAX_NUM_LUNS) { TWS_TRACE_DEBUG(sc, "target lun 2 big", ccb_h->target_id, ccb_h->target_lun); ccb_h->status |= CAM_LUN_INVALID; xpt_done(ccb); return(0); } if(ccb_h->flags & CAM_CDB_PHYS) { TWS_TRACE_DEBUG(sc, "cdb phy", ccb_h->target_id, ccb_h->target_lun); ccb_h->status = CAM_REQ_INVALID; xpt_done(ccb); return(0); } /* * We are going to work on this request. Mark it as enqueued (though * we don't actually queue it...) */ ccb_h->status |= CAM_SIM_QUEUED; req = tws_get_request(sc, TWS_REQ_TYPE_SCSI_IO); if ( !req ) { TWS_TRACE_DEBUG(sc, "no reqs", ccb_h->target_id, ccb_h->target_lun); ccb_h->status |= CAM_REQUEUE_REQ; xpt_done(ccb); return(0); } if((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if(ccb_h->flags & CAM_DIR_IN) req->flags |= TWS_DIR_IN; if(ccb_h->flags & CAM_DIR_OUT) req->flags |= TWS_DIR_OUT; } else { req->flags = TWS_DIR_NONE; /* no data */ } req->type = TWS_REQ_TYPE_SCSI_IO; req->cb = tws_scsi_complete; cmd_pkt = req->cmd_pkt; /* cmd_pkt->hdr.header_desc.size_header = 128; */ cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI; cmd_pkt->cmd.pkt_a.unit = ccb_h->target_id; cmd_pkt->cmd.pkt_a.status = 0; cmd_pkt->cmd.pkt_a.sgl_offset = 16; /* lower nibble */ lun = ccb_h->target_lun & 0XF; lun = lun << 12; cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun | req->request_id; /* upper nibble */ lun = ccb_h->target_lun & 0XF0; lun = lun << 8; cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries = lun; #ifdef TWS_DEBUG if ( csio->cdb_len > 16 ) TWS_TRACE(sc, "cdb len too big", ccb_h->target_id, csio->cdb_len); #endif if(ccb_h->flags & CAM_CDB_POINTER) bcopy(csio->cdb_io.cdb_ptr, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len); else bcopy(csio->cdb_io.cdb_bytes, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len); req->data = ccb; req->flags |= TWS_DATA_CCB; /* save ccb ptr */ req->ccb_ptr = ccb; /* * tws_map_load_data_callback will fill in the SGL, * and submit the I/O. 
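 *
 * Sketch of the deferred-mapping path that follows (see tws_map_request()
 * and tws_dmamap_data_load_cbfn() below): SCSI I/O is loaded with
 * BUS_DMA_WAITOK, so the load may return EINPROGRESS; the SIM queue is
 * then frozen and the callback performs the actual submit once the
 * mapping completes:
 *
 *     error = bus_dmamap_load_ccb(sc->data_tag, req->dma_map, req->data,
 *                                 tws_dmamap_data_load_cbfn, req,
 *                                 BUS_DMA_WAITOK);
 *     if (error == EINPROGRESS) {
 *             tws_freeze_simq(sc, req);
 *             error = 0;          (EINPROGRESS is not a fatal error)
 *     }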
*/ sc->stats.scsi_ios++; callout_reset_sbt(&req->timeout, SBT_1MS * ccb->ccb_h.timeout, 0, tws_timeout, req, 0); error = tws_map_request(sc, req); return(error); } int tws_send_scsi_cmd(struct tws_softc *sc, int cmd) { struct tws_request *req; struct tws_command_packet *cmd_pkt; int error; TWS_TRACE_DEBUG(sc, "entry",sc, cmd); req = tws_get_request(sc, TWS_REQ_TYPE_AEN_FETCH); if ( req == NULL ) return(ENOMEM); req->cb = tws_aen_complete; cmd_pkt = req->cmd_pkt; cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI; cmd_pkt->cmd.pkt_a.status = 0; cmd_pkt->cmd.pkt_a.unit = 0; cmd_pkt->cmd.pkt_a.sgl_offset = 16; cmd_pkt->cmd.pkt_a.lun_l4__req_id = req->request_id; cmd_pkt->cmd.pkt_a.cdb[0] = (u_int8_t)cmd; cmd_pkt->cmd.pkt_a.cdb[4] = 128; req->length = TWS_SECTOR_SIZE; req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT); if ( req->data == NULL ) return(ENOMEM); bzero(req->data, TWS_SECTOR_SIZE); req->flags = TWS_DIR_IN; callout_reset(&req->timeout, (TWS_IO_TIMEOUT * hz), tws_timeout, req); error = tws_map_request(sc, req); return(error); } int tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id, u_int32_t param_size, void *data) { struct tws_request *req; struct tws_command_packet *cmd_pkt; union tws_command_giga *cmd; struct tws_getset_param *param; int error; req = tws_get_request(sc, TWS_REQ_TYPE_GETSET_PARAM); if ( req == NULL ) { TWS_TRACE_DEBUG(sc, "null req", 0, 0); return(ENOMEM); } req->length = TWS_SECTOR_SIZE; req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT); if ( req->data == NULL ) return(ENOMEM); bzero(req->data, TWS_SECTOR_SIZE); param = (struct tws_getset_param *)req->data; req->cb = tws_getset_param_complete; req->flags = TWS_DIR_OUT; cmd_pkt = req->cmd_pkt; cmd = &cmd_pkt->cmd.pkt_g; cmd->param.sgl_off__opcode = BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_SET_PARAM); cmd->param.request_id = (u_int8_t)req->request_id; cmd->param.host_id__unit = 0; cmd->param.param_count = 1; cmd->param.size = 2; /* map routine will add sgls */ /* Specify which parameter we want to set. */ param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR); param->parameter_id = (u_int8_t)(param_id); param->parameter_size_bytes = (u_int16_t)param_size; memcpy(param->data, data, param_size); callout_reset(&req->timeout, (TWS_IOCTL_TIMEOUT * hz), tws_timeout, req); error = tws_map_request(sc, req); return(error); } int tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id, u_int32_t param_size, void *data) { struct tws_request *req; struct tws_command_packet *cmd_pkt; union tws_command_giga *cmd; struct tws_getset_param *param; u_int16_t reqid; u_int64_t mfa; int error = SUCCESS; req = tws_get_request(sc, TWS_REQ_TYPE_GETSET_PARAM); if ( req == NULL ) { TWS_TRACE_DEBUG(sc, "null req", 0, 0); return(FAILURE); } req->length = TWS_SECTOR_SIZE; req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT); if ( req->data == NULL ) return(FAILURE); bzero(req->data, TWS_SECTOR_SIZE); param = (struct tws_getset_param *)req->data; req->cb = NULL; req->flags = TWS_DIR_IN; cmd_pkt = req->cmd_pkt; cmd = &cmd_pkt->cmd.pkt_g; cmd->param.sgl_off__opcode = BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_GET_PARAM); cmd->param.request_id = (u_int8_t)req->request_id; cmd->param.host_id__unit = 0; cmd->param.param_count = 1; cmd->param.size = 2; /* map routine will add sgls */ /* Specify which parameter we want to set. 
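 *
 * A hypothetical caller of tws_get_param(), with made-up table and
 * parameter IDs purely for illustration:
 *
 *     u_int32_t value;
 *
 *     if (tws_get_param(sc, EXAMPLE_TABLE_ID, EXAMPLE_PARAM_ID,
 *                       sizeof(value), &value) != SUCCESS)
 *             TWS_TRACE_DEBUG(sc, "get param failed", 0, 0);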
*/ param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR); param->parameter_id = (u_int8_t)(param_id); param->parameter_size_bytes = (u_int16_t)param_size; error = tws_map_request(sc, req); if (!error) { reqid = tws_poll4_response(sc, &mfa); tws_unmap_request(sc, req); if ( reqid == TWS_REQ_TYPE_GETSET_PARAM ) { memcpy(data, param->data, param_size); } else { error = FAILURE; } } free(req->data, M_TWS); req->state = TWS_REQ_STATE_FREE; return(error); } void tws_unmap_request(struct tws_softc *sc, struct tws_request *req) { if (req->data != NULL) { if ( req->flags & TWS_DIR_IN ) bus_dmamap_sync(sc->data_tag, req->dma_map, BUS_DMASYNC_POSTREAD); if ( req->flags & TWS_DIR_OUT ) bus_dmamap_sync(sc->data_tag, req->dma_map, BUS_DMASYNC_POSTWRITE); mtx_lock(&sc->io_lock); bus_dmamap_unload(sc->data_tag, req->dma_map); mtx_unlock(&sc->io_lock); } } int32_t tws_map_request(struct tws_softc *sc, struct tws_request *req) { int32_t error = 0; /* If the command involves data, map that too. */ if (req->data != NULL) { int my_flags = ((req->type == TWS_REQ_TYPE_SCSI_IO) ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT); /* * Map the data buffer into bus space and build the SG list. */ mtx_lock(&sc->io_lock); if (req->flags & TWS_DATA_CCB) error = bus_dmamap_load_ccb(sc->data_tag, req->dma_map, req->data, tws_dmamap_data_load_cbfn, req, my_flags); else error = bus_dmamap_load(sc->data_tag, req->dma_map, req->data, req->length, tws_dmamap_data_load_cbfn, req, my_flags); mtx_unlock(&sc->io_lock); if (error == EINPROGRESS) { TWS_TRACE(sc, "in progress", 0, error); tws_freeze_simq(sc, req); error = 0; // EINPROGRESS is not a fatal error. } } else { /* no data involved */ error = tws_submit_command(sc, req); } return(error); } static void tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct tws_request *req = (struct tws_request *)arg; struct tws_softc *sc = req->sc; u_int16_t sgls = nseg; void *sgl_ptr; struct tws_cmd_generic *gcmd; if ( error ) { TWS_TRACE(sc, "SOMETHING BAD HAPPENED! error = %d\n", error, 0); } if ( error == EFBIG ) { TWS_TRACE(sc, "not enough data segs", 0, nseg); req->error_code = error; req->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG; return; } if ( req->flags & TWS_DIR_IN ) bus_dmamap_sync(req->sc->data_tag, req->dma_map, BUS_DMASYNC_PREREAD); if ( req->flags & TWS_DIR_OUT ) bus_dmamap_sync(req->sc->data_tag, req->dma_map, BUS_DMASYNC_PREWRITE); if ( segs ) { if ( (req->type == TWS_REQ_TYPE_PASSTHRU && GET_OPCODE(req->cmd_pkt->cmd.pkt_a.res__opcode) != TWS_FW_CMD_EXECUTE_SCSI) || req->type == TWS_REQ_TYPE_GETSET_PARAM) { gcmd = &req->cmd_pkt->cmd.pkt_g.generic; sgl_ptr = (u_int32_t *)(gcmd) + gcmd->size; gcmd->size += sgls * ((req->sc->is64bit && !tws_use_32bit_sgls) ? 
4 : 2 ); tws_fill_sg_list(req->sc, (void *)segs, sgl_ptr, sgls); } else { tws_fill_sg_list(req->sc, (void *)segs, (void *)&(req->cmd_pkt->cmd.pkt_a.sg_list), sgls); req->cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= sgls ; } } req->error_code = tws_submit_command(req->sc, req); } static void tws_fill_sg_list(struct tws_softc *sc, void *sgl_src, void *sgl_dest, u_int16_t num_sgl_entries) { int i; if ( sc->is64bit ) { struct tws_sg_desc64 *sgl_s = (struct tws_sg_desc64 *)sgl_src; if ( !tws_use_32bit_sgls ) { struct tws_sg_desc64 *sgl_d = (struct tws_sg_desc64 *)sgl_dest; if ( num_sgl_entries > TWS_MAX_64BIT_SG_ELEMENTS ) TWS_TRACE(sc, "64bit sg overflow", num_sgl_entries, 0); for (i = 0; i < num_sgl_entries; i++) { sgl_d[i].address = sgl_s->address; sgl_d[i].length = sgl_s->length; sgl_d[i].flag = 0; sgl_d[i].reserved = 0; sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) + sizeof(bus_dma_segment_t)); } } else { struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest; if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS ) TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0); for (i = 0; i < num_sgl_entries; i++) { sgl_d[i].address = sgl_s->address; sgl_d[i].length = sgl_s->length; sgl_d[i].flag = 0; sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) + sizeof(bus_dma_segment_t)); } } } else { struct tws_sg_desc32 *sgl_s = (struct tws_sg_desc32 *)sgl_src; struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest; if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS ) TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0); for (i = 0; i < num_sgl_entries; i++) { sgl_d[i].address = sgl_s[i].address; sgl_d[i].length = sgl_s[i].length; sgl_d[i].flag = 0; } } } void tws_intr(void *arg) { struct tws_softc *sc = (struct tws_softc *)arg; u_int32_t histat=0, db=0; if (!(sc)) { device_printf(sc->tws_dev, "null softc!!!\n"); return; } if ( tws_get_state(sc) == TWS_RESET ) { return; } if ( tws_get_state(sc) != TWS_ONLINE ) { return; } sc->stats.num_intrs++; histat = tws_read_reg(sc, TWS_I2O0_HISTAT, 4); if ( histat & TWS_BIT2 ) { TWS_TRACE_DEBUG(sc, "door bell :)", histat, TWS_I2O0_HISTAT); db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4); if ( db & TWS_BIT21 ) { tws_intr_attn_error(sc); return; } if ( db & TWS_BIT18 ) { tws_intr_attn_aen(sc); } } if ( histat & TWS_BIT3 ) { tws_intr_resp(sc); } } static void tws_intr_attn_aen(struct tws_softc *sc) { u_int32_t db=0; /* maskoff db intrs until all the aens are fetched */ /* tws_disable_db_intr(sc); */ tws_fetch_aen((void *)sc); tws_write_reg(sc, TWS_I2O0_HOBDBC, TWS_BIT18, 4); db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4); } static void tws_intr_attn_error(struct tws_softc *sc) { u_int32_t db=0; TWS_TRACE(sc, "attn error", 0, 0); tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4); db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4); device_printf(sc->tws_dev, "Micro controller error.\n"); tws_reset(sc); } static void tws_intr_resp(struct tws_softc *sc) { u_int16_t req_id; u_int64_t mfa; while ( tws_get_response(sc, &req_id, &mfa) ) { sc->stats.reqs_out++; if ( req_id == TWS_INVALID_REQID ) { TWS_TRACE_DEBUG(sc, "invalid req_id", mfa, req_id); sc->stats.reqs_errored++; tws_err_complete(sc, mfa); continue; } sc->reqs[req_id].cb(&sc->reqs[req_id]); } } static void tws_poll(struct cam_sim *sim) { struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim); TWS_TRACE_DEBUG(sc, "entry", 0, 0); tws_intr((void *) sc); } void tws_timeout(void *arg) { struct tws_request *req = (struct tws_request *)arg; struct tws_softc *sc = req->sc; if ( req->error_code == TWS_REQ_RET_RESET ) { return; } 
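/*
 * Summary of the reset sequence performed below (shared by tws_timeout()
 * and tws_reset(); no new behaviour, just the order of operations):
 *
 *   1. xpt_freeze_simq() plus TWS_RESET_START keep new I/O out.
 *   2. tws_assert_soft_reset() and tws_turn_off_interrupts() quiesce the
 *      controller.
 *   3. tws_reset_cb() drains the busy, reserved and response queues and
 *      polls TWS_I2O0_SCRPD3 for the controller-ready bit.
 *   4. tws_reinit() waits for the controller, re-issues init-connect,
 *      rebuilds the OBFL queue and turns interrupts back on.
 *   5. TWS_RESET_COMPLETE restores the previous state and the SIM queue
 *      is released.
 */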
mtx_lock(&sc->gen_lock); if ( req->error_code == TWS_REQ_RET_RESET ) { mtx_unlock(&sc->gen_lock); return; } if ( tws_get_state(sc) == TWS_RESET ) { mtx_unlock(&sc->gen_lock); return; } xpt_freeze_simq(sc->sim, 1); tws_send_event(sc, TWS_RESET_START); if (req->type == TWS_REQ_TYPE_SCSI_IO) { device_printf(sc->tws_dev, "I/O Request timed out... Resetting controller\n"); } else if (req->type == TWS_REQ_TYPE_PASSTHRU) { device_printf(sc->tws_dev, "IOCTL Request timed out... Resetting controller\n"); } else { device_printf(sc->tws_dev, "Internal Request timed out... Resetting controller\n"); } tws_assert_soft_reset(sc); tws_turn_off_interrupts(sc); tws_reset_cb( (void*) sc ); tws_reinit( (void*) sc ); // device_printf(sc->tws_dev, "Controller Reset complete!\n"); tws_send_event(sc, TWS_RESET_COMPLETE); mtx_unlock(&sc->gen_lock); xpt_release_simq(sc->sim, 1); } void tws_reset(void *arg) { struct tws_softc *sc = (struct tws_softc *)arg; mtx_lock(&sc->gen_lock); if ( tws_get_state(sc) == TWS_RESET ) { mtx_unlock(&sc->gen_lock); return; } xpt_freeze_simq(sc->sim, 1); tws_send_event(sc, TWS_RESET_START); device_printf(sc->tws_dev, "Resetting controller\n"); tws_assert_soft_reset(sc); tws_turn_off_interrupts(sc); tws_reset_cb( (void*) sc ); tws_reinit( (void*) sc ); // device_printf(sc->tws_dev, "Controller Reset complete!\n"); tws_send_event(sc, TWS_RESET_COMPLETE); mtx_unlock(&sc->gen_lock); xpt_release_simq(sc->sim, 1); } static void tws_reset_cb(void *arg) { struct tws_softc *sc = (struct tws_softc *)arg; time_t endt; int found = 0; u_int32_t reg; if ( tws_get_state(sc) != TWS_RESET ) { return; } // device_printf(sc->tws_dev, "Draining Busy Queue\n"); tws_drain_busy_queue(sc); // device_printf(sc->tws_dev, "Draining Reserved Reqs\n"); tws_drain_reserved_reqs(sc); // device_printf(sc->tws_dev, "Draining Response Queue\n"); tws_drain_response_queue(sc); // device_printf(sc->tws_dev, "Looking for controller ready flag...\n"); endt = TWS_LOCAL_TIME + TWS_POLL_TIMEOUT; while ((TWS_LOCAL_TIME <= endt) && (!found)) { reg = tws_read_reg(sc, TWS_I2O0_SCRPD3, 4); if ( reg & TWS_BIT13 ) { found = 1; // device_printf(sc->tws_dev, " ... Got it!\n"); } } if ( !found ) device_printf(sc->tws_dev, " ... 
Controller ready flag NOT found!\n"); } static void tws_reinit(void *arg) { struct tws_softc *sc = (struct tws_softc *)arg; int timeout_val=0; int try=2; int done=0; // device_printf(sc->tws_dev, "Waiting for Controller Ready\n"); while ( !done && try ) { if ( tws_ctlr_ready(sc) ) { done = 1; break; } else { timeout_val += 5; if ( timeout_val >= TWS_RESET_TIMEOUT ) { timeout_val = 0; if ( try ) tws_assert_soft_reset(sc); try--; } mtx_sleep(sc, &sc->gen_lock, 0, "tws_reinit", 5*hz); } } if (!done) { device_printf(sc->tws_dev, "FAILED to get Controller Ready!\n"); return; } sc->obfl_q_overrun = false; // device_printf(sc->tws_dev, "Sending initConnect\n"); if ( tws_init_connect(sc, tws_queue_depth) ) { TWS_TRACE_DEBUG(sc, "initConnect failed", 0, sc->is64bit); } tws_init_obfl_q(sc); tws_turn_on_interrupts(sc); wakeup_one(sc); } static void tws_freeze_simq(struct tws_softc *sc, struct tws_request *req) { /* Only for IO commands */ if (req->type == TWS_REQ_TYPE_SCSI_IO) { union ccb *ccb = (union ccb *)(req->ccb_ptr); xpt_freeze_simq(sc->sim, 1); ccb->ccb_h.status |= CAM_RELEASE_SIMQ; ccb->ccb_h.status |= CAM_REQUEUE_REQ; } } TUNABLE_INT("hw.tws.cam_depth", &tws_cam_depth); Index: projects/clang700-import/sys/dev/tws/tws_services.h =================================================================== --- projects/clang700-import/sys/dev/tws/tws_services.h (revision 340426) +++ projects/clang700-import/sys/dev/tws/tws_services.h (revision 340427) @@ -1,142 +1,135 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2010, LSI Corp. * All rights reserved. * Author : Manjunath Ranganathaiah * Support: freebsdraid@lsi.com * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3. Neither the name of the nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ /* #define TWS_DEBUG on */ void tws_trace(const char *file, const char *fun, int linenum, struct tws_softc *sc, char *desc, u_int64_t val1, u_int64_t val2); void tws_log(struct tws_softc *sc, int index); u_int32_t tws_read_reg(struct tws_softc *sc, int offset, int size); void tws_write_reg(struct tws_softc *sc, int offset, u_int32_t value, int size); u_int16_t tws_swap16(u_int16_t val); u_int32_t tws_swap32(u_int32_t val); u_int64_t tws_swap64(u_int64_t val); void tws_init_qs(struct tws_softc *sc); /* ----------------- trace ----------------- */ #define TWS_TRACE_ON on /* Alawys on - use wisely to trace errors */ #ifdef TWS_DEBUG #define TWS_TRACE_DEBUG_ON on #endif #ifdef TWS_TRACE_DEBUG_ON #define TWS_TRACE_DEBUG(sc, desc, val1, val2) \ tws_trace(__FILE__, __func__, __LINE__, sc, desc, \ (u_int64_t)val1, (u_int64_t)val2) #else #define TWS_TRACE_DEBUG(sc, desc, val1, val2) #endif #ifdef TWS_TRACE_ON #define TWS_TRACE(sc, desc, val1, val2) \ tws_trace(__FILE__, __func__, __LINE__, sc, desc, \ (u_int64_t)val1, (u_int64_t)val2) #else #define TWS_TRACE(sc, desc, val1, val2) #endif /* ---------------- logging ---------------- */ /* ---------------- logging ---------------- */ enum error_index { SYSCTL_TREE_NODE_ADD, PCI_COMMAND_READ, ALLOC_MEMORY_RES, ALLOC_IRQ_RES, SETUP_INTR_RES, TWS_CAM_ATTACH, CAM_SIMQ_ALLOC, CAM_SIM_ALLOC, TWS_XPT_BUS_REGISTER, TWS_XPT_CREATE_PATH, TWS_BUS_SCAN_REQ, TWS_INIT_FAILURE, TWS_CTLR_INIT_FAILURE, }; enum severity { ERROR = 1, WARNING, INFO, #if 0 DEBUG, #endif }; struct error_desc { char desc[256]; u_int32_t error_code; int severity_level; char *fmt; char *error_str; }; /* ----------- q services ------------- */ #define TWS_FREE_Q 0 #define TWS_PENDING_Q 1 #define TWS_BUSY_Q 2 #define TWS_COMPLETE_Q 3 /* req return codes */ #define TWS_REQ_RET_SUBMIT_SUCCESS 0 #define TWS_REQ_RET_PEND_NOMFA 1 #define TWS_REQ_RET_RESET 2 #define TWS_REQ_RET_INVALID 0xdead /* ------------------------ */ -#if (__FreeBSD_version >= 700000) #include #define TWS_LOCAL_TIME (time_second - utc_offset()) -#else -#include -#define TWS_LOCAL_TIME (time_second - (tz_minuteswest * 60) - \ - (wall_cmos_clock ? 
adjkerntz : 0)) -#endif - Index: projects/clang700-import/sys/dts/Makefile.inc =================================================================== --- projects/clang700-import/sys/dts/Makefile.inc (revision 340426) +++ projects/clang700-import/sys/dts/Makefile.inc (revision 340427) @@ -1,13 +1,13 @@ # $FreeBSD$ SYSDIR?=${SRCTOP}/sys test-dts: .for dts in ${DTS} @env MACHINE=`basename ${.CURDIR}` ${SYSDIR}/tools/fdt/make_dtb.sh ${SYSDIR} ${dts} /tmp +.endfor test-dtso: .for dtso in ${DTSO} @env MACHINE=`basename ${.CURDIR}` ${SYSDIR}/tools/fdt/make_dtbo.sh ${SYSDIR} ${dtso} /tmp - .endfor Index: projects/clang700-import/sys/dts/arm/Makefile =================================================================== --- projects/clang700-import/sys/dts/arm/Makefile (revision 340426) +++ projects/clang700-import/sys/dts/arm/Makefile (revision 340427) @@ -1,7 +1,7 @@ # $FreeBSD$ -DTS!=ls *.dts +DTS!=ls ${.CURDIR}/*.dts all: test-dts .include Index: projects/clang700-import/sys/dts/arm/Makefile.inc =================================================================== --- projects/clang700-import/sys/dts/arm/Makefile.inc (nonexistent) +++ projects/clang700-import/sys/dts/arm/Makefile.inc (revision 340427) @@ -0,0 +1,3 @@ +# $FreeBSD$ + +.include "../Makefile.inc" Property changes on: projects/clang700-import/sys/dts/arm/Makefile.inc ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang700-import/sys/dts/arm/overlays/Makefile =================================================================== --- projects/clang700-import/sys/dts/arm/overlays/Makefile (revision 340426) +++ projects/clang700-import/sys/dts/arm/overlays/Makefile (revision 340427) @@ -1,7 +1,7 @@ # $FreeBSD$ -DTSO!=ls *.dtso +DTSO!=ls ${.CURDIR}/*.dtso all: test-dtso .include Index: projects/clang700-import/sys/dts/arm64/Makefile =================================================================== --- projects/clang700-import/sys/dts/arm64/Makefile (revision 340426) +++ projects/clang700-import/sys/dts/arm64/Makefile (revision 340427) @@ -1,7 +1,7 @@ # $FreeBSD$ -DTS!=ls *.dts +DTS!=ls ${.CURDIR}/*.dts all: test-dts .include Index: projects/clang700-import/sys/dts/arm64/Makefile.inc =================================================================== --- projects/clang700-import/sys/dts/arm64/Makefile.inc (nonexistent) +++ projects/clang700-import/sys/dts/arm64/Makefile.inc (revision 340427) @@ -0,0 +1,3 @@ +# $FreeBSD$ + +.include "../Makefile.inc" Property changes on: projects/clang700-import/sys/dts/arm64/Makefile.inc ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang700-import/sys/dts/arm64/overlays/Makefile =================================================================== --- projects/clang700-import/sys/dts/arm64/overlays/Makefile (revision 340426) +++ projects/clang700-import/sys/dts/arm64/overlays/Makefile (revision 340427) @@ -1,7 +1,7 @@ # $FreeBSD$ -DTSO!=ls *.dtso +DTSO!=ls ${.CURDIR}/*.dtso all: test-dtso .include Index: projects/clang700-import/sys/dts/mips/Makefile 
=================================================================== --- projects/clang700-import/sys/dts/mips/Makefile (revision 340426) +++ projects/clang700-import/sys/dts/mips/Makefile (revision 340427) @@ -1,7 +1,7 @@ # $FreeBSD$ -DTS!=ls *.dts +DTS!=ls ${.CURDIR}/*.dts all: test-dts .include Index: projects/clang700-import/sys/dts/powerpc/Makefile =================================================================== --- projects/clang700-import/sys/dts/powerpc/Makefile (revision 340426) +++ projects/clang700-import/sys/dts/powerpc/Makefile (revision 340427) @@ -1,7 +1,7 @@ # $FreeBSD$ -DTS!=ls *.dts +DTS!=ls ${.CURDIR}/*.dts all: test-dts .include Index: projects/clang700-import/sys/i386/conf/GENERIC =================================================================== --- projects/clang700-import/sys/i386/conf/GENERIC (revision 340426) +++ projects/clang700-import/sys/i386/conf/GENERIC (revision 340427) @@ -1,368 +1,373 @@ # # GENERIC -- Generic kernel configuration file for FreeBSD/i386 # # For more information on this file, please read the config(5) manual page, # and/or the handbook section on Kernel Configuration Files: # # https://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html # # The handbook is also available locally in /usr/share/doc/handbook # if you've installed the doc distribution, otherwise always see the # FreeBSD World Wide Web server (https://www.FreeBSD.org/) for the # latest information. # # An exhaustive list of options and more detailed explanations of the # device lines is also present in the ../../conf/NOTES and NOTES files. # If you are in doubt as to the purpose or necessity of a line, check first # in NOTES. # # $FreeBSD$ cpu I486_CPU cpu I586_CPU cpu I686_CPU ident GENERIC makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols makeoptions WITH_CTF=1 # Run ctfconvert(1) for DTrace support options SCHED_ULE # ULE scheduler options PREEMPTION # Enable kernel thread preemption options VIMAGE # Subsystem virtualization, e.g. VNET options INET # InterNETworking options INET6 # IPv6 communications protocols options IPSEC # IP (v4/v6) security options IPSEC_SUPPORT # Allow kldload of ipsec and tcpmd5 options TCP_HHOOK # hhook(9) framework for TCP options TCP_OFFLOAD # TCP offload options SCTP # Stream Control Transmission Protocol options FFS # Berkeley Fast Filesystem options SOFTUPDATES # Enable FFS soft updates support options UFS_ACL # Support for access control lists options UFS_DIRHASH # Improve performance on big directories options UFS_GJOURNAL # Enable gjournal-based UFS journaling options QUOTA # Enable disk quotas for UFS options MD_ROOT # MD is a potential root device options NFSCL # Network Filesystem Client options NFSD # Network Filesystem Server options NFSLOCKD # Network Lock Manager options NFS_ROOT # NFS usable as /, requires NFSCL options MSDOSFS # MSDOS Filesystem options CD9660 # ISO 9660 Filesystem options PROCFS # Process filesystem (requires PSEUDOFS) options PSEUDOFS # Pseudo-filesystem framework options GEOM_RAID # Soft RAID functionality. 
options GEOM_LABEL # Provides labelization options COMPAT_FREEBSD4 # Compatible with FreeBSD4 options COMPAT_FREEBSD5 # Compatible with FreeBSD5 options COMPAT_FREEBSD6 # Compatible with FreeBSD6 options COMPAT_FREEBSD7 # Compatible with FreeBSD7 options COMPAT_FREEBSD9 # Compatible with FreeBSD9 options COMPAT_FREEBSD10 # Compatible with FreeBSD10 options COMPAT_FREEBSD11 # Compatible with FreeBSD11 options SCSI_DELAY=5000 # Delay (in ms) before probing SCSI options KTRACE # ktrace(1) support options STACK # stack(9) support options SYSVSHM # SYSV-style shared memory options SYSVMSG # SYSV-style message queues options SYSVSEM # SYSV-style semaphores options _KPOSIX_PRIORITY_SCHEDULING # POSIX P1003_1B real-time extensions options PRINTF_BUFR_SIZE=128 # Prevent printf output being interspersed. options KBD_INSTALL_CDEV # install a CDEV entry in /dev options HWPMC_HOOKS # Necessary kernel hooks for hwpmc(4) options AUDIT # Security event auditing options CAPABILITY_MODE # Capsicum capability mode options CAPABILITIES # Capsicum capabilities options MAC # TrustedBSD MAC Framework options KDTRACE_HOOKS # Kernel DTrace hooks options DDB_CTF # Kernel ELF linker loads CTF data options INCLUDE_CONFIG_FILE # Include this file in kernel options RACCT # Resource accounting framework options RACCT_DEFAULT_TO_DISABLED # Set kern.racct.enable=0 by default options RCTL # Resource limits # Debugging support. Always need this: options KDB # Enable kernel debugger support. options KDB_TRACE # Print a stack trace for a panic. # For full debugger support use (turn off in stable branch): options DDB # Support DDB. options GDB # Support remote GDB. options DEADLKRES # Enable the deadlock resolver options INVARIANTS # Enable calls of extra sanity checking options INVARIANT_SUPPORT # Extra sanity checks of internal structures, required by INVARIANTS options WITNESS # Enable checks to detect deadlocks and cycles options WITNESS_SKIPSPIN # Don't run witness on spinlocks for speed options MALLOC_DEBUG_MAXZONES=8 # Separate malloc(9) zones options VERBOSE_SYSINIT=0 # Support debug.verbose_sysinit, off by default # Kernel dump features. options EKCD # Support for encrypted kernel dumps options GZIO # gzip-compressed kernel and user dumps options ZSTDIO # zstd-compressed kernel and user dumps options NETDUMP # netdump(4) client support # To make an SMP kernel, the next two lines are needed options SMP # Symmetric MultiProcessor Kernel device apic # I/O APIC options EARLY_AP_STARTUP # CPU frequency control device cpufreq # Bus support. 
device acpi device pci options PCI_HP # PCI-Express native HotPlug options PCI_IOV # PCI SR-IOV support # Floppy drives device fdc # ATA controllers device ahci # AHCI-compatible SATA controllers device ata # Legacy ATA/SATA controllers device mvs # Marvell 88SX50XX/88SX60XX/88SX70XX/SoC SATA device siis # SiliconImage SiI3124/SiI3132/SiI3531 SATA # SCSI Controllers device ahc # AHA2940 and onboard AIC7xxx devices device esp # AMD Am53C974 (Tekram DC-390(T)) device hptiop # Highpoint RocketRaid 3xxx series device isp # Qlogic family #device ispfw # Firmware for QLogic HBAs- normally a module device mpt # LSI-Logic MPT-Fusion device mps # LSI-Logic MPT-Fusion 2 device mpr # LSI-Logic MPT-Fusion 3 device sym # NCR/Symbios Logic device trm # Tekram DC395U/UW/F DC315U adapters device isci # Intel C600 SAS controller # ATA/SCSI peripherals device scbus # SCSI bus (required for ATA/SCSI) device ch # SCSI media changers device da # Direct Access (disks) device sa # Sequential Access (tape etc) device cd # CD device pass # Passthrough device (direct ATA/SCSI access) device ses # Enclosure Services (SES and SAF-TE) #device ctl # CAM Target Layer # RAID controllers interfaced to the SCSI subsystem device amr # AMI MegaRAID device arcmsr # Areca SATA II RAID device ciss # Compaq Smart RAID 5* device hptmv # Highpoint RocketRAID 182x device hptnr # Highpoint DC7280, R750 device hptrr # Highpoint RocketRAID 17xx, 22xx, 23xx, 25xx device hpt27xx # Highpoint RocketRAID 27xx device iir # Intel Integrated RAID device ips # IBM (Adaptec) ServeRAID device mly # Mylex AcceleRAID/eXtremeRAID device twa # 3ware 9000 series PATA/SATA RAID device tws # LSI 3ware 9750 SATA+SAS 6Gb/s RAID controller # RAID controllers device aac # Adaptec FSA RAID device aacp # SCSI passthrough for aac (requires CAM) device aacraid # Adaptec by PMC RAID device ida # Compaq Smart RAID device mfi # LSI MegaRAID SAS device mlx # Mylex DAC960 family device mrsas # LSI/Avago MegaRAID SAS/SATA, 6Gb/s and 12Gb/s device pmspcv # PMC-Sierra SAS/SATA Controller driver device pst # Promise Supertrak SX6000 device twe # 3ware ATA RAID # NVM Express (NVMe) support device nvme # base NVMe driver device nvd # expose NVMe namespace as disks, depends on nvme # atkbdc0 controls both the keyboard and the PS/2 mouse device atkbdc # AT keyboard controller device atkbd # AT keyboard device psm # PS/2 mouse device kbdmux # keyboard multiplexer device vga # VGA video card driver options VESA # Add support for VESA BIOS Extensions (VBE) device splash # Splash screen and screen saver support # syscons is the default console driver, resembling an SCO console device sc options SC_PIXEL_MODE # add support for the raster text mode # vt is the new video console driver device vt device vt_vga device agp # support several AGP chipsets # Power management support (see NOTES for more options) #device apm # PCCARD (PCMCIA) support # PCMCIA and cardbus bridge support device cbb # cardbus (yenta) bridge device pccard # PC Card (16-bit) bus device cardbus # CardBus (32-bit) bus # Serial (COM) ports device uart # Generic UART driver # Parallel port device ppc device ppbus # Parallel port bus (required) device lpt # Printer device ppi # Parallel port interface device #device vpo # Requires scbus and da device puc # Multi I/O cards and multi-channel UARTs # PCI Ethernet NICs. 
device bxe # Broadcom NetXtreme II BCM5771X/BCM578XX 10GbE device de # DEC/Intel DC21x4x (``Tulip'') device em # Intel PRO/1000 Gigabit Ethernet Family device le # AMD Am7900 LANCE and Am79C9xx PCnet device ti # Alteon Networks Tigon I/II gigabit Ethernet device txp # 3Com 3cR990 (``Typhoon'') device vx # 3Com 3c590, 3c595 (``Vortex'') # PCI Ethernet NICs that use the common MII bus controller code. # NOTE: Be sure to keep the 'device miibus' line in order to use these NICs! device miibus # MII bus support device ae # Attansic/Atheros L2 FastEthernet device age # Attansic/Atheros L1 Gigabit Ethernet device alc # Atheros AR8131/AR8132 Ethernet device ale # Atheros AR8121/AR8113/AR8114 Ethernet device bce # Broadcom BCM5706/BCM5708 Gigabit Ethernet device bfe # Broadcom BCM440x 10/100 Ethernet device bge # Broadcom BCM570xx Gigabit Ethernet device cas # Sun Cassini/Cassini+ and NS DP83065 Saturn device dc # DEC/Intel 21143 and various workalikes device et # Agere ET1310 10/100/Gigabit Ethernet device fxp # Intel EtherExpress PRO/100B (82557, 82558) device gem # Sun GEM/Sun ERI/Apple GMAC device hme # Sun HME (Happy Meal Ethernet) device jme # JMicron JMC250 Gigabit/JMC260 Fast Ethernet device lge # Level 1 LXT1001 gigabit Ethernet device msk # Marvell/SysKonnect Yukon II Gigabit Ethernet device nfe # nVidia nForce MCP on-board Ethernet device nge # NatSemi DP83820 gigabit Ethernet device pcn # AMD Am79C97x PCI 10/100 (precedence over 'le') device re # RealTek 8139C+/8169/8169S/8110S device rl # RealTek 8129/8139 device sf # Adaptec AIC-6915 (``Starfire'') device sge # Silicon Integrated Systems SiS190/191 device sis # Silicon Integrated Systems SiS 900/SiS 7016 device sk # SysKonnect SK-984x & SK-982x gigabit Ethernet device ste # Sundance ST201 (D-Link DFE-550TX) device stge # Sundance/Tamarack TC9021 gigabit Ethernet device tl # Texas Instruments ThunderLAN device tx # SMC EtherPower II (83c170 ``EPIC'') device vge # VIA VT612x gigabit Ethernet device vr # VIA Rhine, Rhine II device vte # DM&P Vortex86 RDC R6040 Fast Ethernet device wb # Winbond W89C840F device xl # 3Com 3c90x (``Boomerang'', ``Cyclone'') # ISA Ethernet NICs. pccard NICs included. device cs # Crystal Semiconductor CS89x0 NIC # 'device ed' requires 'device miibus' device ed # NE[12]000, SMC Ultra, 3c503, DS8390 cards device ex # Intel EtherExpress Pro/10 and Pro/10+ device ep # Etherlink III based cards device fe # Fujitsu MB8696x based cards device sn # SMC's 9000 series of Ethernet chips device xe # Xircom pccard Ethernet # Wireless NIC cards device wlan # 802.11 support options IEEE80211_DEBUG # enable debug msgs options IEEE80211_AMPDU_AGE # age frames in AMPDU reorder q's options IEEE80211_SUPPORT_MESH # enable 802.11s draft support device wlan_wep # 802.11 WEP support device wlan_ccmp # 802.11 CCMP support device wlan_tkip # 802.11 TKIP support device wlan_amrr # AMRR transmit rate control algorithm device an # Aironet 4500/4800 802.11 wireless NICs. device ath # Atheros NICs device ath_pci # Atheros pci/cardbus glue device ath_hal # pci/cardbus chip support options AH_SUPPORT_AR5416 # enable AR5416 tx/rx descriptors options AH_AR5416_INTERRUPT_MITIGATION # AR5416 interrupt mitigation options ATH_ENABLE_11N # Enable 802.11n support for AR5416 and later device ath_rate_sample # SampleRate tx rate control for ath #device bwi # Broadcom BCM430x/BCM431x wireless NICs. #device bwn # Broadcom BCM43xx wireless NICs. device ipw # Intel 2100 wireless NICs. device iwi # Intel 2200BG/2225BG/2915ABG wireless NICs. 
device iwn # Intel 4965/1000/5000/6000 wireless NICs. device malo # Marvell Libertas wireless NICs. device mwl # Marvell 88W8363 802.11n wireless NICs. device ral # Ralink Technology RT2500 wireless NICs. device wi # WaveLAN/Intersil/Symbol 802.11 wireless NICs. device wpi # Intel 3945ABG wireless NICs. # Pseudo devices. device crypto # core crypto support device loop # Network loopback device random # Entropy device device padlock_rng # VIA Padlock RNG device rdrand_rng # Intel Bull Mountain RNG device ether # Ethernet support device vlan # 802.1Q VLAN support device tun # Packet tunnel. device md # Memory "disks" device gif # IPv6 and IPv4 tunneling device firmware # firmware assist module # The `bpf' device enables the Berkeley Packet Filter. # Be aware of the administrative consequences of enabling this! # Note that 'bpf' is required for DHCP. device bpf # Berkeley packet filter # USB support options USB_DEBUG # enable debug msgs device uhci # UHCI PCI->USB interface device ohci # OHCI PCI->USB interface device ehci # EHCI PCI->USB interface (USB 2.0) device xhci # XHCI PCI->USB interface (USB 3.0) device usb # USB Bus (required) device ukbd # Keyboard device umass # Disks/Mass storage - Requires scbus and da # Sound support device sound # Generic sound driver (required) device snd_cmi # CMedia CMI8338/CMI8738 device snd_csa # Crystal Semiconductor CS461x/428x device snd_emu10kx # Creative SoundBlaster Live! and Audigy device snd_es137x # Ensoniq AudioPCI ES137x device snd_hda # Intel High Definition Audio device snd_ich # Intel, NVidia and other ICH AC'97 Audio device snd_via8233 # VIA VT8233x Audio # MMC/SD device mmc # MMC/SD bus device mmcsd # MMC/SD memory card device sdhci # Generic PCI SD Host Controller # VirtIO support device virtio # Generic VirtIO bus (required) device virtio_pci # VirtIO PCI device device vtnet # VirtIO Ethernet device device virtio_blk # VirtIO Block device device virtio_scsi # VirtIO SCSI device device virtio_balloon # VirtIO Memory Balloon device # HyperV drivers and enhancement support device hyperv # HyperV drivers # Xen HVM Guest Optimizations # NOTE: XENHVM depends on xenpci. They must be added or removed together. options XENHVM # Xen HVM kernel infrastructure device xenpci # Xen HVM Hypervisor services driver # VMware support device vmx # VMware VMXNET3 Ethernet + +# evdev interface +options EVDEV_SUPPORT # evdev support in legacy drivers +device evdev # input event device support +device uinput # install /dev/uinput cdev Index: projects/clang700-import/sys/i386/conf/MINIMAL =================================================================== --- projects/clang700-import/sys/i386/conf/MINIMAL (revision 340426) +++ projects/clang700-import/sys/i386/conf/MINIMAL (revision 340427) @@ -1,150 +1,155 @@ # # MINIMAL -- Mostly Minimal kernel configuration file for FreeBSD/i386 # # Many definitions of minimal are possible. The one this file follows is # GENERIC, minus all functionality that can be replaced by loading kernel # modules. # # Exceptions: # o While UFS is buildable as a module, the current module lacks # some features (ACL, GJOURNAL) that GENERIC includes. # o acpi as a module has been reported flakey and not well tested, so # is included in the kernel. # o random is included due to uncertainty...
# o Many networking things are included # # For now, please run changes to this list past imp@freebsd.org # # For more information on this file, please read the config(5) manual page, # and/or the handbook section on Kernel Configuration Files: # # https://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html # # The handbook is also available locally in /usr/share/doc/handbook # if you've installed the doc distribution, otherwise always see the # FreeBSD World Wide Web server (https://www.FreeBSD.org/) for the # latest information. # # An exhaustive list of options and more detailed explanations of the # device lines is also present in the ../../conf/NOTES and NOTES files. # If you are in doubt as to the purpose or necessity of a line, check first # in NOTES. # # $FreeBSD$ cpu I486_CPU cpu I586_CPU cpu I686_CPU ident MINIMAL makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols makeoptions WITH_CTF=1 # Run ctfconvert(1) for DTrace support options SCHED_ULE # ULE scheduler options PREEMPTION # Enable kernel thread preemption options INET # InterNETworking options INET6 # IPv6 communications protocols options TCP_OFFLOAD # TCP offload options SCTP # Stream Control Transmission Protocol options FFS # Berkeley Fast Filesystem options SOFTUPDATES # Enable FFS soft updates support options UFS_ACL # Support for access control lists options UFS_DIRHASH # Improve performance on big directories options UFS_GJOURNAL # Enable gjournal-based UFS journaling options QUOTA # Enable disk quotas for UFS options MD_ROOT # MD is a potential root device options COMPAT_FREEBSD4 # Compatible with FreeBSD4 options COMPAT_FREEBSD5 # Compatible with FreeBSD5 options COMPAT_FREEBSD6 # Compatible with FreeBSD6 options COMPAT_FREEBSD7 # Compatible with FreeBSD7 options COMPAT_FREEBSD9 # Compatible with FreeBSD9 options COMPAT_FREEBSD10 # Compatible with FreeBSD10 options COMPAT_FREEBSD11 # Compatible with FreeBSD11 options SCSI_DELAY=5000 # Delay (in ms) before probing SCSI options KTRACE # ktrace(1) support options STACK # stack(9) support options SYSVSHM # SYSV-style shared memory options SYSVMSG # SYSV-style message queues options SYSVSEM # SYSV-style semaphores options _KPOSIX_PRIORITY_SCHEDULING # POSIX P1003_1B real-time extensions options PRINTF_BUFR_SIZE=128 # Prevent printf output being interspersed. options KBD_INSTALL_CDEV # install a CDEV entry in /dev options HWPMC_HOOKS # Necessary kernel hooks for hwpmc(4) options AUDIT # Security event auditing options CAPABILITY_MODE # Capsicum capability mode options CAPABILITIES # Capsicum capabilities options MAC # TrustedBSD MAC Framework options KDTRACE_FRAME # Ensure frames are compiled in options KDTRACE_HOOKS # Kernel DTrace hooks options DDB_CTF # Kernel ELF linker loads CTF data options INCLUDE_CONFIG_FILE # Include this file in kernel # Debugging support. Always need this: options KDB # Enable kernel debugger support. options KDB_TRACE # Print a stack trace for a panic. # For full debugger support use (turn off in stable branch): options DDB # Support DDB. options GDB # Support remote GDB.
options DEADLKRES # Enable the deadlock resolver options INVARIANTS # Enable calls of extra sanity checking options INVARIANT_SUPPORT # Extra sanity checks of internal structures, required by INVARIANTS options WITNESS # Enable checks to detect deadlocks and cycles options WITNESS_SKIPSPIN # Don't run witness on spinlocks for speed options MALLOC_DEBUG_MAXZONES=8 # Separate malloc(9) zones options VERBOSE_SYSINIT=0 # Support debug.verbose_sysinit, off by default # Make an SMP-capable kernel by default options SMP # Symmetric MultiProcessor Kernel options EARLY_AP_STARTUP device apic # CPU frequency control device cpufreq # Bus support. device acpi options ACPI_DMAR device pci # atkbdc0 controls both the keyboard and the PS/2 mouse device atkbdc # AT keyboard controller device atkbd # AT keyboard device psm # PS/2 mouse device kbdmux # keyboard multiplexer device vga # VGA video card driver options VESA # Add support for VESA BIOS Extensions (VBE) device splash # Splash screen and screen saver support # syscons is the default console driver, resembling an SCO console device sc options SC_PIXEL_MODE # add support for the raster text mode # vt is the new video console driver device vt device vt_vga device vt_efifb device agp # support several AGP chipsets # Pseudo devices. device loop # Network loopback device random # Entropy device device padlock_rng # VIA Padlock RNG device rdrand_rng # Intel Bull Mountain RNG device ether # Ethernet support device vlan # 802.1Q VLAN support device tun # Packet tunnel. device gif # IPv6 and IPv4 tunneling # The `bpf' device enables the Berkeley Packet Filter. # Be aware of the administrative consequences of enabling this! # Note that 'bpf' is required for DHCP. device bpf # Berkeley packet filter # Xen HVM Guest Optimizations # NOTE: XENHVM depends on xenpci. They must be added or removed together. options XENHVM # Xen HVM kernel infrastructure device xenpci # Xen HVM Hypervisor services driver + +# evdev interface +options EVDEV_SUPPORT # evdev support in legacy drivers +device evdev # input event device support +device uinput # install /dev/uinput cdev Index: projects/clang700-import/sys/kern/genoffset.c =================================================================== --- projects/clang700-import/sys/kern/genoffset.c (revision 340426) +++ projects/clang700-import/sys/kern/genoffset.c (revision 340427) @@ -1,44 +1,43 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2018, Matthew Macy * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef OFFSET_TEST #define GENOFFSET #endif #include __FBSDID("$FreeBSD$"); #include #include #include -OFFSYM(td_pre_epoch_prio, thread, u_char); OFFSYM(td_priority, thread, u_char); OFFSYM(td_epochnest, thread, u_char); OFFSYM(td_critnest, thread, u_int); OFFSYM(td_pinned, thread, int); OFFSYM(td_owepreempt, thread, u_char); Index: projects/clang700-import/sys/kern/kern_environment.c =================================================================== --- projects/clang700-import/sys/kern/kern_environment.c (revision 340426) +++ projects/clang700-import/sys/kern/kern_environment.c (revision 340427) @@ -1,932 +1,933 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1998 Michael Smith * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * The unified bootloader passes us a pointer to a preserved copy of * bootstrap/kernel environment variables. We convert them to a * dynamic array of strings later when the VM subsystem is up. * * We make these available through the kenv(2) syscall for userland * and through kern_getenv()/freeenv() kern_setenv() kern_unsetenv() testenv() for * the kernel. 
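 *
 * For orientation only, a userland consumer might fetch a single
 * variable through kenv(2) roughly as follows (a minimal sketch; the
 * variable name is illustrative and error handling is omitted):
 *
 *	#include <kenv.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		char buf[256];
 *
 *		if (kenv(KENV_GET, "smbios.system.product", buf,
 *		    sizeof(buf)) > 0)
 *			printf("%s\n", buf);
 *		return (0);
 *	}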
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static char *_getenv_dynamic_locked(const char *name, int *idx); static char *_getenv_dynamic(const char *name, int *idx); static MALLOC_DEFINE(M_KENV, "kenv", "kernel environment"); #define KENV_SIZE 512 /* Maximum number of environment strings */ /* pointer to the config-generated static environment */ char *kern_envp; /* pointer to the md-static environment */ char *md_envp; static int md_env_len; static int md_env_pos; static char *kernenv_next(char *); /* dynamic environment variables */ char **kenvp; struct mtx kenv_lock; /* * No need to protect this with a mutex since SYSINITS are single threaded. */ bool dynamic_kenv; #define KENV_CHECK if (!dynamic_kenv) \ panic("%s: called before SI_SUB_KMEM", __func__) int sys_kenv(td, uap) struct thread *td; struct kenv_args /* { int what; const char *name; char *value; int len; } */ *uap; { char *name, *value, *buffer = NULL; size_t len, done, needed, buflen; int error, i; KASSERT(dynamic_kenv, ("kenv: dynamic_kenv = false")); error = 0; if (uap->what == KENV_DUMP) { #ifdef MAC error = mac_kenv_check_dump(td->td_ucred); if (error) return (error); #endif done = needed = 0; buflen = uap->len; if (buflen > KENV_SIZE * (KENV_MNAMELEN + KENV_MVALLEN + 2)) buflen = KENV_SIZE * (KENV_MNAMELEN + KENV_MVALLEN + 2); if (uap->len > 0 && uap->value != NULL) buffer = malloc(buflen, M_TEMP, M_WAITOK|M_ZERO); mtx_lock(&kenv_lock); for (i = 0; kenvp[i] != NULL; i++) { len = strlen(kenvp[i]) + 1; needed += len; len = min(len, buflen - done); /* * If called with a NULL or insufficiently large * buffer, just keep computing the required size. */ if (uap->value != NULL && buffer != NULL && len > 0) { bcopy(kenvp[i], buffer + done, len); done += len; } } mtx_unlock(&kenv_lock); if (buffer != NULL) { error = copyout(buffer, uap->value, done); free(buffer, M_TEMP); } td->td_retval[0] = ((done == needed) ? 0 : needed); return (error); } switch (uap->what) { case KENV_SET: error = priv_check(td, PRIV_KENV_SET); if (error) return (error); break; case KENV_UNSET: error = priv_check(td, PRIV_KENV_UNSET); if (error) return (error); break; } name = malloc(KENV_MNAMELEN + 1, M_TEMP, M_WAITOK); error = copyinstr(uap->name, name, KENV_MNAMELEN + 1, NULL); if (error) goto done; switch (uap->what) { case KENV_GET: #ifdef MAC error = mac_kenv_check_get(td->td_ucred, name); if (error) goto done; #endif value = kern_getenv(name); if (value == NULL) { error = ENOENT; goto done; } len = strlen(value) + 1; if (len > uap->len) len = uap->len; error = copyout(value, uap->value, len); freeenv(value); if (error) goto done; td->td_retval[0] = len; break; case KENV_SET: len = uap->len; if (len < 1) { error = EINVAL; goto done; } if (len > KENV_MVALLEN + 1) len = KENV_MVALLEN + 1; value = malloc(len, M_TEMP, M_WAITOK); error = copyinstr(uap->value, value, len, NULL); if (error) { free(value, M_TEMP); goto done; } #ifdef MAC error = mac_kenv_check_set(td->td_ucred, name, value); if (error == 0) #endif kern_setenv(name, value); free(value, M_TEMP); break; case KENV_UNSET: #ifdef MAC error = mac_kenv_check_unset(td->td_ucred, name); if (error) goto done; #endif error = kern_unsetenv(name); if (error) error = ENOENT; break; default: error = EINVAL; break; } done: free(name, M_TEMP); return (error); } /* * Populate the initial kernel environment. 
* * This is called very early in MD startup, either to provide a copy of the * environment obtained from a boot loader, or to provide an empty buffer into * which MD code can store an initial environment using kern_setenv() calls. * * kern_envp is set to the static_env generated by config(8). This implements * the env keyword described in config(5). * * If len is non-zero, the caller is providing an empty buffer. The caller will * subsequently use kern_setenv() to add up to len bytes of initial environment * before the dynamic environment is available. * * If len is zero, the caller is providing a pre-loaded buffer containing * environment strings. Additional strings cannot be added until the dynamic * environment is available. The memory pointed to must remain stable at least * until sysinit runs init_dynamic_kenv() and preferably until after SI_SUB_KMEM * is finished so that subr_hints routines may continue to use it until the * environments have been fully merged at the end of the pass. If no initial * environment is available from the boot loader, passing a NULL pointer allows * the static_env to be installed if it is configured. In this case, any call * to kern_setenv() prior to the setup of the dynamic environment will result in * a panic. */ void init_static_kenv(char *buf, size_t len) { char *eval; + KASSERT(!dynamic_kenv, ("kenv: dynamic_kenv already initialized")); /* * Give the static environment a chance to disable the loader(8) * environment first. This is done with loader_env.disabled=1. * * static_env and static_hints may both be disabled, but in slightly * different ways. For static_env, we just don't setup kern_envp and * it's as if a static env wasn't even provided. For static_hints, * we effectively zero out the buffer to stop the rest of the kernel * from being able to use it. * * We're intentionally setting this up so that static_hints.disabled may * be specified in either the MD env or the static env. This keeps us * consistent in our new world view. * * As a warning, the static environment may not be disabled in any way * if the static environment has disabled the loader environment. */ kern_envp = static_env; eval = kern_getenv("loader_env.disabled"); if (eval == NULL || strcmp(eval, "1") != 0) { md_envp = buf; md_env_len = len; md_env_pos = 0; eval = kern_getenv("static_env.disabled"); if (eval != NULL && strcmp(eval, "1") == 0) *kern_envp = '\0'; } eval = kern_getenv("static_hints.disabled"); if (eval != NULL && strcmp(eval, "1") == 0) *static_hints = '\0'; } static void init_dynamic_kenv_from(char *init_env, int *curpos) { char *cp, *cpnext, *eqpos, *found; size_t len; int i; if (init_env && *init_env != '\0') { found = NULL; i = *curpos; for (cp = init_env; cp != NULL; cp = cpnext) { cpnext = kernenv_next(cp); len = strlen(cp) + 1; if (len > KENV_MNAMELEN + 1 + KENV_MVALLEN + 1) { printf( "WARNING: too long kenv string, ignoring %s\n", cp); goto sanitize; } eqpos = strchr(cp, '='); if (eqpos == NULL) { printf( "WARNING: malformed static env value, ignoring %s\n", cp); goto sanitize; } *eqpos = 0; /* * De-dupe the environment as we go. We don't add the * duplicated assignments because config(8) will flip * the order of the static environment around to make * kernel processing match the order of specification * in the kernel config. 
*/ found = _getenv_dynamic_locked(cp, NULL); *eqpos = '='; if (found != NULL) goto sanitize; if (i > KENV_SIZE) { printf( "WARNING: too many kenv strings, ignoring %s\n", cp); goto sanitize; } kenvp[i] = malloc(len, M_KENV, M_WAITOK); strcpy(kenvp[i++], cp); sanitize: explicit_bzero(cp, len - 1); } *curpos = i; } } /* * Setup the dynamic kernel environment. */ static void init_dynamic_kenv(void *data __unused) { int dynamic_envpos; kenvp = malloc((KENV_SIZE + 1) * sizeof(char *), M_KENV, M_WAITOK | M_ZERO); dynamic_envpos = 0; init_dynamic_kenv_from(md_envp, &dynamic_envpos); init_dynamic_kenv_from(kern_envp, &dynamic_envpos); kenvp[dynamic_envpos] = NULL; mtx_init(&kenv_lock, "kernel environment", NULL, MTX_DEF); dynamic_kenv = true; } SYSINIT(kenv, SI_SUB_KMEM + 1, SI_ORDER_FIRST, init_dynamic_kenv, NULL); void freeenv(char *env) { if (dynamic_kenv && env != NULL) { explicit_bzero(env, strlen(env)); free(env, M_KENV); } } /* * Internal functions for string lookup. */ static char * _getenv_dynamic_locked(const char *name, int *idx) { char *cp; int len, i; len = strlen(name); for (cp = kenvp[0], i = 0; cp != NULL; cp = kenvp[++i]) { if ((strncmp(cp, name, len) == 0) && (cp[len] == '=')) { if (idx != NULL) *idx = i; return (cp + len + 1); } } return (NULL); } static char * _getenv_dynamic(const char *name, int *idx) { mtx_assert(&kenv_lock, MA_OWNED); return (_getenv_dynamic_locked(name, idx)); } static char * _getenv_static_from(char *chkenv, const char *name) { char *cp, *ep; int len; for (cp = chkenv; cp != NULL; cp = kernenv_next(cp)) { for (ep = cp; (*ep != '=') && (*ep != 0); ep++) ; if (*ep != '=') continue; len = ep - cp; ep++; if (!strncmp(name, cp, len) && name[len] == 0) return (ep); } return (NULL); } static char * _getenv_static(const char *name) { char *val; val = _getenv_static_from(md_envp, name); if (val != NULL) return (val); val = _getenv_static_from(kern_envp, name); if (val != NULL) return (val); return (NULL); } /* * Look up an environment variable by name. * Return a pointer to the string if found. * The pointer has to be freed with freeenv() * after use. */ char * kern_getenv(const char *name) { char buf[KENV_MNAMELEN + 1 + KENV_MVALLEN + 1]; char *ret; if (dynamic_kenv) { if (getenv_string(name, buf, sizeof(buf))) { ret = strdup(buf, M_KENV); } else { ret = NULL; WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "getenv"); } } else ret = _getenv_static(name); return (ret); } /* * Test if an environment variable is defined. */ int testenv(const char *name) { char *cp; if (dynamic_kenv) { mtx_lock(&kenv_lock); cp = _getenv_dynamic(name, NULL); mtx_unlock(&kenv_lock); } else cp = _getenv_static(name); if (cp != NULL) return (1); return (0); } /* * Set an environment variable in the MD-static environment. This cannot * feasibly be done on config(8)-generated static environments as they don't * generally include space for extra variables. */ static int setenv_static(const char *name, const char *value) { int len; if (md_env_pos >= md_env_len) return (-1); /* Check space for x=y and two nuls */ len = strlen(name) + strlen(value); if (len + 3 < md_env_len - md_env_pos) { len = sprintf(&md_envp[md_env_pos], "%s=%s", name, value); md_env_pos += len+1; md_envp[md_env_pos] = '\0'; return (0); } else return (-1); } /* * Set an environment variable by name. 
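 *
 * A typical in-kernel caller pairs this with kern_getenv() and
 * freeenv(); in this sketch the variable name is purely illustrative:
 *
 *	char *val;
 *
 *	kern_setenv("debug.example_knob", "1");
 *	if ((val = kern_getenv("debug.example_knob")) != NULL) {
 *		printf("debug.example_knob=%s\n", val);
 *		freeenv(val);
 *	}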
*/ int kern_setenv(const char *name, const char *value) { char *buf, *cp, *oldenv; int namelen, vallen, i; if (!dynamic_kenv && md_env_len > 0) return (setenv_static(name, value)); KENV_CHECK; namelen = strlen(name) + 1; if (namelen > KENV_MNAMELEN + 1) return (-1); vallen = strlen(value) + 1; if (vallen > KENV_MVALLEN + 1) return (-1); buf = malloc(namelen + vallen, M_KENV, M_WAITOK); sprintf(buf, "%s=%s", name, value); mtx_lock(&kenv_lock); cp = _getenv_dynamic(name, &i); if (cp != NULL) { oldenv = kenvp[i]; kenvp[i] = buf; mtx_unlock(&kenv_lock); free(oldenv, M_KENV); } else { /* We add the option if it wasn't found */ for (i = 0; (cp = kenvp[i]) != NULL; i++) ; /* Bounds checking */ if (i < 0 || i >= KENV_SIZE) { free(buf, M_KENV); mtx_unlock(&kenv_lock); return (-1); } kenvp[i] = buf; kenvp[i + 1] = NULL; mtx_unlock(&kenv_lock); } return (0); } /* * Unset an environment variable string. */ int kern_unsetenv(const char *name) { char *cp, *oldenv; int i, j; KENV_CHECK; mtx_lock(&kenv_lock); cp = _getenv_dynamic(name, &i); if (cp != NULL) { oldenv = kenvp[i]; for (j = i + 1; kenvp[j] != NULL; j++) kenvp[i++] = kenvp[j]; kenvp[i] = NULL; mtx_unlock(&kenv_lock); explicit_bzero(oldenv, strlen(oldenv)); free(oldenv, M_KENV); return (0); } mtx_unlock(&kenv_lock); return (-1); } /* * Return a string value from an environment variable. */ int getenv_string(const char *name, char *data, int size) { char *cp; if (dynamic_kenv) { mtx_lock(&kenv_lock); cp = _getenv_dynamic(name, NULL); if (cp != NULL) strlcpy(data, cp, size); mtx_unlock(&kenv_lock); } else { cp = _getenv_static(name); if (cp != NULL) strlcpy(data, cp, size); } return (cp != NULL); } /* * Return an array of integers at the given type size and signedness. */ int getenv_array(const char *name, void *pdata, int size, int *psize, int type_size, bool allow_signed) { char buf[KENV_MNAMELEN + 1 + KENV_MVALLEN + 1]; uint8_t shift; int64_t value; int64_t old; char *end; char *ptr; int n; if (getenv_string(name, buf, sizeof(buf)) == 0) return (0); /* get maximum number of elements */ size /= type_size; n = 0; for (ptr = buf; *ptr != 0; ) { value = strtoq(ptr, &end, 0); /* check if signed numbers are allowed */ if (value < 0 && !allow_signed) goto error; /* check for invalid value */ if (ptr == end) goto error; /* check for valid suffix */ switch (*end) { case 't': case 'T': shift = 40; end++; break; case 'g': case 'G': shift = 30; end++; break; case 'm': case 'M': shift = 20; end++; break; case 'k': case 'K': shift = 10; end++; break; case ' ': case '\t': case ',': case 0: shift = 0; break; default: /* garbage after numeric value */ goto error; } /* skip till next value, if any */ while (*end == '\t' || *end == ',' || *end == ' ') end++; /* update pointer */ ptr = end; /* apply shift */ old = value; value <<= shift; /* overflow check */ if ((value >> shift) != old) goto error; /* check for buffer overflow */ if (n >= size) goto error; /* store value according to type size */ switch (type_size) { case 1: if (allow_signed) { if (value < SCHAR_MIN || value > SCHAR_MAX) goto error; } else { if (value < 0 || value > UCHAR_MAX) goto error; } ((uint8_t *)pdata)[n] = (uint8_t)value; break; case 2: if (allow_signed) { if (value < SHRT_MIN || value > SHRT_MAX) goto error; } else { if (value < 0 || value > USHRT_MAX) goto error; } ((uint16_t *)pdata)[n] = (uint16_t)value; break; case 4: if (allow_signed) { if (value < INT_MIN || value > INT_MAX) goto error; } else { if (value > UINT_MAX) goto error; } ((uint32_t *)pdata)[n] = (uint32_t)value; break; 
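		/*
		 * 8-byte elements: the parsed value is already an int64_t,
		 * so no further range check is needed before storing it.
		 */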
case 8: ((uint64_t *)pdata)[n] = (uint64_t)value; break; default: goto error; } n++; } *psize = n * type_size; if (n != 0) return (1); /* success */ error: return (0); /* failure */ } /* * Return an integer value from an environment variable. */ int getenv_int(const char *name, int *data) { quad_t tmp; int rval; rval = getenv_quad(name, &tmp); if (rval) *data = (int) tmp; return (rval); } /* * Return an unsigned integer value from an environment variable. */ int getenv_uint(const char *name, unsigned int *data) { quad_t tmp; int rval; rval = getenv_quad(name, &tmp); if (rval) *data = (unsigned int) tmp; return (rval); } /* * Return an int64_t value from an environment variable. */ int getenv_int64(const char *name, int64_t *data) { quad_t tmp; int64_t rval; rval = getenv_quad(name, &tmp); if (rval) *data = (int64_t) tmp; return (rval); } /* * Return an uint64_t value from an environment variable. */ int getenv_uint64(const char *name, uint64_t *data) { quad_t tmp; uint64_t rval; rval = getenv_quad(name, &tmp); if (rval) *data = (uint64_t) tmp; return (rval); } /* * Return a long value from an environment variable. */ int getenv_long(const char *name, long *data) { quad_t tmp; int rval; rval = getenv_quad(name, &tmp); if (rval) *data = (long) tmp; return (rval); } /* * Return an unsigned long value from an environment variable. */ int getenv_ulong(const char *name, unsigned long *data) { quad_t tmp; int rval; rval = getenv_quad(name, &tmp); if (rval) *data = (unsigned long) tmp; return (rval); } /* * Return a quad_t value from an environment variable. */ int getenv_quad(const char *name, quad_t *data) { char value[KENV_MNAMELEN + 1 + KENV_MVALLEN + 1]; char *vtp; quad_t iv; if (!getenv_string(name, value, sizeof(value))) return (0); iv = strtoq(value, &vtp, 0); if (vtp == value || (vtp[0] != '\0' && vtp[1] != '\0')) return (0); switch (vtp[0]) { case 't': case 'T': iv *= 1024; /* FALLTHROUGH */ case 'g': case 'G': iv *= 1024; /* FALLTHROUGH */ case 'm': case 'M': iv *= 1024; /* FALLTHROUGH */ case 'k': case 'K': iv *= 1024; case '\0': break; default: return (0); } *data = iv; return (1); } /* * Find the next entry after the one which (cp) falls within, return a * pointer to its start or NULL if there are no more. 
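 *
 * A static environment is laid out as consecutive NUL-terminated
 * "name=value" strings with an empty string marking the end, e.g.
 * "foo=1\0bar=2\0\0", so the next entry begins just past the
 * terminating NUL of the current one.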
*/ static char * kernenv_next(char *cp) { if (cp != NULL) { while (*cp != 0) cp++; cp++; if (*cp == 0) cp = NULL; } return (cp); } void tunable_int_init(void *data) { struct tunable_int *d = (struct tunable_int *)data; TUNABLE_INT_FETCH(d->path, d->var); } void tunable_long_init(void *data) { struct tunable_long *d = (struct tunable_long *)data; TUNABLE_LONG_FETCH(d->path, d->var); } void tunable_ulong_init(void *data) { struct tunable_ulong *d = (struct tunable_ulong *)data; TUNABLE_ULONG_FETCH(d->path, d->var); } void tunable_int64_init(void *data) { struct tunable_int64 *d = (struct tunable_int64 *)data; TUNABLE_INT64_FETCH(d->path, d->var); } void tunable_uint64_init(void *data) { struct tunable_uint64 *d = (struct tunable_uint64 *)data; TUNABLE_UINT64_FETCH(d->path, d->var); } void tunable_quad_init(void *data) { struct tunable_quad *d = (struct tunable_quad *)data; TUNABLE_QUAD_FETCH(d->path, d->var); } void tunable_str_init(void *data) { struct tunable_str *d = (struct tunable_str *)data; TUNABLE_STR_FETCH(d->path, d->var, d->size); } Index: projects/clang700-import/sys/kern/kern_mutex.c =================================================================== --- projects/clang700-import/sys/kern/kern_mutex.c (revision 340426) +++ projects/clang700-import/sys/kern/kern_mutex.c (revision 340427) @@ -1,1292 +1,1292 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Berkeley Software Design Inc's name may not be used to endorse or * promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $ * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $ */ /* * Machine independent bits of mutex implementation. 
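 *
 * For context, the consumer-facing API implemented here is used in the
 * usual init/lock/unlock/destroy pattern (a sketch only; the lock name
 * and the data it protects are illustrative):
 *
 *	static struct mtx example_mtx;
 *
 *	mtx_init(&example_mtx, "example", NULL, MTX_DEF);
 *	mtx_lock(&example_mtx);
 *	... access the data protected by example_mtx ...
 *	mtx_unlock(&example_mtx);
 *	mtx_destroy(&example_mtx);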
*/ #include __FBSDID("$FreeBSD$"); #include "opt_adaptive_mutexes.h" #include "opt_ddb.h" #include "opt_hwpmc_hooks.h" #include "opt_sched.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES) #define ADAPTIVE_MUTEXES #endif #ifdef HWPMC_HOOKS #include PMC_SOFT_DEFINE( , , lock, failed); #endif /* * Return the mutex address when the lock cookie address is provided. * This functionality assumes that struct mtx* have a member named mtx_lock. */ #define mtxlock2mtx(c) (__containerof(c, struct mtx, mtx_lock)) /* * Internal utility macros. */ #define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED) #define mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED) static void assert_mtx(const struct lock_object *lock, int what); #ifdef DDB static void db_show_mtx(const struct lock_object *lock); #endif static void lock_mtx(struct lock_object *lock, uintptr_t how); static void lock_spin(struct lock_object *lock, uintptr_t how); #ifdef KDTRACE_HOOKS static int owner_mtx(const struct lock_object *lock, struct thread **owner); #endif static uintptr_t unlock_mtx(struct lock_object *lock); static uintptr_t unlock_spin(struct lock_object *lock); /* * Lock classes for sleep and spin mutexes. */ struct lock_class lock_class_mtx_sleep = { .lc_name = "sleep mutex", .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE, .lc_assert = assert_mtx, #ifdef DDB .lc_ddb_show = db_show_mtx, #endif .lc_lock = lock_mtx, .lc_unlock = unlock_mtx, #ifdef KDTRACE_HOOKS .lc_owner = owner_mtx, #endif }; struct lock_class lock_class_mtx_spin = { .lc_name = "spin mutex", .lc_flags = LC_SPINLOCK | LC_RECURSABLE, .lc_assert = assert_mtx, #ifdef DDB .lc_ddb_show = db_show_mtx, #endif .lc_lock = lock_spin, .lc_unlock = unlock_spin, #ifdef KDTRACE_HOOKS .lc_owner = owner_mtx, #endif }; #ifdef ADAPTIVE_MUTEXES static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD, NULL, "mtx debugging"); static struct lock_delay_config __read_frequently mtx_delay; SYSCTL_INT(_debug_mtx, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_delay.base, 0, ""); SYSCTL_INT(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max, 0, ""); LOCK_DELAY_SYSINIT_DEFAULT(mtx_delay); #endif static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin, CTLFLAG_RD, NULL, "mtx spin debugging"); static struct lock_delay_config __read_frequently mtx_spin_delay; SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_spin_delay.base, 0, ""); SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_spin_delay.max, 0, ""); LOCK_DELAY_SYSINIT_DEFAULT(mtx_spin_delay); /* * System-wide mutexes */ struct mtx blocked_lock; struct mtx __exclusive_cache_line Giant; static void _mtx_lock_indefinite_check(struct mtx *, struct lock_delay_arg *); void assert_mtx(const struct lock_object *lock, int what) { mtx_assert((const struct mtx *)lock, what); } void lock_mtx(struct lock_object *lock, uintptr_t how) { mtx_lock((struct mtx *)lock); } void lock_spin(struct lock_object *lock, uintptr_t how) { panic("spin locks can only use msleep_spin"); } uintptr_t unlock_mtx(struct lock_object *lock) { struct mtx *m; m = (struct mtx *)lock; mtx_assert(m, MA_OWNED | MA_NOTRECURSED); mtx_unlock(m); return (0); } uintptr_t unlock_spin(struct lock_object *lock) { panic("spin locks can only use msleep_spin"); } #ifdef KDTRACE_HOOKS int owner_mtx(const struct lock_object *lock, struct thread 
**owner) { const struct mtx *m; uintptr_t x; m = (const struct mtx *)lock; x = m->mtx_lock; *owner = (struct thread *)(x & ~MTX_FLAGMASK); return (*owner != NULL); } #endif /* * Function versions of the inlined __mtx_* macros. These are used by * modules and can also be called from assembly language if needed. */ void __mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; uintptr_t tid, v; m = mtxlock2mtx(c); KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() || !TD_IS_IDLETHREAD(curthread), ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d", curthread, m->lock_object.lo_name, file, line)); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_lock() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep, ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); tid = (uintptr_t)curthread; v = MTX_UNOWNED; if (!_mtx_obtain_lock_fetch(m, &v, tid)) _mtx_lock_sleep(m, v, opts, file, line); else LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, 0, 0, file, line); LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file, line); WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE, file, line); TD_LOCKS_INC(curthread); } void __mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; m = mtxlock2mtx(c); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_unlock() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep, ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file, line); mtx_assert(m, MA_OWNED); #ifdef LOCK_PROFILING __mtx_unlock_sleep(c, (uintptr_t)curthread, opts, file, line); #else __mtx_unlock(m, curthread, opts, file, line); #endif TD_LOCKS_DEC(curthread); } void __mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; #ifdef SMP uintptr_t tid, v; #endif m = mtxlock2mtx(c); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, ("mtx_lock_spin() of sleep mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); if (mtx_owned(m)) KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 || (opts & MTX_RECURSE) != 0, ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n", m->lock_object.lo_name, file, line)); opts &= ~MTX_RECURSE; WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); #ifdef SMP spinlock_enter(); tid = (uintptr_t)curthread; v = MTX_UNOWNED; if (!_mtx_obtain_lock_fetch(m, &v, tid)) _mtx_lock_spin(m, v, opts, file, line); else LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m, 0, 0, file, line); #else __mtx_lock_spin(m, curthread, opts, file, line); #endif LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file, line); WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); } int __mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; if (SCHEDULER_STOPPED()) return (1); m = mtxlock2mtx(c); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == 
&lock_class_mtx_spin, ("mtx_trylock_spin() of sleep mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); KASSERT((opts & MTX_RECURSE) == 0, ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n", m->lock_object.lo_name, file, line)); if (__mtx_trylock_spin(m, curthread, opts, file, line)) { LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line); WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); return (1); } LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line); return (0); } void __mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; m = mtxlock2mtx(c); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, ("mtx_unlock_spin() of sleep mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file, line); mtx_assert(m, MA_OWNED); __mtx_unlock_spin(m); } /* * The important part of mtx_trylock{,_flags}() * Tries to acquire lock `m.' If this function is called on a mutex that * is already owned, it will recursively acquire the lock. */ int _mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF) { struct thread *td; uintptr_t tid, v; #ifdef LOCK_PROFILING uint64_t waittime = 0; int contested = 0; #endif int rval; bool recursed; td = curthread; tid = (uintptr_t)td; if (SCHEDULER_STOPPED_TD(td)) return (1); KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td), ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d", curthread, m->lock_object.lo_name, file, line)); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_trylock() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep, ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); rval = 1; recursed = false; v = MTX_UNOWNED; for (;;) { if (_mtx_obtain_lock_fetch(m, &v, tid)) break; if (v == MTX_UNOWNED) continue; if (v == tid && ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 || (opts & MTX_RECURSE) != 0)) { m->mtx_recurse++; atomic_set_ptr(&m->mtx_lock, MTX_RECURSED); recursed = true; break; } rval = 0; break; } opts &= ~MTX_RECURSE; LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line); if (rval) { WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK, file, line); TD_LOCKS_INC(curthread); if (!recursed) LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested, waittime, file, line); } return (rval); } int _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; m = mtxlock2mtx(c); return (_mtx_trylock_flags_int(m, opts LOCK_FILE_LINE_ARG)); } /* * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock. * * We call this if the lock is either contested (i.e. we need to go to * sleep waiting for it), or if we need to recurse on it. 
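 *
 * In outline: recursion is handled first; with ADAPTIVE_MUTEXES the
 * thread then spins as long as the current owner is running on another
 * CPU, and only once that fails does it block on the mutex's turnstile
 * until the releasing thread wakes it.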
*/ #if LOCK_DEBUG > 0 void __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts, const char *file, int line) #else void __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v) #endif { struct thread *td; struct mtx *m; struct turnstile *ts; uintptr_t tid; struct thread *owner; #ifdef LOCK_PROFILING int contested = 0; uint64_t waittime = 0; #endif #if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS) struct lock_delay_arg lda; #endif #ifdef KDTRACE_HOOKS u_int sleep_cnt = 0; int64_t sleep_time = 0; int64_t all_time = 0; #endif #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) - int doing_lockprof; + int doing_lockprof = 0; #endif td = curthread; tid = (uintptr_t)td; m = mtxlock2mtx(c); #ifdef KDTRACE_HOOKS if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) { while (v == MTX_UNOWNED) { if (_mtx_obtain_lock_fetch(m, &v, tid)) goto out_lockstat; } doing_lockprof = 1; all_time -= lockstat_nsecs(&m->lock_object); } #endif #ifdef LOCK_PROFILING doing_lockprof = 1; #endif if (SCHEDULER_STOPPED_TD(td)) return; #if defined(ADAPTIVE_MUTEXES) lock_delay_arg_init(&lda, &mtx_delay); #elif defined(KDTRACE_HOOKS) lock_delay_arg_init(&lda, NULL); #endif if (__predict_false(v == MTX_UNOWNED)) v = MTX_READ_VALUE(m); if (__predict_false(lv_mtx_owner(v) == td)) { KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 || (opts & MTX_RECURSE) != 0, ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n", m->lock_object.lo_name, file, line)); #if LOCK_DEBUG > 0 opts &= ~MTX_RECURSE; #endif m->mtx_recurse++; atomic_set_ptr(&m->mtx_lock, MTX_RECURSED); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m); return; } #if LOCK_DEBUG > 0 opts &= ~MTX_RECURSE; #endif #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR4(KTR_LOCK, "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d", m->lock_object.lo_name, (void *)m->mtx_lock, file, line); for (;;) { if (v == MTX_UNOWNED) { if (_mtx_obtain_lock_fetch(m, &v, tid)) break; continue; } #ifdef KDTRACE_HOOKS lda.spin_cnt++; #endif #ifdef ADAPTIVE_MUTEXES /* * If the owner is running on another CPU, spin until the * owner stops running or the state of the lock changes. */ owner = lv_mtx_owner(v); if (TD_IS_RUNNING(owner)) { if (LOCK_LOG_TEST(&m->lock_object, 0)) CTR3(KTR_LOCK, "%s: spinning on %p held by %p", __func__, m, owner); KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid), "spinning", "lockname:\"%s\"", m->lock_object.lo_name); do { lock_delay(&lda); v = MTX_READ_VALUE(m); owner = lv_mtx_owner(v); } while (v != MTX_UNOWNED && TD_IS_RUNNING(owner)); KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid), "running"); continue; } #endif ts = turnstile_trywait(&m->lock_object); v = MTX_READ_VALUE(m); retry_turnstile: /* * Check if the lock has been released while spinning for * the turnstile chain lock. */ if (v == MTX_UNOWNED) { turnstile_cancel(ts); continue; } #ifdef ADAPTIVE_MUTEXES /* * The current lock owner might have started executing * on another CPU (or the lock could have changed * owners) while we were waiting on the turnstile * chain lock. If so, drop the turnstile lock and try * again. */ owner = lv_mtx_owner(v); if (TD_IS_RUNNING(owner)) { turnstile_cancel(ts); continue; } #endif /* * If the mutex isn't already contested and a failure occurs * setting the contested bit, the mutex was either released * or the state of the MTX_RECURSED bit changed. 
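		 * In either case the failed fcmpset has refreshed 'v', so
		 * retry the turnstile checks with the updated value while
		 * the chain lock is still held.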
*/ if ((v & MTX_CONTESTED) == 0 && !atomic_fcmpset_ptr(&m->mtx_lock, &v, v | MTX_CONTESTED)) { goto retry_turnstile; } /* * We definitely must sleep for this lock. */ mtx_assert(m, MA_NOTOWNED); /* * Block on the turnstile. */ #ifdef KDTRACE_HOOKS sleep_time -= lockstat_nsecs(&m->lock_object); #endif #ifndef ADAPTIVE_MUTEXES owner = mtx_owner(m); #endif MPASS(owner == mtx_owner(m)); turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE); #ifdef KDTRACE_HOOKS sleep_time += lockstat_nsecs(&m->lock_object); sleep_cnt++; #endif v = MTX_READ_VALUE(m); } #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) if (__predict_true(!doing_lockprof)) return; #endif #ifdef KDTRACE_HOOKS all_time += lockstat_nsecs(&m->lock_object); if (sleep_time) LOCKSTAT_RECORD1(adaptive__block, m, sleep_time); /* * Only record the loops spinning and not sleeping. */ if (lda.spin_cnt > sleep_cnt) LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time); out_lockstat: #endif LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested, waittime, file, line); } #ifdef SMP /* * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock. * * This is only called if we need to actually spin for the lock. Recursion * is handled inline. */ #if LOCK_DEBUG > 0 void _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts, const char *file, int line) #else void _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v) #endif { struct mtx *m; struct lock_delay_arg lda; uintptr_t tid; #ifdef LOCK_PROFILING int contested = 0; uint64_t waittime = 0; #endif #ifdef KDTRACE_HOOKS int64_t spin_time = 0; #endif #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) - int doing_lockprof; + int doing_lockprof = 0; #endif tid = (uintptr_t)curthread; m = mtxlock2mtx(c); #ifdef KDTRACE_HOOKS if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) { while (v == MTX_UNOWNED) { if (_mtx_obtain_lock_fetch(m, &v, tid)) goto out_lockstat; } doing_lockprof = 1; spin_time -= lockstat_nsecs(&m->lock_object); } #endif #ifdef LOCK_PROFILING doing_lockprof = 1; #endif if (__predict_false(v == MTX_UNOWNED)) v = MTX_READ_VALUE(m); if (__predict_false(v == tid)) { m->mtx_recurse++; return; } if (SCHEDULER_STOPPED()) return; lock_delay_arg_init(&lda, &mtx_spin_delay); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m); KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid), "spinning", "lockname:\"%s\"", m->lock_object.lo_name); #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime); for (;;) { if (v == MTX_UNOWNED) { if (_mtx_obtain_lock_fetch(m, &v, tid)) break; continue; } /* Give interrupts a chance while we spin. 
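		 * The caller entered a spinlock section (disabling
		 * interrupts) before calling here; leave it for the
		 * duration of the busy-wait and re-enter it before retrying
		 * the acquire. _mtx_lock_indefinite_check() takes over once
		 * the spin count grows very large.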
*/ spinlock_exit(); do { if (__predict_true(lda.spin_cnt < 10000000)) { lock_delay(&lda); } else { _mtx_lock_indefinite_check(m, &lda); } v = MTX_READ_VALUE(m); } while (v != MTX_UNOWNED); spinlock_enter(); } if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m); KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid), "running"); #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) if (__predict_true(!doing_lockprof)) return; #endif #ifdef KDTRACE_HOOKS spin_time += lockstat_nsecs(&m->lock_object); if (lda.spin_cnt != 0) LOCKSTAT_RECORD1(spin__spin, m, spin_time); out_lockstat: #endif LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m, contested, waittime, file, line); } #endif /* SMP */ #ifdef INVARIANTS static void thread_lock_validate(struct mtx *m, int opts, const char *file, int line) { KASSERT(m->mtx_lock != MTX_DESTROYED, ("thread_lock() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, ("thread_lock() of sleep mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); if (mtx_owned(m)) KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0, ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n", m->lock_object.lo_name, file, line)); WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); } #else #define thread_lock_validate(m, opts, file, line) do { } while (0) #endif #ifndef LOCK_PROFILING #if LOCK_DEBUG > 0 void _thread_lock(struct thread *td, int opts, const char *file, int line) #else void _thread_lock(struct thread *td) #endif { struct mtx *m; uintptr_t tid, v; tid = (uintptr_t)curthread; if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire))) goto slowpath_noirq; spinlock_enter(); m = td->td_lock; thread_lock_validate(m, 0, file, line); v = MTX_READ_VALUE(m); if (__predict_true(v == MTX_UNOWNED)) { if (__predict_false(!_mtx_obtain_lock(m, tid))) goto slowpath_unlocked; } else if (v == tid) { m->mtx_recurse++; } else goto slowpath_unlocked; if (__predict_true(m == td->td_lock)) { WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line); return; } MPASS(m->mtx_recurse == 0); _mtx_release_lock_quick(m); slowpath_unlocked: spinlock_exit(); slowpath_noirq: #if LOCK_DEBUG > 0 thread_lock_flags_(td, opts, file, line); #else thread_lock_flags_(td, 0, 0, 0); #endif } #endif void thread_lock_flags_(struct thread *td, int opts, const char *file, int line) { struct mtx *m; uintptr_t tid, v; struct lock_delay_arg lda; #ifdef LOCK_PROFILING int contested = 0; uint64_t waittime = 0; #endif #ifdef KDTRACE_HOOKS int64_t spin_time = 0; #endif #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) int doing_lockprof = 1; #endif tid = (uintptr_t)curthread; if (SCHEDULER_STOPPED()) { /* * Ensure that spinlock sections are balanced even when the * scheduler is stopped, since we may otherwise inadvertently * re-enable interrupts while dumping core. 
*/ spinlock_enter(); return; } lock_delay_arg_init(&lda, &mtx_spin_delay); #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif #ifdef LOCK_PROFILING doing_lockprof = 1; #elif defined(KDTRACE_HOOKS) doing_lockprof = lockstat_enabled; if (__predict_false(doing_lockprof)) spin_time -= lockstat_nsecs(&td->td_lock->lock_object); #endif spinlock_enter(); for (;;) { retry: m = td->td_lock; thread_lock_validate(m, opts, file, line); v = MTX_READ_VALUE(m); for (;;) { if (v == MTX_UNOWNED) { if (_mtx_obtain_lock_fetch(m, &v, tid)) break; continue; } if (v == tid) { m->mtx_recurse++; MPASS(m == td->td_lock); break; } lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime); /* Give interrupts a chance while we spin. */ spinlock_exit(); do { if (__predict_true(lda.spin_cnt < 10000000)) { lock_delay(&lda); } else { _mtx_lock_indefinite_check(m, &lda); } if (m != td->td_lock) { spinlock_enter(); goto retry; } v = MTX_READ_VALUE(m); } while (v != MTX_UNOWNED); spinlock_enter(); } if (m == td->td_lock) break; MPASS(m->mtx_recurse == 0); _mtx_release_lock_quick(m); } LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file, line); WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) if (__predict_true(!doing_lockprof)) return; #endif #ifdef KDTRACE_HOOKS spin_time += lockstat_nsecs(&m->lock_object); #endif if (m->mtx_recurse == 0) LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m, contested, waittime, file, line); #ifdef KDTRACE_HOOKS if (lda.spin_cnt != 0) LOCKSTAT_RECORD1(thread__spin, m, spin_time); #endif } struct mtx * thread_lock_block(struct thread *td) { struct mtx *lock; THREAD_LOCK_ASSERT(td, MA_OWNED); lock = td->td_lock; td->td_lock = &blocked_lock; mtx_unlock_spin(lock); return (lock); } void thread_lock_unblock(struct thread *td, struct mtx *new) { mtx_assert(new, MA_OWNED); MPASS(td->td_lock == &blocked_lock); atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new); } void thread_lock_set(struct thread *td, struct mtx *new) { struct mtx *lock; mtx_assert(new, MA_OWNED); THREAD_LOCK_ASSERT(td, MA_OWNED); lock = td->td_lock; td->td_lock = new; mtx_unlock_spin(lock); } /* * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock. * * We are only called here if the lock is recursed, contested (i.e. we * need to wake up a blocked thread) or lockstat probe is active. */ #if LOCK_DEBUG > 0 void __mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts, const char *file, int line) #else void __mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v) #endif { struct mtx *m; struct turnstile *ts; uintptr_t tid; if (SCHEDULER_STOPPED()) return; tid = (uintptr_t)curthread; m = mtxlock2mtx(c); if (__predict_false(v == tid)) v = MTX_READ_VALUE(m); if (__predict_false(v & MTX_RECURSED)) { if (--(m->mtx_recurse) == 0) atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m); return; } LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m); if (v == tid && _mtx_release_lock(m, tid)) return; /* * We have to lock the chain before the turnstile so this turnstile * can be removed from the hash list if it is empty. 
*/ turnstile_chain_lock(&m->lock_object); _mtx_release_lock_quick(m); ts = turnstile_lookup(&m->lock_object); MPASS(ts != NULL); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m); turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE); /* * This turnstile is now no longer associated with the mutex. We can * unlock the chain lock so a new turnstile may take it's place. */ turnstile_unpend(ts); turnstile_chain_unlock(&m->lock_object); } /* * All the unlocking of MTX_SPIN locks is done inline. * See the __mtx_unlock_spin() macro for the details. */ /* * The backing function for the INVARIANTS-enabled mtx_assert() */ #ifdef INVARIANT_SUPPORT void __mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line) { const struct mtx *m; if (panicstr != NULL || dumping || SCHEDULER_STOPPED()) return; m = mtxlock2mtx(c); switch (what) { case MA_OWNED: case MA_OWNED | MA_RECURSED: case MA_OWNED | MA_NOTRECURSED: if (!mtx_owned(m)) panic("mutex %s not owned at %s:%d", m->lock_object.lo_name, file, line); if (mtx_recursed(m)) { if ((what & MA_NOTRECURSED) != 0) panic("mutex %s recursed at %s:%d", m->lock_object.lo_name, file, line); } else if ((what & MA_RECURSED) != 0) { panic("mutex %s unrecursed at %s:%d", m->lock_object.lo_name, file, line); } break; case MA_NOTOWNED: if (mtx_owned(m)) panic("mutex %s owned at %s:%d", m->lock_object.lo_name, file, line); break; default: panic("unknown mtx_assert at %s:%d", file, line); } } #endif /* * General init routine used by the MTX_SYSINIT() macro. */ void mtx_sysinit(void *arg) { struct mtx_args *margs = arg; mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts); } /* * Mutex initialization routine; initialize lock `m' of type contained in * `opts' with options contained in `opts' and name `name.' The optional * lock type `type' is used as a general lock category name for use with * witness. */ void _mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts) { struct mtx *m; struct lock_class *class; int flags; m = mtxlock2mtx(c); MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE | MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0); ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock, ("%s: mtx_lock not aligned for %s: %p", __func__, name, &m->mtx_lock)); /* Determine lock class and lock flags. */ if (opts & MTX_SPIN) class = &lock_class_mtx_spin; else class = &lock_class_mtx_sleep; flags = 0; if (opts & MTX_QUIET) flags |= LO_QUIET; if (opts & MTX_RECURSE) flags |= LO_RECURSABLE; if ((opts & MTX_NOWITNESS) == 0) flags |= LO_WITNESS; if (opts & MTX_DUPOK) flags |= LO_DUPOK; if (opts & MTX_NOPROFILE) flags |= LO_NOPROFILE; if (opts & MTX_NEW) flags |= LO_NEW; /* Initialize mutex. */ lock_init(&m->lock_object, class, name, type, flags); m->mtx_lock = MTX_UNOWNED; m->mtx_recurse = 0; } /* * Remove lock `m' from all_mtx queue. We don't allow MTX_QUIET to be * passed in as a flag here because if the corresponding mtx_init() was * called with MTX_QUIET set, then it will already be set in the mutex's * flags. */ void _mtx_destroy(volatile uintptr_t *c) { struct mtx *m; m = mtxlock2mtx(c); if (!mtx_owned(m)) MPASS(mtx_unowned(m)); else { MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0); /* Perform the non-mtx related part of mtx_unlock_spin(). */ if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin) spinlock_exit(); else TD_LOCKS_DEC(curthread); lock_profile_release_lock(&m->lock_object); /* Tell witness this isn't locked to make it happy. 
*/ WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__, __LINE__); } m->mtx_lock = MTX_DESTROYED; lock_destroy(&m->lock_object); } /* * Intialize the mutex code and system mutexes. This is called from the MD * startup code prior to mi_startup(). The per-CPU data space needs to be * setup before this is called. */ void mutex_init(void) { /* Setup turnstiles so that sleep mutexes work. */ init_turnstiles(); /* * Initialize mutexes. */ mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE); mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN); blocked_lock.mtx_lock = 0xdeadc0de; /* Always blocked. */ mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK); mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN); mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN); mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN); mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN); mtx_init(&devmtx, "cdev", NULL, MTX_DEF); mtx_lock(&Giant); } static void __noinline _mtx_lock_indefinite_check(struct mtx *m, struct lock_delay_arg *ldap) { struct thread *td; ldap->spin_cnt++; if (ldap->spin_cnt < 60000000 || kdb_active || panicstr != NULL) cpu_lock_delay(); else { td = mtx_owner(m); /* If the mutex is unlocked, try again. */ if (td == NULL) return; printf( "spin lock %p (%s) held by %p (tid %d) too long\n", m, m->lock_object.lo_name, td, td->td_tid); #ifdef WITNESS witness_display_spinlock(&m->lock_object, td, printf); #endif panic("spin lock held too long"); } cpu_spinwait(); } void mtx_spin_wait_unlocked(struct mtx *m) { struct lock_delay_arg lda; KASSERT(m->mtx_lock != MTX_DESTROYED, ("%s() of destroyed mutex %p", __func__, m)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, ("%s() of sleep mutex %p (%s)", __func__, m, m->lock_object.lo_name)); KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m, m->lock_object.lo_name)); lda.spin_cnt = 0; while (atomic_load_acq_ptr(&m->mtx_lock) != MTX_UNOWNED) { if (__predict_true(lda.spin_cnt < 10000000)) { cpu_spinwait(); lda.spin_cnt++; } else { _mtx_lock_indefinite_check(m, &lda); } } } #ifdef DDB void db_show_mtx(const struct lock_object *lock) { struct thread *td; const struct mtx *m; m = (const struct mtx *)lock; db_printf(" flags: {"); if (LOCK_CLASS(lock) == &lock_class_mtx_spin) db_printf("SPIN"); else db_printf("DEF"); if (m->lock_object.lo_flags & LO_RECURSABLE) db_printf(", RECURSE"); if (m->lock_object.lo_flags & LO_DUPOK) db_printf(", DUPOK"); db_printf("}\n"); db_printf(" state: {"); if (mtx_unowned(m)) db_printf("UNOWNED"); else if (mtx_destroyed(m)) db_printf("DESTROYED"); else { db_printf("OWNED"); if (m->mtx_lock & MTX_CONTESTED) db_printf(", CONTESTED"); if (m->mtx_lock & MTX_RECURSED) db_printf(", RECURSED"); } db_printf("}\n"); if (!mtx_unowned(m) && !mtx_destroyed(m)) { td = mtx_owner(m); db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td, td->td_tid, td->td_proc->p_pid, td->td_name); if (mtx_recursed(m)) db_printf(" recursed: %d\n", m->mtx_recurse); } } #endif Index: projects/clang700-import/sys/kern/kern_rmlock.c =================================================================== --- projects/clang700-import/sys/kern/kern_rmlock.c (revision 340426) +++ projects/clang700-import/sys/kern/kern_rmlock.c (revision 340427) @@ -1,855 +1,855 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2007 Stephan Uphoff * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Machine independent bits of reader/writer lock implementation. */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif /* * A cookie to mark destroyed rmlocks. This is stored in the head of * rm_activeReaders. 
*/ #define RM_DESTROYED ((void *)0xdead) #define rm_destroyed(rm) \ (LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED) #define RMPF_ONQUEUE 1 #define RMPF_SIGNAL 2 #ifndef INVARIANTS #define _rm_assert(c, what, file, line) #endif static void assert_rm(const struct lock_object *lock, int what); #ifdef DDB static void db_show_rm(const struct lock_object *lock); #endif static void lock_rm(struct lock_object *lock, uintptr_t how); #ifdef KDTRACE_HOOKS static int owner_rm(const struct lock_object *lock, struct thread **owner); #endif static uintptr_t unlock_rm(struct lock_object *lock); struct lock_class lock_class_rm = { .lc_name = "rm", .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE, .lc_assert = assert_rm, #ifdef DDB .lc_ddb_show = db_show_rm, #endif .lc_lock = lock_rm, .lc_unlock = unlock_rm, #ifdef KDTRACE_HOOKS .lc_owner = owner_rm, #endif }; struct lock_class lock_class_rm_sleepable = { .lc_name = "sleepable rm", .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE, .lc_assert = assert_rm, #ifdef DDB .lc_ddb_show = db_show_rm, #endif .lc_lock = lock_rm, .lc_unlock = unlock_rm, #ifdef KDTRACE_HOOKS .lc_owner = owner_rm, #endif }; static void assert_rm(const struct lock_object *lock, int what) { rm_assert((const struct rmlock *)lock, what); } static void lock_rm(struct lock_object *lock, uintptr_t how) { struct rmlock *rm; struct rm_priotracker *tracker; rm = (struct rmlock *)lock; if (how == 0) rm_wlock(rm); else { tracker = (struct rm_priotracker *)how; rm_rlock(rm, tracker); } } static uintptr_t unlock_rm(struct lock_object *lock) { struct thread *td; struct pcpu *pc; struct rmlock *rm; struct rm_queue *queue; struct rm_priotracker *tracker; uintptr_t how; rm = (struct rmlock *)lock; tracker = NULL; how = 0; rm_assert(rm, RA_LOCKED | RA_NOTRECURSED); if (rm_wowned(rm)) rm_wunlock(rm); else { /* * Find the right rm_priotracker structure for curthread. * The guarantee about its uniqueness is given by the fact * we already asserted the lock wasn't recursively acquired. */ critical_enter(); td = curthread; pc = get_pcpu(); for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue; queue = queue->rmq_next) { tracker = (struct rm_priotracker *)queue; if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td)) { how = (uintptr_t)tracker; break; } } KASSERT(tracker != NULL, ("rm_priotracker is non-NULL when lock held in read mode")); critical_exit(); rm_runlock(rm, tracker); } return (how); } #ifdef KDTRACE_HOOKS static int owner_rm(const struct lock_object *lock, struct thread **owner) { const struct rmlock *rm; struct lock_class *lc; rm = (const struct rmlock *)lock; lc = LOCK_CLASS(&rm->rm_wlock_object); return (lc->lc_owner(&rm->rm_wlock_object, owner)); } #endif static struct mtx rm_spinlock; MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN); /* * Add or remove tracker from per-cpu list. * * The per-cpu list can be traversed at any time in forward direction from an * interrupt on the *local* cpu. */ static void inline rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker) { struct rm_queue *next; /* Initialize all tracker pointers */ tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue; next = pc->pc_rm_queue.rmq_next; tracker->rmp_cpuQueue.rmq_next = next; /* rmq_prev is not used during froward traversal. */ next->rmq_prev = &tracker->rmp_cpuQueue; /* Update pointer to first element. */ pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue; } /* * Return a count of the number of trackers the thread 'td' already * has on this CPU for the lock 'rm'. 
*/ static int rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm, const struct thread *td) { struct rm_queue *queue; struct rm_priotracker *tracker; int count; count = 0; for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue; queue = queue->rmq_next) { tracker = (struct rm_priotracker *)queue; if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td)) count++; } return (count); } static void inline rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker) { struct rm_queue *next, *prev; next = tracker->rmp_cpuQueue.rmq_next; prev = tracker->rmp_cpuQueue.rmq_prev; /* Not used during forward traversal. */ next->rmq_prev = prev; /* Remove from list. */ prev->rmq_next = next; } static void rm_cleanIPI(void *arg) { struct pcpu *pc; struct rmlock *rm = arg; struct rm_priotracker *tracker; struct rm_queue *queue; pc = get_pcpu(); for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue; queue = queue->rmq_next) { tracker = (struct rm_priotracker *)queue; if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) { tracker->rmp_flags = RMPF_ONQUEUE; mtx_lock_spin(&rm_spinlock); LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker, rmp_qentry); mtx_unlock_spin(&rm_spinlock); } } } void rm_init_flags(struct rmlock *rm, const char *name, int opts) { struct lock_class *lc; int liflags, xflags; liflags = 0; if (!(opts & RM_NOWITNESS)) liflags |= LO_WITNESS; if (opts & RM_RECURSE) liflags |= LO_RECURSABLE; if (opts & RM_NEW) liflags |= LO_NEW; rm->rm_writecpus = all_cpus; LIST_INIT(&rm->rm_activeReaders); if (opts & RM_SLEEPABLE) { liflags |= LO_SLEEPABLE; lc = &lock_class_rm_sleepable; xflags = (opts & RM_NEW ? SX_NEW : 0); sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", xflags | SX_NOWITNESS); } else { lc = &lock_class_rm; xflags = (opts & RM_NEW ? MTX_NEW : 0); mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", xflags | MTX_NOWITNESS); } lock_init(&rm->lock_object, lc, name, NULL, liflags); } void rm_init(struct rmlock *rm, const char *name) { rm_init_flags(rm, name, 0); } void rm_destroy(struct rmlock *rm) { rm_assert(rm, RA_UNLOCKED); LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED; if (rm->lock_object.lo_flags & LO_SLEEPABLE) sx_destroy(&rm->rm_lock_sx); else mtx_destroy(&rm->rm_lock_mtx); lock_destroy(&rm->lock_object); } int rm_wowned(const struct rmlock *rm) { if (rm->lock_object.lo_flags & LO_SLEEPABLE) return (sx_xlocked(&rm->rm_lock_sx)); else return (mtx_owned(&rm->rm_lock_mtx)); } void rm_sysinit(void *arg) { struct rm_args *args; args = arg; rm_init_flags(args->ra_rm, args->ra_desc, args->ra_flags); } static __noinline int _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock) { struct pcpu *pc; critical_enter(); pc = get_pcpu(); /* Check if we just need to do a proper critical_exit. */ if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) { critical_exit(); return (1); } /* Remove our tracker from the per-cpu list. */ rm_tracker_remove(pc, tracker); /* Check to see if the IPI granted us the lock after all. */ if (tracker->rmp_flags) { /* Just add back tracker - we hold the lock. */ rm_tracker_add(pc, tracker); critical_exit(); return (1); } /* * We allow readers to acquire a lock even if a writer is blocked if * the lock is recursive and the reader already holds the lock. */ if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) { /* * Just grant the lock if this thread already has a tracker * for this lock on the per-cpu queue. 
*/ if (rm_trackers_present(pc, rm, curthread) != 0) { mtx_lock_spin(&rm_spinlock); LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker, rmp_qentry); tracker->rmp_flags = RMPF_ONQUEUE; mtx_unlock_spin(&rm_spinlock); rm_tracker_add(pc, tracker); critical_exit(); return (1); } } sched_unpin(); critical_exit(); if (trylock) { if (rm->lock_object.lo_flags & LO_SLEEPABLE) { if (!sx_try_xlock(&rm->rm_lock_sx)) return (0); } else { if (!mtx_trylock(&rm->rm_lock_mtx)) return (0); } } else { if (rm->lock_object.lo_flags & LO_SLEEPABLE) { THREAD_SLEEPING_OK(); sx_xlock(&rm->rm_lock_sx); THREAD_NO_SLEEPING(); } else mtx_lock(&rm->rm_lock_mtx); } critical_enter(); pc = get_pcpu(); CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus); rm_tracker_add(pc, tracker); sched_pin(); critical_exit(); if (rm->lock_object.lo_flags & LO_SLEEPABLE) sx_xunlock(&rm->rm_lock_sx); else mtx_unlock(&rm->rm_lock_mtx); return (1); } int _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock) { struct thread *td = curthread; struct pcpu *pc; if (SCHEDULER_STOPPED()) return (1); tracker->rmp_flags = 0; tracker->rmp_thread = td; tracker->rmp_rmlock = rm; if (rm->lock_object.lo_flags & LO_SLEEPABLE) THREAD_NO_SLEEPING(); td->td_critnest++; /* critical_enter(); */ __compiler_membar(); pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */ rm_tracker_add(pc, tracker); sched_pin(); __compiler_membar(); td->td_critnest--; /* * Fast path to combine two common conditions into a single * conditional jump. */ if (__predict_true(0 == (td->td_owepreempt | CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))) return (1); /* We do not have a read token and need to acquire one. */ return _rm_rlock_hard(rm, tracker, trylock); } static __noinline void _rm_unlock_hard(struct thread *td,struct rm_priotracker *tracker) { if (td->td_owepreempt) { td->td_critnest++; critical_exit(); } if (!tracker->rmp_flags) return; mtx_lock_spin(&rm_spinlock); LIST_REMOVE(tracker, rmp_qentry); if (tracker->rmp_flags & RMPF_SIGNAL) { struct rmlock *rm; struct turnstile *ts; rm = tracker->rmp_rmlock; turnstile_chain_lock(&rm->lock_object); mtx_unlock_spin(&rm_spinlock); ts = turnstile_lookup(&rm->lock_object); turnstile_signal(ts, TS_EXCLUSIVE_QUEUE); turnstile_unpend(ts); turnstile_chain_unlock(&rm->lock_object); } else mtx_unlock_spin(&rm_spinlock); } void _rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker) { struct pcpu *pc; struct thread *td = tracker->rmp_thread; if (SCHEDULER_STOPPED()) return; td->td_critnest++; /* critical_enter(); */ pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */ rm_tracker_remove(pc, tracker); td->td_critnest--; sched_unpin(); if (rm->lock_object.lo_flags & LO_SLEEPABLE) THREAD_SLEEPING_OK(); if (__predict_true(0 == (td->td_owepreempt | tracker->rmp_flags))) return; _rm_unlock_hard(td, tracker); } void _rm_wlock(struct rmlock *rm) { struct rm_priotracker *prio; struct turnstile *ts; cpuset_t readcpus; if (SCHEDULER_STOPPED()) return; if (rm->lock_object.lo_flags & LO_SLEEPABLE) sx_xlock(&rm->rm_lock_sx); else mtx_lock(&rm->rm_lock_mtx); if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) { /* Get all read tokens back */ readcpus = all_cpus; CPU_NAND(&readcpus, &rm->rm_writecpus); rm->rm_writecpus = all_cpus; /* * Assumes rm->rm_writecpus update is visible on other CPUs * before rm_cleanIPI is called. 
*/ #ifdef SMP smp_rendezvous_cpus(readcpus, smp_no_rendezvous_barrier, rm_cleanIPI, smp_no_rendezvous_barrier, rm); #else rm_cleanIPI(rm); #endif mtx_lock_spin(&rm_spinlock); while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) { ts = turnstile_trywait(&rm->lock_object); prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL; mtx_unlock_spin(&rm_spinlock); turnstile_wait(ts, prio->rmp_thread, TS_EXCLUSIVE_QUEUE); mtx_lock_spin(&rm_spinlock); } mtx_unlock_spin(&rm_spinlock); } } void _rm_wunlock(struct rmlock *rm) { if (rm->lock_object.lo_flags & LO_SLEEPABLE) sx_xunlock(&rm->rm_lock_sx); else mtx_unlock(&rm->rm_lock_mtx); } #if LOCK_DEBUG > 0 void _rm_wlock_debug(struct rmlock *rm, const char *file, int line) { if (SCHEDULER_STOPPED()) return; KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d", curthread, rm->lock_object.lo_name, file, line)); KASSERT(!rm_destroyed(rm), ("rm_wlock() of destroyed rmlock @ %s:%d", file, line)); _rm_assert(rm, RA_UNLOCKED, file, line); WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); _rm_wlock(rm); LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line); WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line); TD_LOCKS_INC(curthread); } void _rm_wunlock_debug(struct rmlock *rm, const char *file, int line) { if (SCHEDULER_STOPPED()) return; KASSERT(!rm_destroyed(rm), ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line)); _rm_assert(rm, RA_WLOCKED, file, line); WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line); LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line); _rm_wunlock(rm); TD_LOCKS_DEC(curthread); } int _rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, int trylock, const char *file, int line) { if (SCHEDULER_STOPPED()) return (1); #ifdef INVARIANTS if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) { critical_enter(); KASSERT(rm_trackers_present(get_pcpu(), rm, curthread) == 0, ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n", rm->lock_object.lo_name, file, line)); critical_exit(); } #endif KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d", curthread, rm->lock_object.lo_name, file, line)); KASSERT(!rm_destroyed(rm), ("rm_rlock() of destroyed rmlock @ %s:%d", file, line)); if (!trylock) { KASSERT(!rm_wowned(rm), ("rm_rlock: wlock already held for %s @ %s:%d", rm->lock_object.lo_name, file, line)); WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line, NULL); } if (_rm_rlock(rm, tracker, trylock)) { if (trylock) LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file, line); else LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file, line); WITNESS_LOCK(&rm->lock_object, 0, file, line); TD_LOCKS_INC(curthread); return (1); } else if (trylock) LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line); return (0); } void _rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, const char *file, int line) { if (SCHEDULER_STOPPED()) return; KASSERT(!rm_destroyed(rm), ("rm_runlock() of destroyed rmlock @ %s:%d", file, line)); _rm_assert(rm, RA_RLOCKED, file, line); WITNESS_UNLOCK(&rm->lock_object, 0, file, line); LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line); _rm_runlock(rm, tracker); TD_LOCKS_DEC(curthread); } #else /* * Just strip out file and line arguments if no lock debugging is enabled in * the kernel - we are called from a kernel module. 
*/ void _rm_wlock_debug(struct rmlock *rm, const char *file, int line) { _rm_wlock(rm); } void _rm_wunlock_debug(struct rmlock *rm, const char *file, int line) { _rm_wunlock(rm); } int _rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, int trylock, const char *file, int line) { return _rm_rlock(rm, tracker, trylock); } void _rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, const char *file, int line) { _rm_runlock(rm, tracker); } #endif #ifdef INVARIANT_SUPPORT #ifndef INVARIANTS #undef _rm_assert #endif /* * Note that this does not need to use witness_assert() for read lock * assertions since an exact count of read locks held by this thread * is computable. */ void _rm_assert(const struct rmlock *rm, int what, const char *file, int line) { int count; - if (panicstr != NULL) + if (SCHEDULER_STOPPED()) return; switch (what) { case RA_LOCKED: case RA_LOCKED | RA_RECURSED: case RA_LOCKED | RA_NOTRECURSED: case RA_RLOCKED: case RA_RLOCKED | RA_RECURSED: case RA_RLOCKED | RA_NOTRECURSED: /* * Handle the write-locked case. Unlike other * primitives, writers can never recurse. */ if (rm_wowned(rm)) { if (what & RA_RLOCKED) panic("Lock %s exclusively locked @ %s:%d\n", rm->lock_object.lo_name, file, line); if (what & RA_RECURSED) panic("Lock %s not recursed @ %s:%d\n", rm->lock_object.lo_name, file, line); break; } critical_enter(); count = rm_trackers_present(get_pcpu(), rm, curthread); critical_exit(); if (count == 0) panic("Lock %s not %slocked @ %s:%d\n", rm->lock_object.lo_name, (what & RA_RLOCKED) ? "read " : "", file, line); if (count > 1) { if (what & RA_NOTRECURSED) panic("Lock %s recursed @ %s:%d\n", rm->lock_object.lo_name, file, line); } else if (what & RA_RECURSED) panic("Lock %s not recursed @ %s:%d\n", rm->lock_object.lo_name, file, line); break; case RA_WLOCKED: if (!rm_wowned(rm)) panic("Lock %s not exclusively locked @ %s:%d\n", rm->lock_object.lo_name, file, line); break; case RA_UNLOCKED: if (rm_wowned(rm)) panic("Lock %s exclusively locked @ %s:%d\n", rm->lock_object.lo_name, file, line); critical_enter(); count = rm_trackers_present(get_pcpu(), rm, curthread); critical_exit(); if (count != 0) panic("Lock %s read locked @ %s:%d\n", rm->lock_object.lo_name, file, line); break; default: panic("Unknown rm lock assertion: %d @ %s:%d", what, file, line); } } #endif /* INVARIANT_SUPPORT */ #ifdef DDB static void print_tracker(struct rm_priotracker *tr) { struct thread *td; td = tr->rmp_thread; db_printf(" thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid, td->td_proc->p_pid, td->td_name); if (tr->rmp_flags & RMPF_ONQUEUE) { db_printf("ONQUEUE"); if (tr->rmp_flags & RMPF_SIGNAL) db_printf(",SIGNAL"); } else db_printf("0"); db_printf("}\n"); } static void db_show_rm(const struct lock_object *lock) { struct rm_priotracker *tr; struct rm_queue *queue; const struct rmlock *rm; struct lock_class *lc; struct pcpu *pc; rm = (const struct rmlock *)lock; db_printf(" writecpus: "); ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus)); db_printf("\n"); db_printf(" per-CPU readers:\n"); STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue; queue = queue->rmq_next) { tr = (struct rm_priotracker *)queue; if (tr->rmp_rmlock == rm) print_tracker(tr); } db_printf(" active readers:\n"); LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry) print_tracker(tr); lc = LOCK_CLASS(&rm->rm_wlock_object); db_printf("Backing write-lock (%s):\n", lc->lc_name); lc->lc_ddb_show(&rm->rm_wlock_object); } #endif 
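The rmlock read path shown above takes no atomics in the common case: the caller supplies a struct rm_priotracker on its own stack, _rm_rlock() links it onto the per-CPU tracker queue inside a critical section, and only a writer (via smp_rendezvous_cpus() and the turnstile) forces readers onto the slow path. A minimal consumer sketch of that KPI follows; it is not part of this commit, and the example_* names are illustrative only.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

static struct rmlock example_lock;		/* illustrative lock, not in the tree */

static void
example_init(void)
{
	/* Plain (non-sleepable) rmlock; compare rm_init_flags() above. */
	rm_init(&example_lock, "example_lock");
}

static void
example_reader(void)
{
	struct rm_priotracker tracker;		/* must stay valid until rm_runlock() */

	/* Fast path: links the tracker onto the per-CPU queue, no atomics. */
	rm_rlock(&example_lock, &tracker);
	/* ... read the data protected by the lock ... */
	rm_runlock(&example_lock, &tracker);
}

static void
example_writer(void)
{
	/* Slow path: takes the backing mutex and IPIs CPUs holding read tokens. */
	rm_wlock(&example_lock);
	/* ... modify the data protected by the lock ... */
	rm_wunlock(&example_lock);
}

Note that readers of a lock created with rm_init() run with sleeping disabled (THREAD_NO_SLEEPING() in _rm_rlock() above); a lock that must allow sleeping readers would instead be created with rm_init_flags(..., RM_SLEEPABLE), which backs the write side with an sx lock rather than a mutex.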
Index: projects/clang700-import/sys/kern/kern_rwlock.c =================================================================== --- projects/clang700-import/sys/kern/kern_rwlock.c (revision 340426) +++ projects/clang700-import/sys/kern/kern_rwlock.c (revision 340427) @@ -1,1557 +1,1557 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 John Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Machine independent bits of reader/writer lock implementation. */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_hwpmc_hooks.h" #include "opt_no_adaptive_rwlocks.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS) #define ADAPTIVE_RWLOCKS #endif #ifdef HWPMC_HOOKS #include PMC_SOFT_DECLARE( , , lock, failed); #endif /* * Return the rwlock address when the lock cookie address is provided. * This functionality assumes that struct rwlock* have a member named rw_lock. 
*/ #define rwlock2rw(c) (__containerof(c, struct rwlock, rw_lock)) #ifdef DDB #include static void db_show_rwlock(const struct lock_object *lock); #endif static void assert_rw(const struct lock_object *lock, int what); static void lock_rw(struct lock_object *lock, uintptr_t how); #ifdef KDTRACE_HOOKS static int owner_rw(const struct lock_object *lock, struct thread **owner); #endif static uintptr_t unlock_rw(struct lock_object *lock); struct lock_class lock_class_rw = { .lc_name = "rw", .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE, .lc_assert = assert_rw, #ifdef DDB .lc_ddb_show = db_show_rwlock, #endif .lc_lock = lock_rw, .lc_unlock = unlock_rw, #ifdef KDTRACE_HOOKS .lc_owner = owner_rw, #endif }; #ifdef ADAPTIVE_RWLOCKS static int __read_frequently rowner_retries; static int __read_frequently rowner_loops; static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL, "rwlock debugging"); SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, ""); SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, ""); static struct lock_delay_config __read_frequently rw_delay; SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_base, CTLFLAG_RW, &rw_delay.base, 0, ""); SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_max, CTLFLAG_RW, &rw_delay.max, 0, ""); static void rw_lock_delay_init(void *arg __unused) { lock_delay_default_init(&rw_delay); rowner_retries = 10; rowner_loops = max(10000, rw_delay.max); } LOCK_DELAY_SYSINIT(rw_lock_delay_init); #endif /* * Return a pointer to the owning thread if the lock is write-locked or * NULL if the lock is unlocked or read-locked. */ #define lv_rw_wowner(v) \ ((v) & RW_LOCK_READ ? NULL : \ (struct thread *)RW_OWNER((v))) #define rw_wowner(rw) lv_rw_wowner(RW_READ_VALUE(rw)) /* * Returns if a write owner is recursed. Write ownership is not assured * here and should be previously checked. */ #define rw_recursed(rw) ((rw)->rw_recurse != 0) /* * Return true if curthread helds the lock. */ #define rw_wlocked(rw) (rw_wowner((rw)) == curthread) /* * Return a pointer to the owning thread for this lock who should receive * any priority lent by threads that block on this lock. Currently this * is identical to rw_wowner(). */ #define rw_owner(rw) rw_wowner(rw) #ifndef INVARIANTS #define __rw_assert(c, what, file, line) #endif void assert_rw(const struct lock_object *lock, int what) { rw_assert((const struct rwlock *)lock, what); } void lock_rw(struct lock_object *lock, uintptr_t how) { struct rwlock *rw; rw = (struct rwlock *)lock; if (how) rw_rlock(rw); else rw_wlock(rw); } uintptr_t unlock_rw(struct lock_object *lock) { struct rwlock *rw; rw = (struct rwlock *)lock; rw_assert(rw, RA_LOCKED | LA_NOTRECURSED); if (rw->rw_lock & RW_LOCK_READ) { rw_runlock(rw); return (1); } else { rw_wunlock(rw); return (0); } } #ifdef KDTRACE_HOOKS int owner_rw(const struct lock_object *lock, struct thread **owner) { const struct rwlock *rw = (const struct rwlock *)lock; uintptr_t x = rw->rw_lock; *owner = rw_wowner(rw); return ((x & RW_LOCK_READ) != 0 ? 
(RW_READERS(x) != 0) : (*owner != NULL)); } #endif void _rw_init_flags(volatile uintptr_t *c, const char *name, int opts) { struct rwlock *rw; int flags; rw = rwlock2rw(c); MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET | RW_RECURSE | RW_NEW)) == 0); ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock, ("%s: rw_lock not aligned for %s: %p", __func__, name, &rw->rw_lock)); flags = LO_UPGRADABLE; if (opts & RW_DUPOK) flags |= LO_DUPOK; if (opts & RW_NOPROFILE) flags |= LO_NOPROFILE; if (!(opts & RW_NOWITNESS)) flags |= LO_WITNESS; if (opts & RW_RECURSE) flags |= LO_RECURSABLE; if (opts & RW_QUIET) flags |= LO_QUIET; if (opts & RW_NEW) flags |= LO_NEW; lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags); rw->rw_lock = RW_UNLOCKED; rw->rw_recurse = 0; } void _rw_destroy(volatile uintptr_t *c) { struct rwlock *rw; rw = rwlock2rw(c); KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw)); KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw)); rw->rw_lock = RW_DESTROYED; lock_destroy(&rw->lock_object); } void rw_sysinit(void *arg) { struct rw_args *args; args = arg; rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc, args->ra_flags); } int _rw_wowned(const volatile uintptr_t *c) { return (rw_wowner(rwlock2rw(c)) == curthread); } void _rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line) { struct rwlock *rw; uintptr_t tid, v; rw = rwlock2rw(c); KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() || !TD_IS_IDLETHREAD(curthread), ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d", curthread, rw->lock_object.lo_name, file, line)); KASSERT(rw->rw_lock != RW_DESTROYED, ("rw_wlock() of destroyed rwlock @ %s:%d", file, line)); WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); tid = (uintptr_t)curthread; v = RW_UNLOCKED; if (!_rw_write_lock_fetch(rw, &v, tid)) _rw_wlock_hard(rw, v, file, line); else LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, 0, 0, file, line, LOCKSTAT_WRITER); LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line); WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line); TD_LOCKS_INC(curthread); } int __rw_try_wlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF) { struct thread *td; uintptr_t tid, v; int rval; bool recursed; td = curthread; tid = (uintptr_t)td; if (SCHEDULER_STOPPED_TD(td)) return (1); KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td), ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d", curthread, rw->lock_object.lo_name, file, line)); KASSERT(rw->rw_lock != RW_DESTROYED, ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line)); rval = 1; recursed = false; v = RW_UNLOCKED; for (;;) { if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid)) break; if (v == RW_UNLOCKED) continue; if (v == tid && (rw->lock_object.lo_flags & LO_RECURSABLE)) { rw->rw_recurse++; atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED); break; } rval = 0; break; } LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line); if (rval) { WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file, line); if (!recursed) LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, 0, 0, file, line, LOCKSTAT_WRITER); TD_LOCKS_INC(curthread); } return (rval); } int __rw_try_wlock(volatile uintptr_t *c, const char *file, int line) { struct rwlock *rw; rw = rwlock2rw(c); return (__rw_try_wlock_int(rw LOCK_FILE_LINE_ARG)); } void _rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line) { struct rwlock *rw; rw = rwlock2rw(c); KASSERT(rw->rw_lock != RW_DESTROYED, 
("rw_wunlock() of destroyed rwlock @ %s:%d", file, line)); __rw_assert(c, RA_WLOCKED, file, line); WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line); LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line); #ifdef LOCK_PROFILING _rw_wunlock_hard(rw, (uintptr_t)curthread, file, line); #else __rw_wunlock(rw, curthread, file, line); #endif TD_LOCKS_DEC(curthread); } /* * Determines whether a new reader can acquire a lock. Succeeds if the * reader already owns a read lock and the lock is locked for read to * prevent deadlock from reader recursion. Also succeeds if the lock * is unlocked and has no writer waiters or spinners. Failing otherwise * prioritizes writers before readers. */ static bool __always_inline __rw_can_read(struct thread *td, uintptr_t v, bool fp) { if ((v & (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) == RW_LOCK_READ) return (true); if (!fp && td->td_rw_rlocks && (v & RW_LOCK_READ)) return (true); return (false); } static bool __always_inline __rw_rlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp, bool fp LOCK_FILE_LINE_ARG_DEF) { /* * Handle the easy case. If no other thread has a write * lock, then try to bump up the count of read locks. Note * that we have to preserve the current state of the * RW_LOCK_WRITE_WAITERS flag. If we fail to acquire a * read lock, then rw_lock must have changed, so restart * the loop. Note that this handles the case of a * completely unlocked rwlock since such a lock is encoded * as a read lock with no waiters. */ while (__rw_can_read(td, *vp, fp)) { if (atomic_fcmpset_acq_ptr(&rw->rw_lock, vp, *vp + RW_ONE_READER)) { if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR4(KTR_LOCK, "%s: %p succeed %p -> %p", __func__, rw, (void *)*vp, (void *)(*vp + RW_ONE_READER)); td->td_rw_rlocks++; return (true); } } return (false); } static void __noinline __rw_rlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v LOCK_FILE_LINE_ARG_DEF) { struct turnstile *ts; struct thread *owner; #ifdef ADAPTIVE_RWLOCKS int spintries = 0; int i, n; #endif #ifdef LOCK_PROFILING uint64_t waittime = 0; int contested = 0; #endif #if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS) struct lock_delay_arg lda; #endif #ifdef KDTRACE_HOOKS u_int sleep_cnt = 0; int64_t sleep_time = 0; int64_t all_time = 0; #endif #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) - uintptr_t state; + uintptr_t state = 0; int doing_lockprof = 0; #endif #ifdef KDTRACE_HOOKS if (LOCKSTAT_PROFILE_ENABLED(rw__acquire)) { if (__rw_rlock_try(rw, td, &v, false LOCK_FILE_LINE_ARG)) goto out_lockstat; doing_lockprof = 1; all_time -= lockstat_nsecs(&rw->lock_object); state = v; } #endif #ifdef LOCK_PROFILING doing_lockprof = 1; state = v; #endif if (SCHEDULER_STOPPED()) return; #if defined(ADAPTIVE_RWLOCKS) lock_delay_arg_init(&lda, &rw_delay); #elif defined(KDTRACE_HOOKS) lock_delay_arg_init(&lda, NULL); #endif #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif lock_profile_obtain_lock_failed(&rw->lock_object, &contested, &waittime); for (;;) { if (__rw_rlock_try(rw, td, &v, false LOCK_FILE_LINE_ARG)) break; #ifdef KDTRACE_HOOKS lda.spin_cnt++; #endif #ifdef ADAPTIVE_RWLOCKS /* * If the owner is running on another CPU, spin until * the owner stops running or the state of the lock * changes. 
*/ if ((v & RW_LOCK_READ) == 0) { owner = (struct thread *)RW_OWNER(v); if (TD_IS_RUNNING(owner)) { if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR3(KTR_LOCK, "%s: spinning on %p held by %p", __func__, rw, owner); KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), "spinning", "lockname:\"%s\"", rw->lock_object.lo_name); do { lock_delay(&lda); v = RW_READ_VALUE(rw); owner = lv_rw_wowner(v); } while (owner != NULL && TD_IS_RUNNING(owner)); KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), "running"); continue; } } else { if ((v & RW_LOCK_WRITE_SPINNER) && RW_READERS(v) == 0) { MPASS(!__rw_can_read(td, v, false)); lock_delay_spin(2); v = RW_READ_VALUE(rw); continue; } if (spintries < rowner_retries) { spintries++; KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), "spinning", "lockname:\"%s\"", rw->lock_object.lo_name); n = RW_READERS(v); for (i = 0; i < rowner_loops; i += n) { lock_delay_spin(n); v = RW_READ_VALUE(rw); if (!(v & RW_LOCK_READ)) break; n = RW_READERS(v); if (n == 0) break; if (__rw_can_read(td, v, false)) break; } #ifdef KDTRACE_HOOKS lda.spin_cnt += rowner_loops - i; #endif KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), "running"); if (i < rowner_loops) continue; } } #endif /* * Okay, now it's the hard case. Some other thread already * has a write lock or there are write waiters present, * acquire the turnstile lock so we can begin the process * of blocking. */ ts = turnstile_trywait(&rw->lock_object); /* * The lock might have been released while we spun, so * recheck its state and restart the loop if needed. */ v = RW_READ_VALUE(rw); retry_ts: if (((v & RW_LOCK_WRITE_SPINNER) && RW_READERS(v) == 0) || __rw_can_read(td, v, false)) { turnstile_cancel(ts); continue; } owner = lv_rw_wowner(v); #ifdef ADAPTIVE_RWLOCKS /* * The current lock owner might have started executing * on another CPU (or the lock could have changed * owners) while we were waiting on the turnstile * chain lock. If so, drop the turnstile lock and try * again. */ if (owner != NULL) { if (TD_IS_RUNNING(owner)) { turnstile_cancel(ts); continue; } } #endif /* * The lock is held in write mode or it already has waiters. */ MPASS(!__rw_can_read(td, v, false)); /* * If the RW_LOCK_READ_WAITERS flag is already set, then * we can go ahead and block. If it is not set then try * to set it. If we fail to set it drop the turnstile * lock and restart the loop. */ if (!(v & RW_LOCK_READ_WAITERS)) { if (!atomic_fcmpset_ptr(&rw->rw_lock, &v, v | RW_LOCK_READ_WAITERS)) goto retry_ts; if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p set read waiters flag", __func__, rw); } /* * We were unable to acquire the lock and the read waiters * flag is set, so we must block on the turnstile. */ if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__, rw); #ifdef KDTRACE_HOOKS sleep_time -= lockstat_nsecs(&rw->lock_object); #endif MPASS(owner == rw_owner(rw)); turnstile_wait(ts, owner, TS_SHARED_QUEUE); #ifdef KDTRACE_HOOKS sleep_time += lockstat_nsecs(&rw->lock_object); sleep_cnt++; #endif if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p resuming from turnstile", __func__, rw); v = RW_READ_VALUE(rw); } #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) if (__predict_true(!doing_lockprof)) return; #endif #ifdef KDTRACE_HOOKS all_time += lockstat_nsecs(&rw->lock_object); if (sleep_time) LOCKSTAT_RECORD4(rw__block, rw, sleep_time, LOCKSTAT_READER, (state & RW_LOCK_READ) == 0, (state & RW_LOCK_READ) == 0 ? 
0 : RW_READERS(state)); /* Record only the loops spinning and not sleeping. */ if (lda.spin_cnt > sleep_cnt) LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time, LOCKSTAT_READER, (state & RW_LOCK_READ) == 0, (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state)); out_lockstat: #endif /* * TODO: acquire "owner of record" here. Here be turnstile dragons * however. turnstiles don't like owners changing between calls to * turnstile_wait() currently. */ LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested, waittime, file, line, LOCKSTAT_READER); } void __rw_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF) { struct thread *td; uintptr_t v; td = curthread; KASSERT(kdb_active != 0 || SCHEDULER_STOPPED_TD(td) || !TD_IS_IDLETHREAD(td), ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d", td, rw->lock_object.lo_name, file, line)); KASSERT(rw->rw_lock != RW_DESTROYED, ("rw_rlock() of destroyed rwlock @ %s:%d", file, line)); KASSERT(rw_wowner(rw) != td, ("rw_rlock: wlock already held for %s @ %s:%d", rw->lock_object.lo_name, file, line)); WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL); v = RW_READ_VALUE(rw); if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__acquire) || !__rw_rlock_try(rw, td, &v, true LOCK_FILE_LINE_ARG))) __rw_rlock_hard(rw, td, v LOCK_FILE_LINE_ARG); else lock_profile_obtain_lock_success(&rw->lock_object, 0, 0, file, line); LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line); WITNESS_LOCK(&rw->lock_object, 0, file, line); TD_LOCKS_INC(curthread); } void __rw_rlock(volatile uintptr_t *c, const char *file, int line) { struct rwlock *rw; rw = rwlock2rw(c); __rw_rlock_int(rw LOCK_FILE_LINE_ARG); } int __rw_try_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF) { uintptr_t x; if (SCHEDULER_STOPPED()) return (1); KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d", curthread, rw->lock_object.lo_name, file, line)); x = rw->rw_lock; for (;;) { KASSERT(rw->rw_lock != RW_DESTROYED, ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line)); if (!(x & RW_LOCK_READ)) break; if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &x, x + RW_ONE_READER)) { LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file, line); WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line); LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, 0, 0, file, line, LOCKSTAT_READER); TD_LOCKS_INC(curthread); curthread->td_rw_rlocks++; return (1); } } LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line); return (0); } int __rw_try_rlock(volatile uintptr_t *c, const char *file, int line) { struct rwlock *rw; rw = rwlock2rw(c); return (__rw_try_rlock_int(rw LOCK_FILE_LINE_ARG)); } static bool __always_inline __rw_runlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp) { for (;;) { if (RW_READERS(*vp) > 1 || !(*vp & RW_LOCK_WAITERS)) { if (atomic_fcmpset_rel_ptr(&rw->rw_lock, vp, *vp - RW_ONE_READER)) { if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR4(KTR_LOCK, "%s: %p succeeded %p -> %p", __func__, rw, (void *)*vp, (void *)(*vp - RW_ONE_READER)); td->td_rw_rlocks--; return (true); } continue; } break; } return (false); } static void __noinline __rw_runlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v LOCK_FILE_LINE_ARG_DEF) { struct turnstile *ts; uintptr_t setv, queue; if (SCHEDULER_STOPPED()) return; if (__rw_runlock_try(rw, td, &v)) goto out_lockstat; /* * Ok, we know we have waiters and we think we are the * last reader, so grab the turnstile lock. 
*/ turnstile_chain_lock(&rw->lock_object); v = RW_READ_VALUE(rw); for (;;) { if (__rw_runlock_try(rw, td, &v)) break; MPASS(v & RW_LOCK_WAITERS); /* * Try to drop our lock leaving the lock in a unlocked * state. * * If you wanted to do explicit lock handoff you'd have to * do it here. You'd also want to use turnstile_signal() * and you'd have to handle the race where a higher * priority thread blocks on the write lock before the * thread you wakeup actually runs and have the new thread * "steal" the lock. For now it's a lot simpler to just * wakeup all of the waiters. * * As above, if we fail, then another thread might have * acquired a read lock, so drop the turnstile lock and * restart. */ setv = RW_UNLOCKED; queue = TS_SHARED_QUEUE; if (v & RW_LOCK_WRITE_WAITERS) { queue = TS_EXCLUSIVE_QUEUE; setv |= (v & RW_LOCK_READ_WAITERS); } setv |= (v & RW_LOCK_WRITE_SPINNER); if (!atomic_fcmpset_rel_ptr(&rw->rw_lock, &v, setv)) continue; if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p last succeeded with waiters", __func__, rw); /* * Ok. The lock is released and all that's left is to * wake up the waiters. Note that the lock might not be * free anymore, but in that case the writers will just * block again if they run before the new lock holder(s) * release the lock. */ ts = turnstile_lookup(&rw->lock_object); MPASS(ts != NULL); turnstile_broadcast(ts, queue); turnstile_unpend(ts); td->td_rw_rlocks--; break; } turnstile_chain_unlock(&rw->lock_object); out_lockstat: LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_READER); } void _rw_runlock_cookie_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF) { struct thread *td; uintptr_t v; KASSERT(rw->rw_lock != RW_DESTROYED, ("rw_runlock() of destroyed rwlock @ %s:%d", file, line)); __rw_assert(&rw->rw_lock, RA_RLOCKED, file, line); WITNESS_UNLOCK(&rw->lock_object, 0, file, line); LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line); td = curthread; v = RW_READ_VALUE(rw); if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__release) || !__rw_runlock_try(rw, td, &v))) __rw_runlock_hard(rw, td, v LOCK_FILE_LINE_ARG); else lock_profile_release_lock(&rw->lock_object); TD_LOCKS_DEC(curthread); } void _rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line) { struct rwlock *rw; rw = rwlock2rw(c); _rw_runlock_cookie_int(rw LOCK_FILE_LINE_ARG); } #ifdef ADAPTIVE_RWLOCKS static inline void rw_drop_critical(uintptr_t v, bool *in_critical, int *extra_work) { if (v & RW_LOCK_WRITE_SPINNER) return; if (*in_critical) { critical_exit(); *in_critical = false; (*extra_work)--; } } #else #define rw_drop_critical(v, in_critical, extra_work) do { } while (0) #endif /* * This function is called when we are unable to obtain a write lock on the * first try. This means that at least one other thread holds either a * read or write lock. 
*/ void __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF) { uintptr_t tid; struct rwlock *rw; struct turnstile *ts; struct thread *owner; #ifdef ADAPTIVE_RWLOCKS int spintries = 0; int i, n; enum { READERS, WRITER } sleep_reason = READERS; bool in_critical = false; #endif uintptr_t setv; #ifdef LOCK_PROFILING uint64_t waittime = 0; int contested = 0; #endif #if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS) struct lock_delay_arg lda; #endif #ifdef KDTRACE_HOOKS u_int sleep_cnt = 0; int64_t sleep_time = 0; int64_t all_time = 0; #endif #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) - uintptr_t state; + uintptr_t state = 0; int doing_lockprof = 0; #endif int extra_work = 0; tid = (uintptr_t)curthread; rw = rwlock2rw(c); #ifdef KDTRACE_HOOKS if (LOCKSTAT_PROFILE_ENABLED(rw__acquire)) { while (v == RW_UNLOCKED) { if (_rw_write_lock_fetch(rw, &v, tid)) goto out_lockstat; } extra_work = 1; doing_lockprof = 1; all_time -= lockstat_nsecs(&rw->lock_object); state = v; } #endif #ifdef LOCK_PROFILING extra_work = 1; doing_lockprof = 1; state = v; #endif if (SCHEDULER_STOPPED()) return; #if defined(ADAPTIVE_RWLOCKS) lock_delay_arg_init(&lda, &rw_delay); #elif defined(KDTRACE_HOOKS) lock_delay_arg_init(&lda, NULL); #endif if (__predict_false(v == RW_UNLOCKED)) v = RW_READ_VALUE(rw); if (__predict_false(lv_rw_wowner(v) == (struct thread *)tid)) { KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE, ("%s: recursing but non-recursive rw %s @ %s:%d\n", __func__, rw->lock_object.lo_name, file, line)); rw->rw_recurse++; atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED); if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw); return; } if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__, rw->lock_object.lo_name, (void *)rw->rw_lock, file, line); #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif lock_profile_obtain_lock_failed(&rw->lock_object, &contested, &waittime); for (;;) { if (v == RW_UNLOCKED) { if (_rw_write_lock_fetch(rw, &v, tid)) break; continue; } #ifdef KDTRACE_HOOKS lda.spin_cnt++; #endif #ifdef ADAPTIVE_RWLOCKS if (v == (RW_LOCK_READ | RW_LOCK_WRITE_SPINNER)) { if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid)) break; continue; } /* * If the lock is write locked and the owner is * running on another CPU, spin until the owner stops * running or the state of the lock changes. 
*/ if (!(v & RW_LOCK_READ)) { rw_drop_critical(v, &in_critical, &extra_work); sleep_reason = WRITER; owner = lv_rw_wowner(v); if (!TD_IS_RUNNING(owner)) goto ts; if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR3(KTR_LOCK, "%s: spinning on %p held by %p", __func__, rw, owner); KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), "spinning", "lockname:\"%s\"", rw->lock_object.lo_name); do { lock_delay(&lda); v = RW_READ_VALUE(rw); owner = lv_rw_wowner(v); } while (owner != NULL && TD_IS_RUNNING(owner)); KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), "running"); continue; } else if (RW_READERS(v) > 0) { sleep_reason = READERS; if (spintries == rowner_retries) goto ts; if (!(v & RW_LOCK_WRITE_SPINNER)) { if (!in_critical) { critical_enter(); in_critical = true; extra_work++; } if (!atomic_fcmpset_ptr(&rw->rw_lock, &v, v | RW_LOCK_WRITE_SPINNER)) { critical_exit(); in_critical = false; extra_work--; continue; } } spintries++; KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), "spinning", "lockname:\"%s\"", rw->lock_object.lo_name); n = RW_READERS(v); for (i = 0; i < rowner_loops; i += n) { lock_delay_spin(n); v = RW_READ_VALUE(rw); if (!(v & RW_LOCK_WRITE_SPINNER)) break; if (!(v & RW_LOCK_READ)) break; n = RW_READERS(v); if (n == 0) break; } #ifdef KDTRACE_HOOKS lda.spin_cnt += i; #endif KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), "running"); if (i < rowner_loops) continue; } ts: #endif ts = turnstile_trywait(&rw->lock_object); v = RW_READ_VALUE(rw); retry_ts: owner = lv_rw_wowner(v); #ifdef ADAPTIVE_RWLOCKS /* * The current lock owner might have started executing * on another CPU (or the lock could have changed * owners) while we were waiting on the turnstile * chain lock. If so, drop the turnstile lock and try * again. */ if (owner != NULL) { if (TD_IS_RUNNING(owner)) { turnstile_cancel(ts); rw_drop_critical(v, &in_critical, &extra_work); continue; } } else if (RW_READERS(v) > 0 && sleep_reason == WRITER) { turnstile_cancel(ts); rw_drop_critical(v, &in_critical, &extra_work); continue; } #endif /* * Check for the waiters flags about this rwlock. * If the lock was released, without maintain any pending * waiters queue, simply try to acquire it. * If a pending waiters queue is present, claim the lock * ownership and maintain the pending queue. */ setv = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER); if ((v & ~setv) == RW_UNLOCKED) { setv &= ~RW_LOCK_WRITE_SPINNER; if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid | setv)) { if (setv) turnstile_claim(ts); else turnstile_cancel(ts); break; } goto retry_ts; } #ifdef ADAPTIVE_RWLOCKS if (in_critical) { if ((v & RW_LOCK_WRITE_SPINNER) || !((v & RW_LOCK_WRITE_WAITERS))) { setv = v & ~RW_LOCK_WRITE_SPINNER; setv |= RW_LOCK_WRITE_WAITERS; if (!atomic_fcmpset_ptr(&rw->rw_lock, &v, setv)) goto retry_ts; } critical_exit(); in_critical = false; extra_work--; } else { #endif /* * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to * set it. If we fail to set it, then loop back and try * again. */ if (!(v & RW_LOCK_WRITE_WAITERS)) { if (!atomic_fcmpset_ptr(&rw->rw_lock, &v, v | RW_LOCK_WRITE_WAITERS)) goto retry_ts; if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p set write waiters flag", __func__, rw); } #ifdef ADAPTIVE_RWLOCKS } #endif /* * We were unable to acquire the lock and the write waiters * flag is set, so we must block on the turnstile. 
*/ if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__, rw); #ifdef KDTRACE_HOOKS sleep_time -= lockstat_nsecs(&rw->lock_object); #endif MPASS(owner == rw_owner(rw)); turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE); #ifdef KDTRACE_HOOKS sleep_time += lockstat_nsecs(&rw->lock_object); sleep_cnt++; #endif if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p resuming from turnstile", __func__, rw); #ifdef ADAPTIVE_RWLOCKS spintries = 0; #endif v = RW_READ_VALUE(rw); } if (__predict_true(!extra_work)) return; #ifdef ADAPTIVE_RWLOCKS if (in_critical) critical_exit(); #endif #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) if (__predict_true(!doing_lockprof)) return; #endif #ifdef KDTRACE_HOOKS all_time += lockstat_nsecs(&rw->lock_object); if (sleep_time) LOCKSTAT_RECORD4(rw__block, rw, sleep_time, LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0, (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state)); /* Record only the loops spinning and not sleeping. */ if (lda.spin_cnt > sleep_cnt) LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time, LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0, (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state)); out_lockstat: #endif LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested, waittime, file, line, LOCKSTAT_WRITER); } /* * This function is called if lockstat is active or the first try at releasing * a write lock failed. The latter means that the lock is recursed or one of * the 2 waiter bits must be set indicating that at least one thread is waiting * on this lock. */ void __rw_wunlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF) { struct rwlock *rw; struct turnstile *ts; uintptr_t tid, setv; int queue; tid = (uintptr_t)curthread; if (SCHEDULER_STOPPED()) return; rw = rwlock2rw(c); if (__predict_false(v == tid)) v = RW_READ_VALUE(rw); if (v & RW_LOCK_WRITER_RECURSED) { if (--(rw->rw_recurse) == 0) atomic_clear_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED); if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw); return; } LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_WRITER); if (v == tid && _rw_write_unlock(rw, tid)) return; KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS), ("%s: neither of the waiter flags are set", __func__)); if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p contested", __func__, rw); turnstile_chain_lock(&rw->lock_object); /* * Use the same algo as sx locks for now. Prefer waking up shared * waiters if we have any over writers. This is probably not ideal. * * 'v' is the value we are going to write back to rw_lock. If we * have waiters on both queues, we need to preserve the state of * the waiter flag for the queue we don't wake up. For now this is * hardcoded for the algorithm mentioned above. * * In the case of both readers and writers waiting we wakeup the * readers but leave the RW_LOCK_WRITE_WAITERS flag set. If a * new writer comes in before a reader it will claim the lock up * above. There is probably a potential priority inversion in * there that could be worked around either by waking both queues * of waiters or doing some complicated lock handoff gymnastics. */ setv = RW_UNLOCKED; v = RW_READ_VALUE(rw); queue = TS_SHARED_QUEUE; if (v & RW_LOCK_WRITE_WAITERS) { queue = TS_EXCLUSIVE_QUEUE; setv |= (v & RW_LOCK_READ_WAITERS); } atomic_store_rel_ptr(&rw->rw_lock, setv); /* Wake up all waiters for the specific queue. 
*/ if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw, queue == TS_SHARED_QUEUE ? "read" : "write"); ts = turnstile_lookup(&rw->lock_object); MPASS(ts != NULL); turnstile_broadcast(ts, queue); turnstile_unpend(ts); turnstile_chain_unlock(&rw->lock_object); } /* * Attempt to do a non-blocking upgrade from a read lock to a write * lock. This will only succeed if this thread holds a single read * lock. Returns true if the upgrade succeeded and false otherwise. */ int __rw_try_upgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF) { uintptr_t v, setv, tid; struct turnstile *ts; int success; if (SCHEDULER_STOPPED()) return (1); KASSERT(rw->rw_lock != RW_DESTROYED, ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line)); __rw_assert(&rw->rw_lock, RA_RLOCKED, file, line); /* * Attempt to switch from one reader to a writer. If there * are any write waiters, then we will have to lock the * turnstile first to prevent races with another writer * calling turnstile_wait() before we have claimed this * turnstile. So, do the simple case of no waiters first. */ tid = (uintptr_t)curthread; success = 0; v = RW_READ_VALUE(rw); for (;;) { if (RW_READERS(v) > 1) break; if (!(v & RW_LOCK_WAITERS)) { success = atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid); if (!success) continue; break; } /* * Ok, we think we have waiters, so lock the turnstile. */ ts = turnstile_trywait(&rw->lock_object); v = RW_READ_VALUE(rw); retry_ts: if (RW_READERS(v) > 1) { turnstile_cancel(ts); break; } /* * Try to switch from one reader to a writer again. This time * we honor the current state of the waiters flags. * If we obtain the lock with the flags set, then claim * ownership of the turnstile. */ setv = tid | (v & RW_LOCK_WAITERS); success = atomic_fcmpset_ptr(&rw->rw_lock, &v, setv); if (success) { if (v & RW_LOCK_WAITERS) turnstile_claim(ts); else turnstile_cancel(ts); break; } goto retry_ts; } LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line); if (success) { curthread->td_rw_rlocks--; WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file, line); LOCKSTAT_RECORD0(rw__upgrade, rw); } return (success); } int __rw_try_upgrade(volatile uintptr_t *c, const char *file, int line) { struct rwlock *rw; rw = rwlock2rw(c); return (__rw_try_upgrade_int(rw LOCK_FILE_LINE_ARG)); } /* * Downgrade a write lock into a single read lock. */ void __rw_downgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF) { struct turnstile *ts; uintptr_t tid, v; int rwait, wwait; if (SCHEDULER_STOPPED()) return; KASSERT(rw->rw_lock != RW_DESTROYED, ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line)); __rw_assert(&rw->rw_lock, RA_WLOCKED | RA_NOTRECURSED, file, line); #ifndef INVARIANTS if (rw_recursed(rw)) panic("downgrade of a recursed lock"); #endif WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line); /* * Convert from a writer to a single reader. First we handle * the easy case with no waiters. If there are any waiters, we * lock the turnstile and "disown" the lock. */ tid = (uintptr_t)curthread; if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1))) goto out; /* * Ok, we think we have waiters, so lock the turnstile so we can * read the waiter flags without any races. */ turnstile_chain_lock(&rw->lock_object); v = rw->rw_lock & RW_LOCK_WAITERS; rwait = v & RW_LOCK_READ_WAITERS; wwait = v & RW_LOCK_WRITE_WAITERS; MPASS(rwait | wwait); /* * Downgrade from a write lock while preserving waiters flag * and give up ownership of the turnstile. 
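	 *
	 * A minimal usage sketch of the try-upgrade/downgrade pair; the
	 * caller and its "sc->sc_lock" member are hypothetical and not
	 * part of this file:
	 *
	 *	rw_rlock(&sc->sc_lock);
	 *	if (rw_try_upgrade(&sc->sc_lock)) {
	 *		... modify state under the write lock ...
	 *		rw_downgrade(&sc->sc_lock);
	 *	}
	 *	rw_runlock(&sc->sc_lock);	(one read lock held here)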
*/ ts = turnstile_lookup(&rw->lock_object); MPASS(ts != NULL); if (!wwait) v &= ~RW_LOCK_READ_WAITERS; atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v); /* * Wake other readers if there are no writers pending. Otherwise they * won't be able to acquire the lock anyway. */ if (rwait && !wwait) { turnstile_broadcast(ts, TS_SHARED_QUEUE); turnstile_unpend(ts); } else turnstile_disown(ts); turnstile_chain_unlock(&rw->lock_object); out: curthread->td_rw_rlocks++; LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line); LOCKSTAT_RECORD0(rw__downgrade, rw); } void __rw_downgrade(volatile uintptr_t *c, const char *file, int line) { struct rwlock *rw; rw = rwlock2rw(c); __rw_downgrade_int(rw LOCK_FILE_LINE_ARG); } #ifdef INVARIANT_SUPPORT #ifndef INVARIANTS #undef __rw_assert #endif /* * In the non-WITNESS case, rw_assert() can only detect that at least * *some* thread owns an rlock, but it cannot guarantee that *this* * thread owns an rlock. */ void __rw_assert(const volatile uintptr_t *c, int what, const char *file, int line) { const struct rwlock *rw; - if (panicstr != NULL) + if (SCHEDULER_STOPPED()) return; rw = rwlock2rw(c); switch (what) { case RA_LOCKED: case RA_LOCKED | RA_RECURSED: case RA_LOCKED | RA_NOTRECURSED: case RA_RLOCKED: case RA_RLOCKED | RA_RECURSED: case RA_RLOCKED | RA_NOTRECURSED: #ifdef WITNESS witness_assert(&rw->lock_object, what, file, line); #else /* * If some other thread has a write lock or we have one * and are asserting a read lock, fail. Also, if no one * has a lock at all, fail. */ if (rw->rw_lock == RW_UNLOCKED || (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED || rw_wowner(rw) != curthread))) panic("Lock %s not %slocked @ %s:%d\n", rw->lock_object.lo_name, (what & RA_RLOCKED) ? "read " : "", file, line); if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) { if (rw_recursed(rw)) { if (what & RA_NOTRECURSED) panic("Lock %s recursed @ %s:%d\n", rw->lock_object.lo_name, file, line); } else if (what & RA_RECURSED) panic("Lock %s not recursed @ %s:%d\n", rw->lock_object.lo_name, file, line); } #endif break; case RA_WLOCKED: case RA_WLOCKED | RA_RECURSED: case RA_WLOCKED | RA_NOTRECURSED: if (rw_wowner(rw) != curthread) panic("Lock %s not exclusively locked @ %s:%d\n", rw->lock_object.lo_name, file, line); if (rw_recursed(rw)) { if (what & RA_NOTRECURSED) panic("Lock %s recursed @ %s:%d\n", rw->lock_object.lo_name, file, line); } else if (what & RA_RECURSED) panic("Lock %s not recursed @ %s:%d\n", rw->lock_object.lo_name, file, line); break; case RA_UNLOCKED: #ifdef WITNESS witness_assert(&rw->lock_object, what, file, line); #else /* * If we hold a write lock fail. We can't reliably check * to see if we hold a read lock or not. 
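		 *
		 * The limitation follows from the lock word layout: shared
		 * holders are recorded only as a reader count, never as
		 * thread pointers, so without WITNESS there is nothing that
		 * ties a particular read lock to curthread.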
*/ if (rw_wowner(rw) == curthread) panic("Lock %s exclusively locked @ %s:%d\n", rw->lock_object.lo_name, file, line); #endif break; default: panic("Unknown rw lock assertion: %d @ %s:%d", what, file, line); } } #endif /* INVARIANT_SUPPORT */ #ifdef DDB void db_show_rwlock(const struct lock_object *lock) { const struct rwlock *rw; struct thread *td; rw = (const struct rwlock *)lock; db_printf(" state: "); if (rw->rw_lock == RW_UNLOCKED) db_printf("UNLOCKED\n"); else if (rw->rw_lock == RW_DESTROYED) { db_printf("DESTROYED\n"); return; } else if (rw->rw_lock & RW_LOCK_READ) db_printf("RLOCK: %ju locks\n", (uintmax_t)(RW_READERS(rw->rw_lock))); else { td = rw_wowner(rw); db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td, td->td_tid, td->td_proc->p_pid, td->td_name); if (rw_recursed(rw)) db_printf(" recursed: %u\n", rw->rw_recurse); } db_printf(" waiters: "); switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) { case RW_LOCK_READ_WAITERS: db_printf("readers\n"); break; case RW_LOCK_WRITE_WAITERS: db_printf("writers\n"); break; case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS: db_printf("readers and writers\n"); break; default: db_printf("none\n"); break; } } #endif Index: projects/clang700-import/sys/kern/kern_sx.c =================================================================== --- projects/clang700-import/sys/kern/kern_sx.c (revision 340426) +++ projects/clang700-import/sys/kern/kern_sx.c (revision 340427) @@ -1,1566 +1,1566 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2007 Attilio Rao * Copyright (c) 2001 Jason Evans * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice(s), this list of conditions and the following disclaimer as * the first lines of this file unmodified other than the possible * addition of one or more copyright notices. * 2. Redistributions in binary form must reproduce the above copyright * notice(s), this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* * Shared/exclusive locks. This implementation attempts to ensure * deterministic lock granting behavior, so that slocks and xlocks are * interleaved. * * Priority propagation will not generally raise the priority of lock holders, * so should not be relied upon in combination with sx locks. 
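 *
 * A minimal usage sketch of the sx(9) interface; "example_lock" is
 * an illustrative variable, not part of this file:
 *
 *	struct sx example_lock;
 *
 *	sx_init(&example_lock, "example");
 *	sx_slock(&example_lock);	(read-side section)
 *	sx_sunlock(&example_lock);
 *	sx_xlock(&example_lock);	(write-side section)
 *	sx_xunlock(&example_lock);
 *	sx_destroy(&example_lock);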
*/ #include "opt_ddb.h" #include "opt_hwpmc_hooks.h" #include "opt_no_adaptive_sx.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(SMP) && !defined(NO_ADAPTIVE_SX) #include #endif #ifdef DDB #include #endif #if defined(SMP) && !defined(NO_ADAPTIVE_SX) #define ADAPTIVE_SX #endif CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE); #ifdef HWPMC_HOOKS #include PMC_SOFT_DECLARE( , , lock, failed); #endif /* Handy macros for sleep queues. */ #define SQ_EXCLUSIVE_QUEUE 0 #define SQ_SHARED_QUEUE 1 /* * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file. We * drop Giant anytime we have to sleep or if we adaptively spin. */ #define GIANT_DECLARE \ int _giantcnt = 0; \ WITNESS_SAVE_DECL(Giant) \ #define GIANT_SAVE(work) do { \ if (__predict_false(mtx_owned(&Giant))) { \ work++; \ WITNESS_SAVE(&Giant.lock_object, Giant); \ while (mtx_owned(&Giant)) { \ _giantcnt++; \ mtx_unlock(&Giant); \ } \ } \ } while (0) #define GIANT_RESTORE() do { \ if (_giantcnt > 0) { \ mtx_assert(&Giant, MA_NOTOWNED); \ while (_giantcnt--) \ mtx_lock(&Giant); \ WITNESS_RESTORE(&Giant.lock_object, Giant); \ } \ } while (0) /* * Returns true if an exclusive lock is recursed. It assumes * curthread currently has an exclusive lock. */ #define sx_recursed(sx) ((sx)->sx_recurse != 0) static void assert_sx(const struct lock_object *lock, int what); #ifdef DDB static void db_show_sx(const struct lock_object *lock); #endif static void lock_sx(struct lock_object *lock, uintptr_t how); #ifdef KDTRACE_HOOKS static int owner_sx(const struct lock_object *lock, struct thread **owner); #endif static uintptr_t unlock_sx(struct lock_object *lock); struct lock_class lock_class_sx = { .lc_name = "sx", .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE, .lc_assert = assert_sx, #ifdef DDB .lc_ddb_show = db_show_sx, #endif .lc_lock = lock_sx, .lc_unlock = unlock_sx, #ifdef KDTRACE_HOOKS .lc_owner = owner_sx, #endif }; #ifndef INVARIANTS #define _sx_assert(sx, what, file, line) #endif #ifdef ADAPTIVE_SX static __read_frequently u_int asx_retries; static __read_frequently u_int asx_loops; static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging"); SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, ""); SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, ""); static struct lock_delay_config __read_frequently sx_delay; SYSCTL_INT(_debug_sx, OID_AUTO, delay_base, CTLFLAG_RW, &sx_delay.base, 0, ""); SYSCTL_INT(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max, 0, ""); static void sx_lock_delay_init(void *arg __unused) { lock_delay_default_init(&sx_delay); asx_retries = 10; asx_loops = max(10000, sx_delay.max); } LOCK_DELAY_SYSINIT(sx_lock_delay_init); #endif void assert_sx(const struct lock_object *lock, int what) { sx_assert((const struct sx *)lock, what); } void lock_sx(struct lock_object *lock, uintptr_t how) { struct sx *sx; sx = (struct sx *)lock; if (how) sx_slock(sx); else sx_xlock(sx); } uintptr_t unlock_sx(struct lock_object *lock) { struct sx *sx; sx = (struct sx *)lock; sx_assert(sx, SA_LOCKED | SA_NOTRECURSED); if (sx_xlocked(sx)) { sx_xunlock(sx); return (0); } else { sx_sunlock(sx); return (1); } } #ifdef KDTRACE_HOOKS int owner_sx(const struct lock_object *lock, struct thread **owner) { const struct sx *sx; uintptr_t x; sx = (const struct sx *)lock; x = sx->sx_lock; *owner = NULL; return ((x & SX_LOCK_SHARED) != 0 ? 
(SX_SHARERS(x) != 0) : ((*owner = (struct thread *)SX_OWNER(x)) != NULL)); } #endif void sx_sysinit(void *arg) { struct sx_args *sargs = arg; sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags); } void sx_init_flags(struct sx *sx, const char *description, int opts) { int flags; MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK | SX_NOPROFILE | SX_NOADAPTIVE | SX_NEW)) == 0); ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock, ("%s: sx_lock not aligned for %s: %p", __func__, description, &sx->sx_lock)); flags = LO_SLEEPABLE | LO_UPGRADABLE; if (opts & SX_DUPOK) flags |= LO_DUPOK; if (opts & SX_NOPROFILE) flags |= LO_NOPROFILE; if (!(opts & SX_NOWITNESS)) flags |= LO_WITNESS; if (opts & SX_RECURSE) flags |= LO_RECURSABLE; if (opts & SX_QUIET) flags |= LO_QUIET; if (opts & SX_NEW) flags |= LO_NEW; flags |= opts & SX_NOADAPTIVE; lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags); sx->sx_lock = SX_LOCK_UNLOCKED; sx->sx_recurse = 0; } void sx_destroy(struct sx *sx) { KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held")); KASSERT(sx->sx_recurse == 0, ("sx lock still recursed")); sx->sx_lock = SX_LOCK_DESTROYED; lock_destroy(&sx->lock_object); } int sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF) { uintptr_t x; if (SCHEDULER_STOPPED()) return (1); KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), ("sx_try_slock() by idle thread %p on sx %s @ %s:%d", curthread, sx->lock_object.lo_name, file, line)); x = sx->sx_lock; for (;;) { KASSERT(x != SX_LOCK_DESTROYED, ("sx_try_slock() of destroyed sx @ %s:%d", file, line)); if (!(x & SX_LOCK_SHARED)) break; if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) { LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line); WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line); LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx, 0, 0, file, line, LOCKSTAT_READER); TD_LOCKS_INC(curthread); curthread->td_sx_slocks++; return (1); } } LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line); return (0); } int sx_try_slock_(struct sx *sx, const char *file, int line) { return (sx_try_slock_int(sx LOCK_FILE_LINE_ARG)); } int _sx_xlock(struct sx *sx, int opts, const char *file, int line) { uintptr_t tid, x; int error = 0; KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() || !TD_IS_IDLETHREAD(curthread), ("sx_xlock() by idle thread %p on sx %s @ %s:%d", curthread, sx->lock_object.lo_name, file, line)); KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, ("sx_xlock() of destroyed sx @ %s:%d", file, line)); WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); tid = (uintptr_t)curthread; x = SX_LOCK_UNLOCKED; if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid)) error = _sx_xlock_hard(sx, x, opts LOCK_FILE_LINE_ARG); else LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx, 0, 0, file, line, LOCKSTAT_WRITER); if (!error) { LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse, file, line); WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line); TD_LOCKS_INC(curthread); } return (error); } int sx_try_xlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF) { struct thread *td; uintptr_t tid, x; int rval; bool recursed; td = curthread; tid = (uintptr_t)td; if (SCHEDULER_STOPPED_TD(td)) return (1); KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td), ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d", curthread, sx->lock_object.lo_name, file, line)); KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, ("sx_try_xlock() of destroyed sx @ %s:%d", file, line)); rval = 1; recursed = false; x = 
SX_LOCK_UNLOCKED; for (;;) { if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid)) break; if (x == SX_LOCK_UNLOCKED) continue; if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) { sx->sx_recurse++; atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED); break; } rval = 0; break; } LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line); if (rval) { WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file, line); if (!recursed) LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx, 0, 0, file, line, LOCKSTAT_WRITER); TD_LOCKS_INC(curthread); } return (rval); } int sx_try_xlock_(struct sx *sx, const char *file, int line) { return (sx_try_xlock_int(sx LOCK_FILE_LINE_ARG)); } void _sx_xunlock(struct sx *sx, const char *file, int line) { KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, ("sx_xunlock() of destroyed sx @ %s:%d", file, line)); _sx_assert(sx, SA_XLOCKED, file, line); WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line); LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file, line); #if LOCK_DEBUG > 0 _sx_xunlock_hard(sx, (uintptr_t)curthread, file, line); #else __sx_xunlock(sx, curthread, file, line); #endif TD_LOCKS_DEC(curthread); } /* * Try to do a non-blocking upgrade from a shared lock to an exclusive lock. * This will only succeed if this thread holds a single shared lock. * Return 1 if if the upgrade succeed, 0 otherwise. */ int sx_try_upgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF) { uintptr_t x; uintptr_t waiters; int success; if (SCHEDULER_STOPPED()) return (1); KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line)); _sx_assert(sx, SA_SLOCKED, file, line); /* * Try to switch from one shared lock to an exclusive lock. We need * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that * we will wake up the exclusive waiters when we drop the lock. */ success = 0; x = SX_READ_VALUE(sx); for (;;) { if (SX_SHARERS(x) > 1) break; waiters = (x & SX_LOCK_WAITERS); if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, (uintptr_t)curthread | waiters)) { success = 1; break; } } LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line); if (success) { curthread->td_sx_slocks--; WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file, line); LOCKSTAT_RECORD0(sx__upgrade, sx); } return (success); } int sx_try_upgrade_(struct sx *sx, const char *file, int line) { return (sx_try_upgrade_int(sx LOCK_FILE_LINE_ARG)); } /* * Downgrade an unrecursed exclusive lock into a single shared lock. */ void sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF) { uintptr_t x; int wakeup_swapper; if (SCHEDULER_STOPPED()) return; KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, ("sx_downgrade() of destroyed sx @ %s:%d", file, line)); _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line); #ifndef INVARIANTS if (sx_recursed(sx)) panic("downgrade of a recursed lock"); #endif WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line); /* * Try to switch from an exclusive lock with no shared waiters * to one sharer with no shared waiters. If there are * exclusive waiters, we don't need to lock the sleep queue so * long as we preserve the flag. We do one quick try and if * that fails we grab the sleepq lock to keep the flags from * changing and do it the slow way. * * We have to lock the sleep queue if there are shared waiters * so we can wake them up. 
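	 *
	 * Either way SX_LOCK_EXCLUSIVE_WAITERS is carried over into the
	 * new lock word, so a later sx_sunlock() of the resulting shared
	 * lock still hands off to the queued writers.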
*/ x = sx->sx_lock; if (!(x & SX_LOCK_SHARED_WAITERS) && atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) | (x & SX_LOCK_EXCLUSIVE_WAITERS))) goto out; /* * Lock the sleep queue so we can read the waiters bits * without any races and wakeup any shared waiters. */ sleepq_lock(&sx->lock_object); /* * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single * shared lock. If there are any shared waiters, wake them up. */ wakeup_swapper = 0; x = sx->sx_lock; atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | (x & SX_LOCK_EXCLUSIVE_WAITERS)); if (x & SX_LOCK_SHARED_WAITERS) wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, SQ_SHARED_QUEUE); sleepq_release(&sx->lock_object); if (wakeup_swapper) kick_proc0(); out: curthread->td_sx_slocks++; LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line); LOCKSTAT_RECORD0(sx__downgrade, sx); } void sx_downgrade_(struct sx *sx, const char *file, int line) { sx_downgrade_int(sx LOCK_FILE_LINE_ARG); } #ifdef ADAPTIVE_SX static inline void sx_drop_critical(uintptr_t x, bool *in_critical, int *extra_work) { if (x & SX_LOCK_WRITE_SPINNER) return; if (*in_critical) { critical_exit(); *in_critical = false; (*extra_work)--; } } #else #define sx_drop_critical(x, in_critical, extra_work) do { } while(0) #endif /* * This function represents the so-called 'hard case' for sx_xlock * operation. All 'easy case' failures are redirected to this. Note * that ideally this would be a static function, but it needs to be * accessible from at least sx.h. */ int _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF) { GIANT_DECLARE; uintptr_t tid, setx; #ifdef ADAPTIVE_SX volatile struct thread *owner; u_int i, n, spintries = 0; enum { READERS, WRITER } sleep_reason = READERS; bool adaptive; bool in_critical = false; #endif #ifdef LOCK_PROFILING uint64_t waittime = 0; int contested = 0; #endif int error = 0; #if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS) struct lock_delay_arg lda; #endif #ifdef KDTRACE_HOOKS u_int sleep_cnt = 0; int64_t sleep_time = 0; int64_t all_time = 0; #endif #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) uintptr_t state = 0; int doing_lockprof = 0; #endif int extra_work = 0; tid = (uintptr_t)curthread; #ifdef KDTRACE_HOOKS if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) { while (x == SX_LOCK_UNLOCKED) { if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid)) goto out_lockstat; } extra_work = 1; doing_lockprof = 1; all_time -= lockstat_nsecs(&sx->lock_object); state = x; } #endif #ifdef LOCK_PROFILING extra_work = 1; doing_lockprof = 1; state = x; #endif if (SCHEDULER_STOPPED()) return (0); #if defined(ADAPTIVE_SX) lock_delay_arg_init(&lda, &sx_delay); #elif defined(KDTRACE_HOOKS) lock_delay_arg_init(&lda, NULL); #endif if (__predict_false(x == SX_LOCK_UNLOCKED)) x = SX_READ_VALUE(sx); /* If we already hold an exclusive lock, then recurse. 
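	 *
	 * Recursion is only legal when the lock was created with the
	 * SX_RECURSE flag (LO_RECURSABLE), which the KASSERT below
	 * enforces, e.g. a lock initialized as
	 *	sx_init_flags(&example_lock, "example", SX_RECURSE);
	 * ("example_lock" is illustrative only).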
*/ if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) { KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0, ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n", sx->lock_object.lo_name, file, line)); sx->sx_recurse++; atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED); if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx); return (0); } if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__, sx->lock_object.lo_name, (void *)sx->sx_lock, file, line); #ifdef ADAPTIVE_SX adaptive = ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0); #endif #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif lock_profile_obtain_lock_failed(&sx->lock_object, &contested, &waittime); #ifndef INVARIANTS GIANT_SAVE(extra_work); #endif for (;;) { if (x == SX_LOCK_UNLOCKED) { if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid)) break; continue; } #ifdef INVARIANTS GIANT_SAVE(extra_work); #endif #ifdef KDTRACE_HOOKS lda.spin_cnt++; #endif #ifdef ADAPTIVE_SX if (__predict_false(!adaptive)) goto sleepq; /* * If the lock is write locked and the owner is * running on another CPU, spin until the owner stops * running or the state of the lock changes. */ if ((x & SX_LOCK_SHARED) == 0) { sx_drop_critical(x, &in_critical, &extra_work); sleep_reason = WRITER; owner = lv_sx_owner(x); if (!TD_IS_RUNNING(owner)) goto sleepq; if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR3(KTR_LOCK, "%s: spinning on %p held by %p", __func__, sx, owner); KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), "spinning", "lockname:\"%s\"", sx->lock_object.lo_name); do { lock_delay(&lda); x = SX_READ_VALUE(sx); owner = lv_sx_owner(x); } while (owner != NULL && TD_IS_RUNNING(owner)); KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), "running"); continue; } else if (SX_SHARERS(x) > 0) { sleep_reason = READERS; if (spintries == asx_retries) goto sleepq; if (!(x & SX_LOCK_WRITE_SPINNER)) { if (!in_critical) { critical_enter(); in_critical = true; extra_work++; } if (!atomic_fcmpset_ptr(&sx->sx_lock, &x, x | SX_LOCK_WRITE_SPINNER)) { critical_exit(); in_critical = false; extra_work--; continue; } } spintries++; KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), "spinning", "lockname:\"%s\"", sx->lock_object.lo_name); n = SX_SHARERS(x); for (i = 0; i < asx_loops; i += n) { lock_delay_spin(n); x = SX_READ_VALUE(sx); if (!(x & SX_LOCK_WRITE_SPINNER)) break; if (!(x & SX_LOCK_SHARED)) break; n = SX_SHARERS(x); if (n == 0) break; } #ifdef KDTRACE_HOOKS lda.spin_cnt += i; #endif KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), "running"); if (i < asx_loops) continue; } sleepq: #endif sleepq_lock(&sx->lock_object); x = SX_READ_VALUE(sx); retry_sleepq: /* * If the lock was released while spinning on the * sleep queue chain lock, try again. */ if (x == SX_LOCK_UNLOCKED) { sleepq_release(&sx->lock_object); sx_drop_critical(x, &in_critical, &extra_work); continue; } #ifdef ADAPTIVE_SX /* * The current lock owner might have started executing * on another CPU (or the lock could have changed * owners) while we were waiting on the sleep queue * chain lock. If so, drop the sleep queue lock and try * again. 
*/ if (adaptive) { if (!(x & SX_LOCK_SHARED)) { owner = (struct thread *)SX_OWNER(x); if (TD_IS_RUNNING(owner)) { sleepq_release(&sx->lock_object); sx_drop_critical(x, &in_critical, &extra_work); continue; } } else if (SX_SHARERS(x) > 0 && sleep_reason == WRITER) { sleepq_release(&sx->lock_object); sx_drop_critical(x, &in_critical, &extra_work); continue; } } #endif /* * If an exclusive lock was released with both shared * and exclusive waiters and a shared waiter hasn't * woken up and acquired the lock yet, sx_lock will be * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS. * If we see that value, try to acquire it once. Note * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS * as there are other exclusive waiters still. If we * fail, restart the loop. */ setx = x & (SX_LOCK_WAITERS | SX_LOCK_WRITE_SPINNER); if ((x & ~setx) == SX_LOCK_SHARED) { setx &= ~SX_LOCK_WRITE_SPINNER; if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid | setx)) goto retry_sleepq; sleepq_release(&sx->lock_object); CTR2(KTR_LOCK, "%s: %p claimed by new writer", __func__, sx); break; } #ifdef ADAPTIVE_SX /* * It is possible we set the SX_LOCK_WRITE_SPINNER bit. * It is an invariant that when the bit is set, there is * a writer ready to grab the lock. Thus clear the bit since * we are going to sleep. */ if (in_critical) { if ((x & SX_LOCK_WRITE_SPINNER) || !((x & SX_LOCK_EXCLUSIVE_WAITERS))) { setx = x & ~SX_LOCK_WRITE_SPINNER; setx |= SX_LOCK_EXCLUSIVE_WAITERS; if (!atomic_fcmpset_ptr(&sx->sx_lock, &x, setx)) { goto retry_sleepq; } } critical_exit(); in_critical = false; } else { #endif /* * Try to set the SX_LOCK_EXCLUSIVE_WAITERS. If we fail, * than loop back and retry. */ if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) { if (!atomic_fcmpset_ptr(&sx->sx_lock, &x, x | SX_LOCK_EXCLUSIVE_WAITERS)) { goto retry_sleepq; } if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p set excl waiters flag", __func__, sx); } #ifdef ADAPTIVE_SX } #endif /* * Since we have been unable to acquire the exclusive * lock and the exclusive waiters flag is set, we have * to sleep. */ if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p blocking on sleep queue", __func__, sx); #ifdef KDTRACE_HOOKS sleep_time -= lockstat_nsecs(&sx->lock_object); #endif sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name, SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ? SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE); if (!(opts & SX_INTERRUPTIBLE)) sleepq_wait(&sx->lock_object, 0); else error = sleepq_wait_sig(&sx->lock_object, 0); #ifdef KDTRACE_HOOKS sleep_time += lockstat_nsecs(&sx->lock_object); sleep_cnt++; #endif if (error) { if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: interruptible sleep by %p suspended by signal", __func__, sx); break; } if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p resuming from sleep queue", __func__, sx); x = SX_READ_VALUE(sx); } if (__predict_true(!extra_work)) return (error); #ifdef ADAPTIVE_SX if (in_critical) critical_exit(); #endif GIANT_RESTORE(); #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) if (__predict_true(!doing_lockprof)) return (error); #endif #ifdef KDTRACE_HOOKS all_time += lockstat_nsecs(&sx->lock_object); if (sleep_time) LOCKSTAT_RECORD4(sx__block, sx, sleep_time, LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0, (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state)); if (lda.spin_cnt > sleep_cnt) LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time, LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0, (state & SX_LOCK_SHARED) == 0 ? 
0 : SX_SHARERS(state)); out_lockstat: #endif if (!error) LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx, contested, waittime, file, line, LOCKSTAT_WRITER); return (error); } /* * This function represents the so-called 'hard case' for sx_xunlock * operation. All 'easy case' failures are redirected to this. Note * that ideally this would be a static function, but it needs to be * accessible from at least sx.h. */ void _sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF) { uintptr_t tid, setx; int queue, wakeup_swapper; if (SCHEDULER_STOPPED()) return; tid = (uintptr_t)curthread; if (__predict_false(x == tid)) x = SX_READ_VALUE(sx); MPASS(!(x & SX_LOCK_SHARED)); if (__predict_false(x & SX_LOCK_RECURSED)) { /* The lock is recursed, unrecurse one level. */ if ((--sx->sx_recurse) == 0) atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED); if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx); return; } LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER); if (x == tid && atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED)) return; if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p contested", __func__, sx); sleepq_lock(&sx->lock_object); x = SX_READ_VALUE(sx); MPASS(x & (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)); /* * The wake up algorithm here is quite simple and probably not * ideal. It gives precedence to shared waiters if they are * present. For this condition, we have to preserve the * state of the exclusive waiters flag. * If interruptible sleeps left the shared queue empty avoid a * starvation for the threads sleeping on the exclusive queue by giving * them precedence and cleaning up the shared waiters bit anyway. */ setx = SX_LOCK_UNLOCKED; queue = SQ_SHARED_QUEUE; if ((x & SX_LOCK_EXCLUSIVE_WAITERS) != 0 && sleepq_sleepcnt(&sx->lock_object, SQ_EXCLUSIVE_QUEUE) != 0) { queue = SQ_EXCLUSIVE_QUEUE; setx |= (x & SX_LOCK_SHARED_WAITERS); } atomic_store_rel_ptr(&sx->sx_lock, setx); /* Wake up all the waiters for the specific queue. */ if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue", __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" : "exclusive"); wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue); sleepq_release(&sx->lock_object); if (wakeup_swapper) kick_proc0(); } static bool __always_inline __sx_can_read(struct thread *td, uintptr_t x, bool fp) { if ((x & (SX_LOCK_SHARED | SX_LOCK_EXCLUSIVE_WAITERS | SX_LOCK_WRITE_SPINNER)) == SX_LOCK_SHARED) return (true); if (!fp && td->td_sx_slocks && (x & SX_LOCK_SHARED)) return (true); return (false); } static bool __always_inline __sx_slock_try(struct sx *sx, struct thread *td, uintptr_t *xp, bool fp LOCK_FILE_LINE_ARG_DEF) { /* * If no other thread has an exclusive lock then try to bump up * the count of sharers. Since we have to preserve the state * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the * shared lock loop back and retry. 
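	 *
	 * On failure atomic_fcmpset_acq_ptr() refreshes *xp with the
	 * value it observed, so the loop below re-evaluates
	 * __sx_can_read() against current state without an extra read.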
*/ while (__sx_can_read(td, *xp, fp)) { if (atomic_fcmpset_acq_ptr(&sx->sx_lock, xp, *xp + SX_ONE_SHARER)) { if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR4(KTR_LOCK, "%s: %p succeed %p -> %p", __func__, sx, (void *)*xp, (void *)(*xp + SX_ONE_SHARER)); td->td_sx_slocks++; return (true); } } return (false); } static int __noinline _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF) { GIANT_DECLARE; struct thread *td; #ifdef ADAPTIVE_SX volatile struct thread *owner; u_int i, n, spintries = 0; bool adaptive; #endif #ifdef LOCK_PROFILING uint64_t waittime = 0; int contested = 0; #endif int error = 0; #if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS) struct lock_delay_arg lda; #endif #ifdef KDTRACE_HOOKS u_int sleep_cnt = 0; int64_t sleep_time = 0; int64_t all_time = 0; #endif #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) uintptr_t state = 0; #endif int extra_work = 0; td = curthread; #ifdef KDTRACE_HOOKS if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) { if (__sx_slock_try(sx, td, &x, false LOCK_FILE_LINE_ARG)) goto out_lockstat; extra_work = 1; all_time -= lockstat_nsecs(&sx->lock_object); state = x; } #endif #ifdef LOCK_PROFILING extra_work = 1; state = x; #endif if (SCHEDULER_STOPPED()) return (0); #if defined(ADAPTIVE_SX) lock_delay_arg_init(&lda, &sx_delay); #elif defined(KDTRACE_HOOKS) lock_delay_arg_init(&lda, NULL); #endif #ifdef ADAPTIVE_SX adaptive = ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0); #endif #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif lock_profile_obtain_lock_failed(&sx->lock_object, &contested, &waittime); #ifndef INVARIANTS GIANT_SAVE(extra_work); #endif /* * As with rwlocks, we don't make any attempt to try to block * shared locks once there is an exclusive waiter. */ for (;;) { if (__sx_slock_try(sx, td, &x, false LOCK_FILE_LINE_ARG)) break; #ifdef INVARIANTS GIANT_SAVE(extra_work); #endif #ifdef KDTRACE_HOOKS lda.spin_cnt++; #endif #ifdef ADAPTIVE_SX if (__predict_false(!adaptive)) goto sleepq; /* * If the owner is running on another CPU, spin until * the owner stops running or the state of the lock * changes. */ if ((x & SX_LOCK_SHARED) == 0) { owner = lv_sx_owner(x); if (TD_IS_RUNNING(owner)) { if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR3(KTR_LOCK, "%s: spinning on %p held by %p", __func__, sx, owner); KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), "spinning", "lockname:\"%s\"", sx->lock_object.lo_name); do { lock_delay(&lda); x = SX_READ_VALUE(sx); owner = lv_sx_owner(x); } while (owner != NULL && TD_IS_RUNNING(owner)); KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), "running"); continue; } } else { if ((x & SX_LOCK_WRITE_SPINNER) && SX_SHARERS(x) == 0) { MPASS(!__sx_can_read(td, x, false)); lock_delay_spin(2); x = SX_READ_VALUE(sx); continue; } if (spintries < asx_retries) { KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), "spinning", "lockname:\"%s\"", sx->lock_object.lo_name); n = SX_SHARERS(x); for (i = 0; i < asx_loops; i += n) { lock_delay_spin(n); x = SX_READ_VALUE(sx); if (!(x & SX_LOCK_SHARED)) break; n = SX_SHARERS(x); if (n == 0) break; if (__sx_can_read(td, x, false)) break; } #ifdef KDTRACE_HOOKS lda.spin_cnt += i; #endif KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), "running"); if (i < asx_loops) continue; } } sleepq: #endif /* * Some other thread already has an exclusive lock, so * start the process of blocking. 
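		 *
		 * If the caller requested an interruptible sleep
		 * (SX_INTERRUPTIBLE, as used by sx_slock_sig(9)), the
		 * sleepq_wait_sig() below may return an error, in which
		 * case we bail out and report it rather than retrying.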
*/ sleepq_lock(&sx->lock_object); x = SX_READ_VALUE(sx); retry_sleepq: if (((x & SX_LOCK_WRITE_SPINNER) && SX_SHARERS(x) == 0) || __sx_can_read(td, x, false)) { sleepq_release(&sx->lock_object); continue; } #ifdef ADAPTIVE_SX /* * If the owner is running on another CPU, spin until * the owner stops running or the state of the lock * changes. */ if (!(x & SX_LOCK_SHARED) && adaptive) { owner = (struct thread *)SX_OWNER(x); if (TD_IS_RUNNING(owner)) { sleepq_release(&sx->lock_object); x = SX_READ_VALUE(sx); continue; } } #endif /* * Try to set the SX_LOCK_SHARED_WAITERS flag. If we * fail to set it drop the sleep queue lock and loop * back. */ if (!(x & SX_LOCK_SHARED_WAITERS)) { if (!atomic_fcmpset_ptr(&sx->sx_lock, &x, x | SX_LOCK_SHARED_WAITERS)) goto retry_sleepq; if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p set shared waiters flag", __func__, sx); } /* * Since we have been unable to acquire the shared lock, * we have to sleep. */ if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p blocking on sleep queue", __func__, sx); #ifdef KDTRACE_HOOKS sleep_time -= lockstat_nsecs(&sx->lock_object); #endif sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name, SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ? SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE); if (!(opts & SX_INTERRUPTIBLE)) sleepq_wait(&sx->lock_object, 0); else error = sleepq_wait_sig(&sx->lock_object, 0); #ifdef KDTRACE_HOOKS sleep_time += lockstat_nsecs(&sx->lock_object); sleep_cnt++; #endif if (error) { if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: interruptible sleep by %p suspended by signal", __func__, sx); break; } if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p resuming from sleep queue", __func__, sx); x = SX_READ_VALUE(sx); } #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) if (__predict_true(!extra_work)) return (error); #endif #ifdef KDTRACE_HOOKS all_time += lockstat_nsecs(&sx->lock_object); if (sleep_time) LOCKSTAT_RECORD4(sx__block, sx, sleep_time, LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0, (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state)); if (lda.spin_cnt > sleep_cnt) LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time, LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0, (state & SX_LOCK_SHARED) == 0 ? 
0 : SX_SHARERS(state)); out_lockstat: #endif if (error == 0) { LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx, contested, waittime, file, line, LOCKSTAT_READER); } GIANT_RESTORE(); return (error); } int _sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF) { struct thread *td; uintptr_t x; int error; KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() || !TD_IS_IDLETHREAD(curthread), ("sx_slock() by idle thread %p on sx %s @ %s:%d", curthread, sx->lock_object.lo_name, file, line)); KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, ("sx_slock() of destroyed sx @ %s:%d", file, line)); WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL); error = 0; td = curthread; x = SX_READ_VALUE(sx); if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__acquire) || !__sx_slock_try(sx, td, &x, true LOCK_FILE_LINE_ARG))) error = _sx_slock_hard(sx, opts, x LOCK_FILE_LINE_ARG); else lock_profile_obtain_lock_success(&sx->lock_object, 0, 0, file, line); if (error == 0) { LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line); WITNESS_LOCK(&sx->lock_object, 0, file, line); TD_LOCKS_INC(curthread); } return (error); } int _sx_slock(struct sx *sx, int opts, const char *file, int line) { return (_sx_slock_int(sx, opts LOCK_FILE_LINE_ARG)); } static bool __always_inline _sx_sunlock_try(struct sx *sx, struct thread *td, uintptr_t *xp) { for (;;) { if (SX_SHARERS(*xp) > 1 || !(*xp & SX_LOCK_WAITERS)) { if (atomic_fcmpset_rel_ptr(&sx->sx_lock, xp, *xp - SX_ONE_SHARER)) { if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR4(KTR_LOCK, "%s: %p succeeded %p -> %p", __func__, sx, (void *)*xp, (void *)(*xp - SX_ONE_SHARER)); td->td_sx_slocks--; return (true); } continue; } break; } return (false); } static void __noinline _sx_sunlock_hard(struct sx *sx, struct thread *td, uintptr_t x LOCK_FILE_LINE_ARG_DEF) { int wakeup_swapper = 0; uintptr_t setx, queue; if (SCHEDULER_STOPPED()) return; if (_sx_sunlock_try(sx, td, &x)) goto out_lockstat; sleepq_lock(&sx->lock_object); x = SX_READ_VALUE(sx); for (;;) { if (_sx_sunlock_try(sx, td, &x)) break; /* * Wake up semantic here is quite simple: * Just wake up all the exclusive waiters. * Note that the state of the lock could have changed, * so if it fails loop back and retry. 
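		 *
		 * The fcmpset below also republishes the waiter bit of the
		 * queue we are not waking, plus any SX_LOCK_WRITE_SPINNER
		 * bit, so that state is not lost when both readers and
		 * writers are queued.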
*/ setx = SX_LOCK_UNLOCKED; queue = SQ_SHARED_QUEUE; if (x & SX_LOCK_EXCLUSIVE_WAITERS) { setx |= (x & SX_LOCK_SHARED_WAITERS); queue = SQ_EXCLUSIVE_QUEUE; } setx |= (x & SX_LOCK_WRITE_SPINNER); if (!atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, setx)) continue; if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p waking up all thread on" "exclusive queue", __func__, sx); wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue); td->td_sx_slocks--; break; } sleepq_release(&sx->lock_object); if (wakeup_swapper) kick_proc0(); out_lockstat: LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER); } void _sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF) { struct thread *td; uintptr_t x; KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, ("sx_sunlock() of destroyed sx @ %s:%d", file, line)); _sx_assert(sx, SA_SLOCKED, file, line); WITNESS_UNLOCK(&sx->lock_object, 0, file, line); LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line); td = curthread; x = SX_READ_VALUE(sx); if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__release) || !_sx_sunlock_try(sx, td, &x))) _sx_sunlock_hard(sx, td, x LOCK_FILE_LINE_ARG); else lock_profile_release_lock(&sx->lock_object); TD_LOCKS_DEC(curthread); } void _sx_sunlock(struct sx *sx, const char *file, int line) { _sx_sunlock_int(sx LOCK_FILE_LINE_ARG); } #ifdef INVARIANT_SUPPORT #ifndef INVARIANTS #undef _sx_assert #endif /* * In the non-WITNESS case, sx_assert() can only detect that at least * *some* thread owns an slock, but it cannot guarantee that *this* * thread owns an slock. */ void _sx_assert(const struct sx *sx, int what, const char *file, int line) { #ifndef WITNESS int slocked = 0; #endif - if (panicstr != NULL) + if (SCHEDULER_STOPPED()) return; switch (what) { case SA_SLOCKED: case SA_SLOCKED | SA_NOTRECURSED: case SA_SLOCKED | SA_RECURSED: #ifndef WITNESS slocked = 1; /* FALLTHROUGH */ #endif case SA_LOCKED: case SA_LOCKED | SA_NOTRECURSED: case SA_LOCKED | SA_RECURSED: #ifdef WITNESS witness_assert(&sx->lock_object, what, file, line); #else /* * If some other thread has an exclusive lock or we * have one and are asserting a shared lock, fail. * Also, if no one has a lock at all, fail. */ if (sx->sx_lock == SX_LOCK_UNLOCKED || (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked || sx_xholder(sx) != curthread))) panic("Lock %s not %slocked @ %s:%d\n", sx->lock_object.lo_name, slocked ? "share " : "", file, line); if (!(sx->sx_lock & SX_LOCK_SHARED)) { if (sx_recursed(sx)) { if (what & SA_NOTRECURSED) panic("Lock %s recursed @ %s:%d\n", sx->lock_object.lo_name, file, line); } else if (what & SA_RECURSED) panic("Lock %s not recursed @ %s:%d\n", sx->lock_object.lo_name, file, line); } #endif break; case SA_XLOCKED: case SA_XLOCKED | SA_NOTRECURSED: case SA_XLOCKED | SA_RECURSED: if (sx_xholder(sx) != curthread) panic("Lock %s not exclusively locked @ %s:%d\n", sx->lock_object.lo_name, file, line); if (sx_recursed(sx)) { if (what & SA_NOTRECURSED) panic("Lock %s recursed @ %s:%d\n", sx->lock_object.lo_name, file, line); } else if (what & SA_RECURSED) panic("Lock %s not recursed @ %s:%d\n", sx->lock_object.lo_name, file, line); break; case SA_UNLOCKED: #ifdef WITNESS witness_assert(&sx->lock_object, what, file, line); #else /* * If we hold an exclusve lock fail. We can't * reliably check to see if we hold a shared lock or * not. 
*/ if (sx_xholder(sx) == curthread) panic("Lock %s exclusively locked @ %s:%d\n", sx->lock_object.lo_name, file, line); #endif break; default: panic("Unknown sx lock assertion: %d @ %s:%d", what, file, line); } } #endif /* INVARIANT_SUPPORT */ #ifdef DDB static void db_show_sx(const struct lock_object *lock) { struct thread *td; const struct sx *sx; sx = (const struct sx *)lock; db_printf(" state: "); if (sx->sx_lock == SX_LOCK_UNLOCKED) db_printf("UNLOCKED\n"); else if (sx->sx_lock == SX_LOCK_DESTROYED) { db_printf("DESTROYED\n"); return; } else if (sx->sx_lock & SX_LOCK_SHARED) db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock)); else { td = sx_xholder(sx); db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td, td->td_tid, td->td_proc->p_pid, td->td_name); if (sx_recursed(sx)) db_printf(" recursed: %d\n", sx->sx_recurse); } db_printf(" waiters: "); switch(sx->sx_lock & (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) { case SX_LOCK_SHARED_WAITERS: db_printf("shared\n"); break; case SX_LOCK_EXCLUSIVE_WAITERS: db_printf("exclusive\n"); break; case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS: db_printf("exclusive and shared\n"); break; default: db_printf("none\n"); } } /* * Check to see if a thread that is blocked on a sleep queue is actually * blocked on an sx lock. If so, output some details and return true. * If the lock has an exclusive owner, return that in *ownerp. */ int sx_chain(struct thread *td, struct thread **ownerp) { struct sx *sx; /* * Check to see if this thread is blocked on an sx lock. * First, we check the lock class. If that is ok, then we * compare the lock name against the wait message. */ sx = td->td_wchan; if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx || sx->lock_object.lo_name != td->td_wmesg) return (0); /* We think we have an sx lock, so output some details. */ db_printf("blocked on sx \"%s\" ", td->td_wmesg); *ownerp = sx_xholder(sx); if (sx->sx_lock & SX_LOCK_SHARED) db_printf("SLOCK (count %ju)\n", (uintmax_t)SX_SHARERS(sx->sx_lock)); else db_printf("XLOCK\n"); return (1); } #endif Index: projects/clang700-import/sys/kern/kern_thread.c =================================================================== --- projects/clang700-import/sys/kern/kern_thread.c (revision 340426) +++ projects/clang700-import/sys/kern/kern_thread.c (revision 340427) @@ -1,1264 +1,1267 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2001 Julian Elischer . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice(s), this list of conditions and the following disclaimer as * the first lines of this file unmodified other than the possible * addition of one or more copyright notices. * 2. Redistributions in binary form must reproduce the above copyright * notice(s), this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ #include "opt_witness.h" #include "opt_hwpmc_hooks.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HWPMC_HOOKS #include #endif #include #include #include #include #include /* * Asserts below verify the stability of struct thread and struct proc * layout, as exposed by KBI to modules. On head, the KBI is allowed * to drift, change to the structures must be accompanied by the * assert update. * * On the stable branches after KBI freeze, conditions must not be * violated. Typically new fields are moved to the end of the * structures. */ #ifdef __amd64__ _Static_assert(offsetof(struct thread, td_flags) == 0xfc, "struct thread KBI td_flags"); _Static_assert(offsetof(struct thread, td_pflags) == 0x104, "struct thread KBI td_pflags"); _Static_assert(offsetof(struct thread, td_frame) == 0x470, "struct thread KBI td_frame"); _Static_assert(offsetof(struct thread, td_emuldata) == 0x528, "struct thread KBI td_emuldata"); _Static_assert(offsetof(struct proc, p_flag) == 0xb0, "struct proc KBI p_flag"); _Static_assert(offsetof(struct proc, p_pid) == 0xbc, "struct proc KBI p_pid"); _Static_assert(offsetof(struct proc, p_filemon) == 0x3d0, "struct proc KBI p_filemon"); _Static_assert(offsetof(struct proc, p_comm) == 0x3e4, "struct proc KBI p_comm"); _Static_assert(offsetof(struct proc, p_emuldata) == 0x4b8, "struct proc KBI p_emuldata"); #endif #ifdef __i386__ _Static_assert(offsetof(struct thread, td_flags) == 0x98, "struct thread KBI td_flags"); _Static_assert(offsetof(struct thread, td_pflags) == 0xa0, "struct thread KBI td_pflags"); _Static_assert(offsetof(struct thread, td_frame) == 0x2e8, "struct thread KBI td_frame"); _Static_assert(offsetof(struct thread, td_emuldata) == 0x334, "struct thread KBI td_emuldata"); _Static_assert(offsetof(struct proc, p_flag) == 0x68, "struct proc KBI p_flag"); _Static_assert(offsetof(struct proc, p_pid) == 0x74, "struct proc KBI p_pid"); _Static_assert(offsetof(struct proc, p_filemon) == 0x27c, "struct proc KBI p_filemon"); _Static_assert(offsetof(struct proc, p_comm) == 0x28c, "struct proc KBI p_comm"); _Static_assert(offsetof(struct proc, p_emuldata) == 0x318, "struct proc KBI p_emuldata"); #endif SDT_PROVIDER_DECLARE(proc); SDT_PROBE_DEFINE(proc, , , lwp__exit); /* * thread related storage. 
*/ static uma_zone_t thread_zone; TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads); static struct mtx zombie_lock; MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN); static void thread_zombie(struct thread *); static int thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary); #define TID_BUFFER_SIZE 1024 struct mtx tid_lock; static struct unrhdr *tid_unrhdr; static lwpid_t tid_buffer[TID_BUFFER_SIZE]; static int tid_head, tid_tail; static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash"); struct tidhashhead *tidhashtbl; u_long tidhash; struct rwlock tidhash_lock; EVENTHANDLER_LIST_DEFINE(thread_ctor); EVENTHANDLER_LIST_DEFINE(thread_dtor); EVENTHANDLER_LIST_DEFINE(thread_init); EVENTHANDLER_LIST_DEFINE(thread_fini); static lwpid_t tid_alloc(void) { lwpid_t tid; tid = alloc_unr(tid_unrhdr); if (tid != -1) return (tid); mtx_lock(&tid_lock); if (tid_head == tid_tail) { mtx_unlock(&tid_lock); return (-1); } tid = tid_buffer[tid_head]; tid_head = (tid_head + 1) % TID_BUFFER_SIZE; mtx_unlock(&tid_lock); return (tid); } static void tid_free(lwpid_t tid) { lwpid_t tmp_tid = -1; mtx_lock(&tid_lock); if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) { tmp_tid = tid_buffer[tid_head]; tid_head = (tid_head + 1) % TID_BUFFER_SIZE; } tid_buffer[tid_tail] = tid; tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE; mtx_unlock(&tid_lock); if (tmp_tid != -1) free_unr(tid_unrhdr, tmp_tid); } /* * Prepare a thread for use. */ static int thread_ctor(void *mem, int size, void *arg, int flags) { struct thread *td; td = (struct thread *)mem; td->td_state = TDS_INACTIVE; td->td_oncpu = NOCPU; td->td_tid = tid_alloc(); /* * Note that td_critnest begins life as 1 because the thread is not * running and is thereby implicitly waiting to be on the receiving * end of a context switch. */ td->td_critnest = 1; td->td_lend_user_pri = PRI_MAX; EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td); #ifdef AUDIT audit_thread_alloc(td); #endif umtx_thread_alloc(td); return (0); } /* * Reclaim a thread after use. */ static void thread_dtor(void *mem, int size, void *arg) { struct thread *td; td = (struct thread *)mem; #ifdef INVARIANTS /* Verify that this thread is in a safe state to free. */ switch (td->td_state) { case TDS_INHIBITED: case TDS_RUNNING: case TDS_CAN_RUN: case TDS_RUNQ: /* * We must never unlink a thread that is in one of * these states, because it is currently active. */ panic("bad state for thread unlinking"); /* NOTREACHED */ case TDS_INACTIVE: break; default: panic("bad thread state"); /* NOTREACHED */ } #endif #ifdef AUDIT audit_thread_free(td); #endif /* Free all OSD associated to this thread. */ osd_thread_exit(td); td_softdep_cleanup(td); MPASS(td->td_su == NULL); EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td); tid_free(td->td_tid); } /* * Initialize type-stable parts of a thread (when newly created). */ static int thread_init(void *mem, int size, int flags) { struct thread *td; td = (struct thread *)mem; td->td_sleepqueue = sleepq_alloc(); td->td_turnstile = turnstile_alloc(); td->td_rlqe = NULL; EVENTHANDLER_DIRECT_INVOKE(thread_init, td); umtx_thread_init(td); + epoch_thread_init(td); td->td_kstack = 0; td->td_sel = NULL; return (0); } /* * Tear down type-stable parts of a thread (just before being discarded). 
*/ static void thread_fini(void *mem, int size) { struct thread *td; td = (struct thread *)mem; EVENTHANDLER_DIRECT_INVOKE(thread_fini, td); rlqentry_free(td->td_rlqe); turnstile_free(td->td_turnstile); sleepq_free(td->td_sleepqueue); umtx_thread_fini(td); + epoch_thread_fini(td); seltdfini(td); } /* * For a newly created process, * link up all the structures and its initial threads etc. * called from: * {arch}/{arch}/machdep.c {arch}_init(), init386() etc. * proc_dtor() (should go away) * proc_init() */ void proc_linkup0(struct proc *p, struct thread *td) { TAILQ_INIT(&p->p_threads); /* all threads in proc */ proc_linkup(p, td); } void proc_linkup(struct proc *p, struct thread *td) { sigqueue_init(&p->p_sigqueue, p); p->p_ksi = ksiginfo_alloc(1); if (p->p_ksi != NULL) { /* XXX p_ksi may be null if ksiginfo zone is not ready */ p->p_ksi->ksi_flags = KSI_EXT | KSI_INS; } LIST_INIT(&p->p_mqnotifier); p->p_numthreads = 0; thread_link(td, p); } /* * Initialize global thread allocation resources. */ void threadinit(void) { mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF); /* * pid_max cannot be greater than PID_MAX. * leave one number for thread0. */ tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock); thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(), thread_ctor, thread_dtor, thread_init, thread_fini, 32 - 1, UMA_ZONE_NOFREE); tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash); rw_init(&tidhash_lock, "tidhash"); } /* * Place an unused thread on the zombie list. * Use the slpq as that must be unused by now. */ void thread_zombie(struct thread *td) { mtx_lock_spin(&zombie_lock); TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq); mtx_unlock_spin(&zombie_lock); } /* * Release a thread that has exited after cpu_throw(). */ void thread_stash(struct thread *td) { atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1); thread_zombie(td); } /* * Reap zombie resources. */ void thread_reap(void) { struct thread *td_first, *td_next; /* * Don't even bother to lock if none at this instant, * we really don't care about the next instant. */ if (!TAILQ_EMPTY(&zombie_threads)) { mtx_lock_spin(&zombie_lock); td_first = TAILQ_FIRST(&zombie_threads); if (td_first) TAILQ_INIT(&zombie_threads); mtx_unlock_spin(&zombie_lock); while (td_first) { td_next = TAILQ_NEXT(td_first, td_slpq); thread_cow_free(td_first); thread_free(td_first); td_first = td_next; } } } /* * Allocate a thread. */ struct thread * thread_alloc(int pages) { struct thread *td; thread_reap(); /* check if any zombies to get */ td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK); KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack")); if (!vm_thread_new(td, pages)) { uma_zfree(thread_zone, td); return (NULL); } cpu_thread_alloc(td); return (td); } int thread_alloc_stack(struct thread *td, int pages) { KASSERT(td->td_kstack == 0, ("thread_alloc_stack called on a thread with kstack")); if (!vm_thread_new(td, pages)) return (0); cpu_thread_alloc(td); return (1); } /* * Deallocate a thread. 
*/ void thread_free(struct thread *td) { lock_profile_thread_exit(td); if (td->td_cpuset) cpuset_rel(td->td_cpuset); td->td_cpuset = NULL; cpu_thread_free(td); if (td->td_kstack != 0) vm_thread_dispose(td); callout_drain(&td->td_slpcallout); uma_zfree(thread_zone, td); } void thread_cow_get_proc(struct thread *newtd, struct proc *p) { PROC_LOCK_ASSERT(p, MA_OWNED); newtd->td_ucred = crhold(p->p_ucred); newtd->td_limit = lim_hold(p->p_limit); newtd->td_cowgen = p->p_cowgen; } void thread_cow_get(struct thread *newtd, struct thread *td) { newtd->td_ucred = crhold(td->td_ucred); newtd->td_limit = lim_hold(td->td_limit); newtd->td_cowgen = td->td_cowgen; } void thread_cow_free(struct thread *td) { if (td->td_ucred != NULL) crfree(td->td_ucred); if (td->td_limit != NULL) lim_free(td->td_limit); } void thread_cow_update(struct thread *td) { struct proc *p; struct ucred *oldcred; struct plimit *oldlimit; p = td->td_proc; oldcred = NULL; oldlimit = NULL; PROC_LOCK(p); if (td->td_ucred != p->p_ucred) { oldcred = td->td_ucred; td->td_ucred = crhold(p->p_ucred); } if (td->td_limit != p->p_limit) { oldlimit = td->td_limit; td->td_limit = lim_hold(p->p_limit); } td->td_cowgen = p->p_cowgen; PROC_UNLOCK(p); if (oldcred != NULL) crfree(oldcred); if (oldlimit != NULL) lim_free(oldlimit); } /* * Discard the current thread and exit from its context. * Always called with scheduler locked. * * Because we can't free a thread while we're operating under its context, * push the current thread into our CPU's deadthread holder. This means * we needn't worry about someone else grabbing our context before we * do a cpu_throw(). */ void thread_exit(void) { uint64_t runtime, new_switchtime; struct thread *td; struct thread *td2; struct proc *p; int wakeup_swapper; td = curthread; p = td->td_proc; PROC_SLOCK_ASSERT(p, MA_OWNED); mtx_assert(&Giant, MA_NOTOWNED); PROC_LOCK_ASSERT(p, MA_OWNED); KASSERT(p != NULL, ("thread exiting without a process")); CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td, (long)p->p_pid, td->td_name); SDT_PROBE0(proc, , , lwp__exit); KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending")); /* * drop FPU & debug register state storage, or any other * architecture specific resources that * would not be on a new untouched process. */ cpu_thread_exit(td); /* * The last thread is left attached to the process * So that the whole bundle gets recycled. Skip * all this stuff if we never had threads. * EXIT clears all sign of other threads when * it goes to single threading, so the last thread always * takes the short path. */ if (p->p_flag & P_HADTHREADS) { if (p->p_numthreads > 1) { atomic_add_int(&td->td_proc->p_exitthreads, 1); thread_unlink(td); td2 = FIRST_THREAD_IN_PROC(p); sched_exit_thread(td2, td); /* * The test below is NOT true if we are the * sole exiting thread. P_STOPPED_SINGLE is unset * in exit1() after it is the only survivor. */ if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { if (p->p_numthreads == p->p_suspcount) { thread_lock(p->p_singlethread); wakeup_swapper = thread_unsuspend_one( p->p_singlethread, p, false); thread_unlock(p->p_singlethread); if (wakeup_swapper) kick_proc0(); } } PCPU_SET(deadthread, td); } else { /* * The last thread is exiting.. but not through exit() */ panic ("thread_exit: Last thread exiting on its own"); } } #ifdef HWPMC_HOOKS /* * If this thread is part of a process that is being tracked by hwpmc(4), * inform the module of the thread's impending exit. 
*/ if (PMC_PROC_IS_USING_PMCS(td->td_proc)) { PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT, NULL); } else if (PMC_SYSTEM_SAMPLING_ACTIVE()) PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT_LOG, NULL); #endif PROC_UNLOCK(p); PROC_STATLOCK(p); thread_lock(td); PROC_SUNLOCK(p); /* Do the same timestamp bookkeeping that mi_switch() would do. */ new_switchtime = cpu_ticks(); runtime = new_switchtime - PCPU_GET(switchtime); td->td_runtime += runtime; td->td_incruntime += runtime; PCPU_SET(switchtime, new_switchtime); PCPU_SET(switchticks, ticks); VM_CNT_INC(v_swtch); /* Save our resource usage in our process. */ td->td_ru.ru_nvcsw++; ruxagg(p, td); rucollect(&p->p_ru, &td->td_ru); PROC_STATUNLOCK(p); td->td_state = TDS_INACTIVE; #ifdef WITNESS witness_thread_exit(td); #endif CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td); sched_throw(td); panic("I'm a teapot!"); /* NOTREACHED */ } /* * Do any thread specific cleanups that may be needed in wait() * called with Giant, proc and schedlock not held. */ void thread_wait(struct proc *p) { struct thread *td; mtx_assert(&Giant, MA_NOTOWNED); KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()")); KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking")); td = FIRST_THREAD_IN_PROC(p); /* Lock the last thread so we spin until it exits cpu_throw(). */ thread_lock(td); thread_unlock(td); lock_profile_thread_exit(td); cpuset_rel(td->td_cpuset); td->td_cpuset = NULL; cpu_thread_clean(td); thread_cow_free(td); callout_drain(&td->td_slpcallout); thread_reap(); /* check for zombie threads etc. */ } /* * Link a thread to a process. * set up anything that needs to be initialized for it to * be used by the process. */ void thread_link(struct thread *td, struct proc *p) { /* * XXX This can't be enabled because it's called for proc0 before * its lock has been created. * PROC_LOCK_ASSERT(p, MA_OWNED); */ td->td_state = TDS_INACTIVE; td->td_proc = p; td->td_flags = TDF_INMEM; LIST_INIT(&td->td_contested); LIST_INIT(&td->td_lprof[0]); LIST_INIT(&td->td_lprof[1]); sigqueue_init(&td->td_sigqueue, p); callout_init(&td->td_slpcallout, 1); TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist); p->p_numthreads++; } /* * Called from: * thread_exit() */ void thread_unlink(struct thread *td) { struct proc *p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); TAILQ_REMOVE(&p->p_threads, td, td_plist); p->p_numthreads--; /* could clear a few other things here */ /* Must NOT clear links to proc! */ } static int calc_remaining(struct proc *p, int mode) { int remaining; PROC_LOCK_ASSERT(p, MA_OWNED); PROC_SLOCK_ASSERT(p, MA_OWNED); if (mode == SINGLE_EXIT) remaining = p->p_numthreads; else if (mode == SINGLE_BOUNDARY) remaining = p->p_numthreads - p->p_boundary_count; else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC) remaining = p->p_numthreads - p->p_suspcount; else panic("calc_remaining: wrong mode %d", mode); return (remaining); } static int remain_for_mode(int mode) { return (mode == SINGLE_ALLPROC ? 
0 : 1); } static int weed_inhib(int mode, struct thread *td2, struct proc *p) { int wakeup_swapper; PROC_LOCK_ASSERT(p, MA_OWNED); PROC_SLOCK_ASSERT(p, MA_OWNED); THREAD_LOCK_ASSERT(td2, MA_OWNED); wakeup_swapper = 0; switch (mode) { case SINGLE_EXIT: if (TD_IS_SUSPENDED(td2)) wakeup_swapper |= thread_unsuspend_one(td2, p, true); if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) wakeup_swapper |= sleepq_abort(td2, EINTR); break; case SINGLE_BOUNDARY: case SINGLE_NO_EXIT: if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0) wakeup_swapper |= thread_unsuspend_one(td2, p, false); if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) wakeup_swapper |= sleepq_abort(td2, ERESTART); break; case SINGLE_ALLPROC: /* * ALLPROC suspend tries to avoid spurious EINTR for * threads sleeping interruptable, by suspending the * thread directly, similarly to sig_suspend_threads(). * Since such sleep is not performed at the user * boundary, TDF_BOUNDARY flag is not set, and TDF_ALLPROCSUSP * is used to avoid immediate un-suspend. */ if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY | TDF_ALLPROCSUSP)) == 0) wakeup_swapper |= thread_unsuspend_one(td2, p, false); if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) { if ((td2->td_flags & TDF_SBDRY) == 0) { thread_suspend_one(td2); td2->td_flags |= TDF_ALLPROCSUSP; } else { wakeup_swapper |= sleepq_abort(td2, ERESTART); } } break; } return (wakeup_swapper); } /* * Enforce single-threading. * * Returns 1 if the caller must abort (another thread is waiting to * exit the process or similar). Process is locked! * Returns 0 when you are successfully the only thread running. * A process has successfully single threaded in the suspend mode when * There are no threads in user mode. Threads in the kernel must be * allowed to continue until they get to the user boundary. They may even * copy out their return values and data before suspending. They may however be * accelerated in reaching the user boundary as we will wake up * any sleeping threads that are interruptable. (PCATCH). */ int thread_single(struct proc *p, int mode) { struct thread *td; struct thread *td2; int remaining, wakeup_swapper; td = curthread; KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY || mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT, ("invalid mode %d", mode)); /* * If allowing non-ALLPROC singlethreading for non-curproc * callers, calc_remaining() and remain_for_mode() should be * adjusted to also account for td->td_proc != p. For now * this is not implemented because it is not used. */ KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) || (mode != SINGLE_ALLPROC && td->td_proc == p), ("mode %d proc %p curproc %p", mode, p, td->td_proc)); mtx_assert(&Giant, MA_NOTOWNED); PROC_LOCK_ASSERT(p, MA_OWNED); if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC) return (0); /* Is someone already single threading? 
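thread_single() is essentially a drain barrier: the caller marks the process as single-threading, nudges every other thread toward a safe stopping point, and then sleeps via thread_suspend_switch() until calc_remaining() drops to the per-mode target. The userland sketch below models only that drain-and-release structure with pthreads (a condition variable where the kernel reschedules); the names are hypothetical and none of the per-mode subtleties (boundary counts, ALLPROC, exiting threads) are represented.

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
	static pthread_cond_t resume = PTHREAD_COND_INITIALIZER;
	static int nactive = 1;		/* coordinator counts as 1; workers increment on start */
	static bool single_threading;

	/* Rough analogue of thread_single(): wait until only we remain active. */
	static void
	single_begin(void)
	{
		pthread_mutex_lock(&lock);
		single_threading = true;
		while (nactive > 1)
			pthread_cond_wait(&drained, &lock);
		pthread_mutex_unlock(&lock);
	}

	/* Rough analogue of thread_single_end(): let the others run again. */
	static void
	single_end(void)
	{
		pthread_mutex_lock(&lock);
		single_threading = false;
		pthread_cond_broadcast(&resume);
		pthread_mutex_unlock(&lock);
	}

	/* Other threads call this at safe points (cf. thread_suspend_check()). */
	static void
	park_if_requested(void)
	{
		pthread_mutex_lock(&lock);
		while (single_threading) {
			nactive--;
			pthread_cond_signal(&drained);
			pthread_cond_wait(&resume, &lock);
			nactive++;
		}
		pthread_mutex_unlock(&lock);
	}

The real code additionally distinguishes why a thread is inhibited (sleeping interruptibly, already suspended, running on another CPU) and wakes, interrupts, or IPIs it accordingly via weed_inhib() and forward_signal().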
*/ if (p->p_singlethread != NULL && p->p_singlethread != td) return (1); if (mode == SINGLE_EXIT) { p->p_flag |= P_SINGLE_EXIT; p->p_flag &= ~P_SINGLE_BOUNDARY; } else { p->p_flag &= ~P_SINGLE_EXIT; if (mode == SINGLE_BOUNDARY) p->p_flag |= P_SINGLE_BOUNDARY; else p->p_flag &= ~P_SINGLE_BOUNDARY; } if (mode == SINGLE_ALLPROC) p->p_flag |= P_TOTAL_STOP; p->p_flag |= P_STOPPED_SINGLE; PROC_SLOCK(p); p->p_singlethread = td; remaining = calc_remaining(p, mode); while (remaining != remain_for_mode(mode)) { if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE) goto stopme; wakeup_swapper = 0; FOREACH_THREAD_IN_PROC(p, td2) { if (td2 == td) continue; thread_lock(td2); td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK; if (TD_IS_INHIBITED(td2)) { wakeup_swapper |= weed_inhib(mode, td2, p); #ifdef SMP } else if (TD_IS_RUNNING(td2) && td != td2) { forward_signal(td2); #endif } thread_unlock(td2); } if (wakeup_swapper) kick_proc0(); remaining = calc_remaining(p, mode); /* * Maybe we suspended some threads.. was it enough? */ if (remaining == remain_for_mode(mode)) break; stopme: /* * Wake us up when everyone else has suspended. * In the mean time we suspend as well. */ thread_suspend_switch(td, p); remaining = calc_remaining(p, mode); } if (mode == SINGLE_EXIT) { /* * Convert the process to an unthreaded process. The * SINGLE_EXIT is called by exit1() or execve(), in * both cases other threads must be retired. */ KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads")); p->p_singlethread = NULL; p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS); /* * Wait for any remaining threads to exit cpu_throw(). */ while (p->p_exitthreads != 0) { PROC_SUNLOCK(p); PROC_UNLOCK(p); sched_relinquish(td); PROC_LOCK(p); PROC_SLOCK(p); } } else if (mode == SINGLE_BOUNDARY) { /* * Wait until all suspended threads are removed from * the processors. The thread_suspend_check() * increments p_boundary_count while it is still * running, which makes it possible for the execve() * to destroy vmspace while our other threads are * still using the address space. * * We lock the thread, which is only allowed to * succeed after context switch code finished using * the address space. */ FOREACH_THREAD_IN_PROC(p, td2) { if (td2 == td) continue; thread_lock(td2); KASSERT((td2->td_flags & TDF_BOUNDARY) != 0, ("td %p not on boundary", td2)); KASSERT(TD_IS_SUSPENDED(td2), ("td %p is not suspended", td2)); thread_unlock(td2); } } PROC_SUNLOCK(p); return (0); } bool thread_suspend_check_needed(void) { struct proc *p; struct thread *td; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 && (td->td_dbgflags & TDB_SUSPEND) != 0)); } /* * Called in from locations that can safely check to see * whether we have to suspend or at least throttle for a * single-thread event (e.g. fork). * * Such locations include userret(). * If the "return_instead" argument is non zero, the thread must be able to * accept 0 (caller may continue), or 1 (caller must abort) as a result. * * The 'return_instead' argument tells the function if it may do a * thread_exit() or suspend, or whether the caller must abort and back * out instead. * * If the thread that set the single_threading request has set the * P_SINGLE_EXIT bit in the process flags then this call will never return * if 'return_instead' is false, but will exit. 
* * P_SINGLE_EXIT | return_instead == 0| return_instead != 0 *---------------+--------------------+--------------------- * 0 | returns 0 | returns 0 or 1 * | when ST ends | immediately *---------------+--------------------+--------------------- * 1 | thread exits | returns 1 * | | immediately * 0 = thread_exit() or suspension ok, * other = return error instead of stopping the thread. * * While a full suspension is under effect, even a single threading * thread would be suspended if it made this call (but it shouldn't). * This call should only be made from places where * thread_exit() would be safe as that may be the outcome unless * return_instead is set. */ int thread_suspend_check(int return_instead) { struct thread *td; struct proc *p; int wakeup_swapper; td = curthread; p = td->td_proc; mtx_assert(&Giant, MA_NOTOWNED); PROC_LOCK_ASSERT(p, MA_OWNED); while (thread_suspend_check_needed()) { if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { KASSERT(p->p_singlethread != NULL, ("singlethread not set")); /* * The only suspension in action is a * single-threading. Single threader need not stop. * It is safe to access p->p_singlethread unlocked * because it can only be set to our address by us. */ if (p->p_singlethread == td) return (0); /* Exempt from stopping. */ } if ((p->p_flag & P_SINGLE_EXIT) && return_instead) return (EINTR); /* Should we goto user boundary if we didn't come from there? */ if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE && (p->p_flag & P_SINGLE_BOUNDARY) && return_instead) return (ERESTART); /* * Ignore suspend requests if they are deferred. */ if ((td->td_flags & TDF_SBDRY) != 0) { KASSERT(return_instead, ("TDF_SBDRY set for unsafe thread_suspend_check")); KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) != (TDF_SEINTR | TDF_SERESTART), ("both TDF_SEINTR and TDF_SERESTART")); return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0); } /* * If the process is waiting for us to exit, * this thread should just suicide. * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE. */ if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) { PROC_UNLOCK(p); /* * Allow Linux emulation layer to do some work * before thread suicide. */ if (__predict_false(p->p_sysent->sv_thread_detach != NULL)) (p->p_sysent->sv_thread_detach)(td); umtx_thread_exit(td); kern_thr_exit(td); panic("stopped thread did not exit"); } PROC_SLOCK(p); thread_stopped(p); if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { if (p->p_numthreads == p->p_suspcount + 1) { thread_lock(p->p_singlethread); wakeup_swapper = thread_unsuspend_one( p->p_singlethread, p, false); thread_unlock(p->p_singlethread); if (wakeup_swapper) kick_proc0(); } } PROC_UNLOCK(p); thread_lock(td); /* * When a thread suspends, it just * gets taken off all queues. */ thread_suspend_one(td); if (return_instead == 0) { p->p_boundary_count++; td->td_flags |= TDF_BOUNDARY; } PROC_SUNLOCK(p); mi_switch(SW_INVOL | SWT_SUSPEND, NULL); thread_unlock(td); PROC_LOCK(p); } return (0); } void thread_suspend_switch(struct thread *td, struct proc *p) { KASSERT(!TD_IS_SUSPENDED(td), ("already suspended")); PROC_LOCK_ASSERT(p, MA_OWNED); PROC_SLOCK_ASSERT(p, MA_OWNED); /* * We implement thread_suspend_one in stages here to avoid * dropping the proc lock while the thread lock is owned. 
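The P_SINGLE_EXIT/return_instead table in the thread_suspend_check() comment above compresses several flag tests. Restated as a tiny decision helper (a hypothetical function for illustration only, which ignores the deferred-suspension TDF_SBDRY path handled by the real code), it reads:

	#include <stdbool.h>

	enum suspend_action { SUSPEND_PARK, SUSPEND_RETURN_EINTR,
	    SUSPEND_RETURN_ERESTART, SUSPEND_EXIT };

	static enum suspend_action
	suspend_decision(bool single_exit, bool single_boundary, bool return_instead)
	{
		if (single_exit && return_instead)
			return (SUSPEND_RETURN_EINTR);		/* caller backs out */
		if (single_boundary && return_instead)
			return (SUSPEND_RETURN_ERESTART);	/* restart at boundary */
		if (single_exit)
			return (SUSPEND_EXIT);			/* call never returns */
		return (SUSPEND_PARK);		/* suspend until single-threading ends */
	}

The P_SINGLE_EXIT row with return_instead == 0 is the case where the call does not return at all: the thread invokes kern_thr_exit() on behalf of the exiting process, as shown in the code above.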
*/ if (p == td->td_proc) { thread_stopped(p); p->p_suspcount++; } PROC_UNLOCK(p); thread_lock(td); td->td_flags &= ~TDF_NEEDSUSPCHK; TD_SET_SUSPENDED(td); sched_sleep(td, 0); PROC_SUNLOCK(p); DROP_GIANT(); mi_switch(SW_VOL | SWT_SUSPEND, NULL); thread_unlock(td); PICKUP_GIANT(); PROC_LOCK(p); PROC_SLOCK(p); } void thread_suspend_one(struct thread *td) { struct proc *p; p = td->td_proc; PROC_SLOCK_ASSERT(p, MA_OWNED); THREAD_LOCK_ASSERT(td, MA_OWNED); KASSERT(!TD_IS_SUSPENDED(td), ("already suspended")); p->p_suspcount++; td->td_flags &= ~TDF_NEEDSUSPCHK; TD_SET_SUSPENDED(td); sched_sleep(td, 0); } static int thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary) { THREAD_LOCK_ASSERT(td, MA_OWNED); KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended")); TD_CLR_SUSPENDED(td); td->td_flags &= ~TDF_ALLPROCSUSP; if (td->td_proc == p) { PROC_SLOCK_ASSERT(p, MA_OWNED); p->p_suspcount--; if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) { td->td_flags &= ~TDF_BOUNDARY; p->p_boundary_count--; } } return (setrunnable(td)); } /* * Allow all threads blocked by single threading to continue running. */ void thread_unsuspend(struct proc *p) { struct thread *td; int wakeup_swapper; PROC_LOCK_ASSERT(p, MA_OWNED); PROC_SLOCK_ASSERT(p, MA_OWNED); wakeup_swapper = 0; if (!P_SHOULDSTOP(p)) { FOREACH_THREAD_IN_PROC(p, td) { thread_lock(td); if (TD_IS_SUSPENDED(td)) { wakeup_swapper |= thread_unsuspend_one(td, p, true); } thread_unlock(td); } } else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE && p->p_numthreads == p->p_suspcount) { /* * Stopping everything also did the job for the single * threading request. Now we've downgraded to single-threaded, * let it continue. */ if (p->p_singlethread->td_proc == p) { thread_lock(p->p_singlethread); wakeup_swapper = thread_unsuspend_one( p->p_singlethread, p, false); thread_unlock(p->p_singlethread); } } if (wakeup_swapper) kick_proc0(); } /* * End the single threading mode.. */ void thread_single_end(struct proc *p, int mode) { struct thread *td; int wakeup_swapper; KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY || mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT, ("invalid mode %d", mode)); PROC_LOCK_ASSERT(p, MA_OWNED); KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) || (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0), ("mode %d does not match P_TOTAL_STOP", mode)); KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread, ("thread_single_end from other thread %p %p", curthread, p->p_singlethread)); KASSERT(mode != SINGLE_BOUNDARY || (p->p_flag & P_SINGLE_BOUNDARY) != 0, ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag)); p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY | P_TOTAL_STOP); PROC_SLOCK(p); p->p_singlethread = NULL; wakeup_swapper = 0; /* * If there are other threads they may now run, * unless of course there is a blanket 'stop order' * on the process. The single threader must be allowed * to continue however as this is a bad place to stop. 
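A recurring pattern in thread_unsuspend(), thread_single_end() and the suspend-check paths above is that the return values of thread_unsuspend_one()/setrunnable() are ORed into a single wakeup_swapper flag and kick_proc0() is called once at the end, instead of kicking the swapper for every thread made runnable. A minimal standalone illustration of that aggregate-then-notify idea, with hypothetical stand-ins for setrunnable() and kick_proc0():

	#include <stdbool.h>
	#include <stdio.h>

	static bool
	make_runnable(int tid)
	{
		printf("thread %d runnable\n", tid);
		return (tid % 3 == 0);	/* pretend some wakeups need the swapper */
	}

	static void
	kick_swapper(void)
	{
		printf("swapper kicked once\n");
	}

	int
	main(void)
	{
		bool need_kick = false;
		int tid;

		for (tid = 0; tid < 8; tid++)
			need_kick |= make_runnable(tid);	/* aggregate */
		if (need_kick)
			kick_swapper();				/* single notification */
		return (0);
	}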
*/ if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) { FOREACH_THREAD_IN_PROC(p, td) { thread_lock(td); if (TD_IS_SUSPENDED(td)) { wakeup_swapper |= thread_unsuspend_one(td, p, mode == SINGLE_BOUNDARY); } thread_unlock(td); } } KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0, ("inconsistent boundary count %d", p->p_boundary_count)); PROC_SUNLOCK(p); if (wakeup_swapper) kick_proc0(); } struct thread * thread_find(struct proc *p, lwpid_t tid) { struct thread *td; PROC_LOCK_ASSERT(p, MA_OWNED); FOREACH_THREAD_IN_PROC(p, td) { if (td->td_tid == tid) break; } return (td); } /* Locate a thread by number; return with proc lock held. */ struct thread * tdfind(lwpid_t tid, pid_t pid) { #define RUN_THRESH 16 struct thread *td; int run = 0; rw_rlock(&tidhash_lock); LIST_FOREACH(td, TIDHASH(tid), td_hash) { if (td->td_tid == tid) { if (pid != -1 && td->td_proc->p_pid != pid) { td = NULL; break; } PROC_LOCK(td->td_proc); if (td->td_proc->p_state == PRS_NEW) { PROC_UNLOCK(td->td_proc); td = NULL; break; } if (run > RUN_THRESH) { if (rw_try_upgrade(&tidhash_lock)) { LIST_REMOVE(td, td_hash); LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash); rw_wunlock(&tidhash_lock); return (td); } } break; } run++; } rw_runlock(&tidhash_lock); return (td); } void tidhash_add(struct thread *td) { rw_wlock(&tidhash_lock); LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash); rw_wunlock(&tidhash_lock); } void tidhash_remove(struct thread *td) { rw_wlock(&tidhash_lock); LIST_REMOVE(td, td_hash); rw_wunlock(&tidhash_lock); } Index: projects/clang700-import/sys/kern/makesyscalls.sh =================================================================== --- projects/clang700-import/sys/kern/makesyscalls.sh (revision 340426) +++ projects/clang700-import/sys/kern/makesyscalls.sh (revision 340427) @@ -1,767 +1,770 @@ #! 
/bin/sh - # @(#)makesyscalls.sh 8.1 (Berkeley) 6/10/93 # $FreeBSD$ set -e # name of compat options: compat=COMPAT_43 compat4=COMPAT_FREEBSD4 compat6=COMPAT_FREEBSD6 compat7=COMPAT_FREEBSD7 compat10=COMPAT_FREEBSD10 compat11=COMPAT_FREEBSD11 # output files: sysnames="syscalls.c" sysproto="../sys/sysproto.h" sysproto_h=_SYS_SYSPROTO_H_ syshdr="../sys/syscall.h" sysmk="../sys/syscall.mk" syssw="init_sysent.c" syscallprefix="SYS_" switchname="sysent" namesname="syscallnames" systrace="systrace_args.c" # tmp files: sysaue="sysent.aue.$$" sysdcl="sysent.dcl.$$" syscompat="sysent.compat.$$" syscompatdcl="sysent.compatdcl.$$" syscompat4="sysent.compat4.$$" syscompat4dcl="sysent.compat4dcl.$$" syscompat6="sysent.compat6.$$" syscompat6dcl="sysent.compat6dcl.$$" syscompat7="sysent.compat7.$$" syscompat7dcl="sysent.compat7dcl.$$" syscompat10="sysent.compat10.$$" syscompat10dcl="sysent.compat10dcl.$$" syscompat11="sysent.compat11.$$" syscompat11dcl="sysent.compat11dcl.$$" sysent="sysent.switch.$$" sysinc="sysinc.switch.$$" sysarg="sysarg.switch.$$" sysprotoend="sysprotoend.$$" systracetmp="systrace.$$" systraceret="systraceret.$$" +capabilities_conf="capabilities.conf" -if [ -r capabilities.conf ]; then - capenabled=`egrep -v '^#|^$' capabilities.conf` - capenabled=`echo $capenabled | sed 's/ /,/g'` -else - capenabled="" -fi - trap "rm $sysaue $sysdcl $syscompat $syscompatdcl $syscompat4 $syscompat4dcl $syscompat6 $syscompat6dcl $syscompat7 $syscompat7dcl $syscompat10 $syscompat10dcl $syscompat11 $syscompat11dcl $sysent $sysinc $sysarg $sysprotoend $systracetmp $systraceret" 0 touch $sysaue $sysdcl $syscompat $syscompatdcl $syscompat4 $syscompat4dcl $syscompat6 $syscompat6dcl $syscompat7 $syscompat7dcl $syscompat10 $syscompat10dcl $syscompat11 $syscompat11dcl $sysent $sysinc $sysarg $sysprotoend $systracetmp $systraceret case $# in 0) echo "usage: $0 input-file " 1>&2 exit 1 ;; esac if [ -n "$2" ]; then . $2 fi +if [ -r $capabilities_conf ]; then + capenabled=`egrep -v '^#|^$' $capabilities_conf` + capenabled=`echo $capenabled | sed 's/ /,/g'` +else + capenabled="" +fi + sed -e ' # FreeBSD ID, includes, comments, and blank lines /.*\$FreeBSD/b done_joining /^[#;]/b done_joining /^$/b done_joining # Join lines ending in backslash :joining /\\$/{a\ N s/\\\n// b joining } # OBSOL, etc lines without function signatures /^[0-9][^{]*$/b done_joining # Join incomplete signatures. The { must appear on the first line # and the } must appear on the last line (modulo lines joined by # backslashes). 
/^[^}]*$/{a\ N s/\n// b joining } :done_joining 2,${ /^#/!s/\([{}()*,]\)/ \1 /g } ' < $1 | awk " BEGIN { sysaue = \"$sysaue\" sysdcl = \"$sysdcl\" sysproto = \"$sysproto\" sysprotoend = \"$sysprotoend\" sysproto_h = \"$sysproto_h\" syscompat = \"$syscompat\" syscompatdcl = \"$syscompatdcl\" syscompat4 = \"$syscompat4\" syscompat4dcl = \"$syscompat4dcl\" syscompat6 = \"$syscompat6\" syscompat6dcl = \"$syscompat6dcl\" syscompat7 = \"$syscompat7\" syscompat7dcl = \"$syscompat7dcl\" syscompat10 = \"$syscompat10\" syscompat10dcl = \"$syscompat10dcl\" syscompat11 = \"$syscompat11\" syscompat11dcl = \"$syscompat11dcl\" sysent = \"$sysent\" syssw = \"$syssw\" sysinc = \"$sysinc\" sysarg = \"$sysarg\" sysnames = \"$sysnames\" syshdr = \"$syshdr\" sysmk = \"$sysmk\" systrace = \"$systrace\" systracetmp = \"$systracetmp\" systraceret = \"$systraceret\" compat = \"$compat\" compat4 = \"$compat4\" compat6 = \"$compat6\" compat7 = \"$compat7\" compat10 = \"$compat10\" compat11 = \"$compat11\" syscallprefix = \"$syscallprefix\" switchname = \"$switchname\" namesname = \"$namesname\" infile = \"$1\" + abi_func_prefix = \"$abi_func_prefix\" capenabled_string = \"$capenabled\" "' split(capenabled_string, capenabled, ","); printf "\n/* The casts are bogus but will do for now. */\n" > sysent printf "struct sysent %s[] = {\n",switchname > sysent printf "/*\n * System call switch table.\n *\n" > syssw printf " * DO NOT EDIT-- this file is automatically generated.\n" > syssw printf " * $%s$\n", "FreeBSD" > syssw printf " */\n\n" > syssw printf "/*\n * System call prototypes.\n *\n" > sysarg printf " * DO NOT EDIT-- this file is automatically generated.\n" > sysarg printf " * $%s$\n", "FreeBSD" > sysarg printf " */\n\n" > sysarg printf "#ifndef %s\n", sysproto_h > sysarg printf "#define\t%s\n\n", sysproto_h > sysarg printf "#include \n" > sysarg printf "#include \n" > sysarg printf "#include \n" > sysarg printf "#include \n" > sysarg printf "#include \n" > sysarg printf "#include \n" > sysarg printf "#include \n" > sysarg printf "#include \n\n" > sysarg printf "#include \n\n" > sysarg printf "struct proc;\n\n" > sysarg printf "struct thread;\n\n" > sysarg printf "#define\tPAD_(t)\t(sizeof(register_t) <= sizeof(t) ? 
\\\n" > sysarg printf "\t\t0 : sizeof(register_t) - sizeof(t))\n\n" > sysarg printf "#if BYTE_ORDER == LITTLE_ENDIAN\n"> sysarg printf "#define\tPADL_(t)\t0\n" > sysarg printf "#define\tPADR_(t)\tPAD_(t)\n" > sysarg printf "#else\n" > sysarg printf "#define\tPADL_(t)\tPAD_(t)\n" > sysarg printf "#define\tPADR_(t)\t0\n" > sysarg printf "#endif\n\n" > sysarg printf "\n#ifdef %s\n\n", compat > syscompat printf "\n#ifdef %s\n\n", compat4 > syscompat4 printf "\n#ifdef %s\n\n", compat6 > syscompat6 printf "\n#ifdef %s\n\n", compat7 > syscompat7 printf "\n#ifdef %s\n\n", compat10 > syscompat10 printf "\n#ifdef %s\n\n", compat11 > syscompat11 printf "/*\n * System call names.\n *\n" > sysnames printf " * DO NOT EDIT-- this file is automatically generated.\n" > sysnames printf " * $%s$\n", "FreeBSD" > sysnames printf " */\n\n" > sysnames printf "const char *%s[] = {\n", namesname > sysnames printf "/*\n * System call numbers.\n *\n" > syshdr printf " * DO NOT EDIT-- this file is automatically generated.\n" > syshdr printf " * $%s$\n", "FreeBSD" > syshdr printf " */\n\n" > syshdr printf "# FreeBSD system call object files.\n" > sysmk printf "# DO NOT EDIT-- this file is automatically generated.\n" > sysmk printf "# $%s$\n", "FreeBSD" > sysmk printf "MIASM = " > sysmk printf "/*\n * System call argument to DTrace register array converstion.\n *\n" > systrace printf " * DO NOT EDIT-- this file is automatically generated.\n" > systrace printf " * $%s$\n", "FreeBSD" > systrace printf " * This file is part of the DTrace syscall provider.\n */\n\n" > systrace printf "static void\nsystrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)\n{\n" > systrace printf "\tint64_t *iarg = (int64_t *) uarg;\n" > systrace printf "\tswitch (sysnum) {\n" > systrace printf "static void\nsystrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)\n{\n\tconst char *p = NULL;\n" > systracetmp printf "\tswitch (sysnum) {\n" > systracetmp printf "static void\nsystrace_return_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)\n{\n\tconst char *p = NULL;\n" > systraceret printf "\tswitch (sysnum) {\n" > systraceret } NR == 1 { next } NF == 0 || $1 ~ /^;/ { next } $1 ~ /^#[ ]*include/ { print > sysinc next } $1 ~ /^#[ ]*if/ { print > sysent print > sysdcl print > sysarg print > syscompat print > syscompat4 print > syscompat6 print > syscompat7 print > syscompat10 print > syscompat11 print > sysnames print > systrace print > systracetmp print > systraceret savesyscall = syscall next } $1 ~ /^#[ ]*else/ { print > sysent print > sysdcl print > sysarg print > syscompat print > syscompat4 print > syscompat6 print > syscompat7 print > syscompat10 print > syscompat11 print > sysnames print > systrace print > systracetmp print > systraceret syscall = savesyscall next } $1 ~ /^#/ { print > sysent print > sysdcl print > sysarg print > syscompat print > syscompat4 print > syscompat6 print > syscompat7 print > syscompat10 print > syscompat11 print > sysnames print > systrace print > systracetmp print > systraceret next } # Returns true if the type "name" is the first flag in the type field function type(name, flags, n) { n = split($3, flags, /\|/) return (n > 0 && flags[1] == name) } # Returns true if the flag "name" is set in the type field function flag(name, flags, i, n) { n = split($3, flags, /\|/) for (i = 1; i <= n; i++) if (flags[i] == name) return 1 return 0 } { n = split($1, syscall_range, /-/) if (n == 1) { syscall_range[2] = syscall_range[1] } else if (n == 2) { if (!type("UNIMPL")) { printf "%s: 
line %d: range permitted only with UNIMPL\n", infile, NR exit 1 } } else { printf "%s: line %d: invalid syscall number or range %s\n", infile, NR, $1 exit 1 } } syscall != syscall_range[1] { printf "%s: line %d: syscall number out of sync at %d\n", infile, NR, syscall printf "line is:\n" print exit 1 } function align_sysent_comment(column) { printf("\t") > sysent column = column + 8 - column % 8 while (column < 56) { printf("\t") > sysent column = column + 8 } } function parserr(was, wanted) { printf "%s: line %d: unexpected %s (expected %s)\n", infile, NR, was, wanted exit 1 } function parseline() { f=4 # toss number, type, audit event ret_inc = 0 argc= 0; argssize = "0" thr_flag = "SY_THR_STATIC" if (flag("NOTSTATIC")) { thr_flag = "SY_THR_ABSENT" } if ($NF != "}") { funcalias=$(NF-2) argalias=$(NF-1) rettype=$NF end=NF-3 } else { funcalias="" argalias="" rettype="int" if ($(f+2) == "*") { ret_inc = 1 } end=NF } if (flag("NODEF")) { auditev="AUE_NULL" funcname=$(4 + ret_inc) argssize = "AS(" $(6 + ret_inc) ")" return } if ($f != "{") parserr($f, "{") f++ if ($end != "}") parserr($end, "}") end-- if ($end != ";") parserr($end, ";") end-- if ($end != ")") parserr($end, ")") end-- syscallret=$f f++ while (ret_inc > 0) { syscallret=syscallret " " $f f++ ret_inc-- } funcname=$f # # We now know the func name, so define a flags field for it. # Do this before any other processing as we may return early # from it. # for (cap in capenabled) { - if (funcname == capenabled[cap]) { + if (funcname == capenabled[cap] || + funcname == abi_func_prefix capenabled[cap]) { flags = "SYF_CAPENABLED"; break; } } if (funcalias == "") funcalias = funcname if (argalias == "") { argalias = funcname "_args" if (flag("COMPAT")) argalias = "o" argalias if (flag("COMPAT4")) argalias = "freebsd4_" argalias if (flag("COMPAT6")) argalias = "freebsd6_" argalias if (flag("COMPAT7")) argalias = "freebsd7_" argalias if (flag("COMPAT10")) argalias = "freebsd10_" argalias if (flag("COMPAT11")) argalias = "freebsd11_" argalias } f++ if ($f != "(") parserr($f, ")") f++ if (f == end) { if ($f != "void") parserr($f, "argument definition") return } while (f <= end) { argc++ argtype[argc]="" oldf="" while (f < end && $(f+1) != ",") { if (argtype[argc] != "" && oldf != "*") argtype[argc] = argtype[argc]" "; argtype[argc] = argtype[argc]$f; oldf = $f; f++ } if (argtype[argc] == "") parserr($f, "argument definition") # The parser adds space around parens. # Remove it from annotations. gsub(/ \( /, "(", argtype[argc]); gsub(/ \)/, ")", argtype[argc]); #remove annotations gsub(/_In[^ ]*[_)] /, "", argtype[argc]); gsub(/_Out[^ ]*[_)] /, "", argtype[argc]); argname[argc]=$f; f += 2; # skip name, and any comma } if (argc != 0) argssize = "AS(" argalias ")" } { comment = $4 if (NF < 7) for (i = 5; i <= NF; i++) comment = comment " " $i } # # The AUE_ audit event identifier. # { auditev = $2; } # # The flags, if any. 
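For orientation, the rules that follow emit entries into several generated files at once. For an ordinary STD syscall such as read(2), the output looks roughly like the fragments below; the spacing, audit event, and flags are illustrative rather than copied from a generated tree, and the fragments are not compilable on their own.

	/* init_sysent.c: one sysent[] row */
	{ AS(read_args), (sy_call_t *)sys_read, AUE_READ, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 3 = read */

	/* syscalls.c: the name table */
	"read",			/* 3 = read */

	/* syscall.h: the syscall number */
	#define	SYS_read	3

The capabilities.conf handling added earlier in this diff (including the abi_func_prefix match) is what decides whether SYF_CAPENABLED appears in that sysent row.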
# { flags = "0"; } type("STD") || type("NODEF") || type("NOARGS") || type("NOPROTO") \ || type("NOSTD") { parseline() printf("\t/* %s */\n\tcase %d: {\n", funcname, syscall) > systrace printf("\t/* %s */\n\tcase %d:\n", funcname, syscall) > systracetmp printf("\t/* %s */\n\tcase %d:\n", funcname, syscall) > systraceret if (argc > 0) { printf("\t\tswitch(ndx) {\n") > systracetmp printf("\t\tstruct %s *p = params;\n", argalias) > systrace for (i = 1; i <= argc; i++) { arg = argtype[i] sub("__restrict$", "", arg) if (index(arg, "*") > 0) printf("\t\tcase %d:\n\t\t\tp = \"userland %s\";\n\t\t\tbreak;\n", i - 1, arg) > systracetmp else printf("\t\tcase %d:\n\t\t\tp = \"%s\";\n\t\t\tbreak;\n", i - 1, arg) > systracetmp if (index(arg, "*") > 0 || arg == "caddr_t") printf("\t\tuarg[%d] = (intptr_t) p->%s; /* %s */\n", \ i - 1, \ argname[i], arg) > systrace else if (arg == "union l_semun") printf("\t\tuarg[%d] = p->%s.buf; /* %s */\n", \ i - 1, \ argname[i], arg) > systrace else if (substr(arg, 1, 1) == "u" || arg == "size_t") printf("\t\tuarg[%d] = p->%s; /* %s */\n", \ i - 1, \ argname[i], arg) > systrace else printf("\t\tiarg[%d] = p->%s; /* %s */\n", \ i - 1, \ argname[i], arg) > systrace } printf("\t\tdefault:\n\t\t\tbreak;\n\t\t};\n") > systracetmp printf("\t\tif (ndx == 0 || ndx == 1)\n") > systraceret printf("\t\t\tp = \"%s\";\n", syscallret) > systraceret printf("\t\tbreak;\n") > systraceret } printf("\t\t*n_args = %d;\n\t\tbreak;\n\t}\n", argc) > systrace printf("\t\tbreak;\n") > systracetmp if (argc != 0 && !flag("NOARGS") && !flag("NOPROTO") && \ !flag("NODEF")) { printf("struct %s {\n", argalias) > sysarg for (i = 1; i <= argc; i++) printf("\tchar %s_l_[PADL_(%s)]; " \ "%s %s; char %s_r_[PADR_(%s)];\n", argname[i], argtype[i], argtype[i], argname[i], argname[i], argtype[i]) > sysarg printf("};\n") > sysarg } else if (!flag("NOARGS") && !flag("NOPROTO") && !flag("NODEF")) printf("struct %s {\n\tregister_t dummy;\n};\n", argalias) > sysarg if (!flag("NOPROTO") && !flag("NODEF")) { if (funcname == "nosys" || funcname == "lkmnosys" || funcname == "sysarch" || funcname ~ /^freebsd/ || funcname ~ /^linux/ || funcname ~ /^ibcs2/ || funcname ~ /^xenix/ || funcname ~ /^cloudabi/) { printf("%s\t%s(struct thread *, struct %s *)", rettype, funcname, argalias) > sysdcl } else { printf("%s\tsys_%s(struct thread *, struct %s *)", rettype, funcname, argalias) > sysdcl } printf(";\n") > sysdcl printf("#define\t%sAUE_%s\t%s\n", syscallprefix, funcalias, auditev) > sysaue } printf("\t{ %s, (sy_call_t *)", argssize) > sysent column = 8 + 2 + length(argssize) + 15 if (flag("NOSTD")) { printf("lkmressys, AUE_NULL, NULL, 0, 0, %s, SY_THR_ABSENT },", flags) > sysent column = column + length("lkmressys") + length("AUE_NULL") + 3 } else { if (funcname == "nosys" || funcname == "sysarch" || funcname == "lkmnosys" || funcname ~ /^freebsd/ || funcname ~ /^linux/ || funcname ~ /^ibcs2/ || funcname ~ /^xenix/ || funcname ~ /^cloudabi/) { printf("%s, %s, NULL, 0, 0, %s, %s },", funcname, auditev, flags, thr_flag) > sysent column = column + length(funcname) + length(auditev) + length(flags) + 3 } else { printf("sys_%s, %s, NULL, 0, 0, %s, %s },", funcname, auditev, flags, thr_flag) > sysent column = column + length(funcname) + length(auditev) + length(flags) + 3 + 4 } } align_sysent_comment(column) printf("/* %d = %s */\n", syscall, funcalias) > sysent printf("\t\"%s\",\t\t\t/* %d = %s */\n", funcalias, syscall, funcalias) > sysnames if (!flag("NODEF")) { printf("#define\t%s%s\t%d\n", syscallprefix, funcalias, 
syscall) > syshdr printf(" \\\n\t%s.o", funcalias) > sysmk } syscall++ next } type("COMPAT") || type("COMPAT4") || type("COMPAT6") || \ type("COMPAT7") || type("COMPAT10") || type("COMPAT11") { if (flag("COMPAT")) { ncompat++ out = syscompat outdcl = syscompatdcl wrap = "compat" prefix = "o" descr = "old" } else if (flag("COMPAT4")) { ncompat4++ out = syscompat4 outdcl = syscompat4dcl wrap = "compat4" prefix = "freebsd4_" descr = "freebsd4" } else if (flag("COMPAT6")) { ncompat6++ out = syscompat6 outdcl = syscompat6dcl wrap = "compat6" prefix = "freebsd6_" descr = "freebsd6" } else if (flag("COMPAT7")) { ncompat7++ out = syscompat7 outdcl = syscompat7dcl wrap = "compat7" prefix = "freebsd7_" descr = "freebsd7" } else if (flag("COMPAT10")) { ncompat10++ out = syscompat10 outdcl = syscompat10dcl wrap = "compat10" prefix = "freebsd10_" descr = "freebsd10" } else if (flag("COMPAT11")) { ncompat11++ out = syscompat11 outdcl = syscompat11dcl wrap = "compat11" prefix = "freebsd11_" descr = "freebsd11" } parseline() if (argc != 0 && !flag("NOARGS") && !flag("NOPROTO") && \ !flag("NODEF")) { printf("struct %s {\n", argalias) > out for (i = 1; i <= argc; i++) printf("\tchar %s_l_[PADL_(%s)]; %s %s; " \ "char %s_r_[PADR_(%s)];\n", argname[i], argtype[i], argtype[i], argname[i], argname[i], argtype[i]) > out printf("};\n") > out } else if (!flag("NOARGS") && !flag("NOPROTO") && !flag("NODEF")) printf("struct %s {\n\tregister_t dummy;\n};\n", argalias) > sysarg if (!flag("NOPROTO") && !flag("NODEF")) { printf("%s\t%s%s(struct thread *, struct %s *);\n", rettype, prefix, funcname, argalias) > outdcl printf("#define\t%sAUE_%s%s\t%s\n", syscallprefix, prefix, funcname, auditev) > sysaue } if (flag("NOSTD")) { printf("\t{ %s, (sy_call_t *)%s, %s, NULL, 0, 0, 0, SY_THR_ABSENT },", "0", "lkmressys", "AUE_NULL") > sysent align_sysent_comment(8 + 2 + length("0") + 15 + \ length("lkmressys") + length("AUE_NULL") + 3) } else { printf("\t{ %s(%s,%s), %s, NULL, 0, 0, %s, %s },", wrap, argssize, funcname, auditev, flags, thr_flag) > sysent align_sysent_comment(8 + 9 + length(argssize) + 1 + \ length(funcname) + length(auditev) + \ length(flags) + 4) } printf("/* %d = %s %s */\n", syscall, descr, funcalias) > sysent printf("\t\"%s.%s\",\t\t/* %d = %s %s */\n", wrap, funcalias, syscall, descr, funcalias) > sysnames # Do not provide freebsdN_* symbols in libc for < FreeBSD 7 if (flag("COMPAT") || flag("COMPAT4") || flag("COMPAT6")) { printf("\t\t\t\t/* %d is %s %s */\n", syscall, descr, funcalias) > syshdr } else if (!flag("NODEF")) { printf("#define\t%s%s%s\t%d\n", syscallprefix, prefix, funcalias, syscall) > syshdr printf(" \\\n\t%s%s.o", prefix, funcalias) > sysmk } syscall++ next } type("OBSOL") { printf("\t{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT },") > sysent align_sysent_comment(34) printf("/* %d = obsolete %s */\n", syscall, comment) > sysent printf("\t\"obs_%s\",\t\t\t/* %d = obsolete %s */\n", $4, syscall, comment) > sysnames printf("\t\t\t\t/* %d is obsolete %s */\n", syscall, comment) > syshdr syscall++ next } type("UNIMPL") { while (syscall <= syscall_range[2]) { printf("\t{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT },\t\t\t/* %d = %s */\n", syscall, comment) > sysent printf("\t\"#%d\",\t\t\t/* %d = %s */\n", syscall, syscall, comment) > sysnames syscall++ } next } { printf "%s: line %d: unrecognized keyword %s\n", infile, NR, $3 exit 1 } END { printf "\n#define AS(name) (sizeof(struct name) / sizeof(register_t))\n" > sysinc if (ncompat != 0) { printf "\n#ifdef 
%s\n", compat > sysinc printf "#define compat(n, name) n, (sy_call_t *)__CONCAT(o,name)\n" > sysinc printf "#else\n" > sysinc printf "#define compat(n, name) 0, (sy_call_t *)nosys\n" > sysinc printf "#endif\n" > sysinc } if (ncompat4 != 0) { printf "\n#ifdef %s\n", compat4 > sysinc printf "#define compat4(n, name) n, (sy_call_t *)__CONCAT(freebsd4_,name)\n" > sysinc printf "#else\n" > sysinc printf "#define compat4(n, name) 0, (sy_call_t *)nosys\n" > sysinc printf "#endif\n" > sysinc } if (ncompat6 != 0) { printf "\n#ifdef %s\n", compat6 > sysinc printf "#define compat6(n, name) n, (sy_call_t *)__CONCAT(freebsd6_,name)\n" > sysinc printf "#else\n" > sysinc printf "#define compat6(n, name) 0, (sy_call_t *)nosys\n" > sysinc printf "#endif\n" > sysinc } if (ncompat7 != 0) { printf "\n#ifdef %s\n", compat7 > sysinc printf "#define compat7(n, name) n, (sy_call_t *)__CONCAT(freebsd7_,name)\n" > sysinc printf "#else\n" > sysinc printf "#define compat7(n, name) 0, (sy_call_t *)nosys\n" > sysinc printf "#endif\n" > sysinc } if (ncompat10 != 0) { printf "\n#ifdef %s\n", compat10 > sysinc printf "#define compat10(n, name) n, (sy_call_t *)__CONCAT(freebsd10_,name)\n" > sysinc printf "#else\n" > sysinc printf "#define compat10(n, name) 0, (sy_call_t *)nosys\n" > sysinc printf "#endif\n" > sysinc } if (ncompat11 != 0) { printf "\n#ifdef %s\n", compat11 > sysinc printf "#define compat11(n, name) n, (sy_call_t *)__CONCAT(freebsd11_,name)\n" > sysinc printf "#else\n" > sysinc printf "#define compat11(n, name) 0, (sy_call_t *)nosys\n" > sysinc printf "#endif\n" > sysinc } printf("\n#endif /* %s */\n\n", compat) > syscompatdcl printf("\n#endif /* %s */\n\n", compat4) > syscompat4dcl printf("\n#endif /* %s */\n\n", compat6) > syscompat6dcl printf("\n#endif /* %s */\n\n", compat7) > syscompat7dcl printf("\n#endif /* %s */\n\n", compat10) > syscompat10dcl printf("\n#endif /* %s */\n\n", compat11) > syscompat11dcl printf("\n#undef PAD_\n") > sysprotoend printf("#undef PADL_\n") > sysprotoend printf("#undef PADR_\n") > sysprotoend printf("\n#endif /* !%s */\n", sysproto_h) > sysprotoend printf("\n") > sysmk printf("};\n") > sysent printf("};\n") > sysnames printf("#define\t%sMAXSYSCALL\t%d\n", syscallprefix, syscall) \ > syshdr printf "\tdefault:\n\t\t*n_args = 0;\n\t\tbreak;\n\t};\n}\n" > systrace printf "\tdefault:\n\t\tbreak;\n\t};\n\tif (p != NULL)\n\t\tstrlcpy(desc, p, descsz);\n}\n" > systracetmp printf "\tdefault:\n\t\tbreak;\n\t};\n\tif (p != NULL)\n\t\tstrlcpy(desc, p, descsz);\n}\n" > systraceret } ' cat $sysinc $sysent >> $syssw cat $sysarg $sysdcl \ $syscompat $syscompatdcl \ $syscompat4 $syscompat4dcl \ $syscompat6 $syscompat6dcl \ $syscompat7 $syscompat7dcl \ $syscompat10 $syscompat10dcl \ $syscompat11 $syscompat11dcl \ $sysaue $sysprotoend > $sysproto cat $systracetmp >> $systrace cat $systraceret >> $systrace Index: projects/clang700-import/sys/kern/subr_blist.c =================================================================== --- projects/clang700-import/sys/kern/subr_blist.c (revision 340426) +++ projects/clang700-import/sys/kern/subr_blist.c (revision 340427) @@ -1,1299 +1,1144 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1998 Matthew Dillon. All Rights Reserved. * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * BLIST.C - Bitmap allocator/deallocator, using a radix tree with hinting * * This module implements a general bitmap allocator/deallocator. The * allocator eats around 2 bits per 'block'. The module does not * try to interpret the meaning of a 'block' other than to return * SWAPBLK_NONE on an allocation failure. * * A radix tree controls access to pieces of the bitmap, and includes * auxiliary information at each interior node about the availabilty of * contiguous free blocks in the subtree rooted at that node. Two radix * constants are involved: one for the size of the bitmaps contained in the * leaf nodes (BLIST_BMAP_RADIX), and one for the number of descendents of * each of the meta (interior) nodes (BLIST_META_RADIX). Each subtree is * associated with a range of blocks. The root of any subtree stores a * hint field that defines an upper bound on the size of the largest * allocation that can begin in the associated block range. A hint is an * upper bound on a potential allocation, but not necessarily a tight upper * bound. * - * The radix tree also implements two collapsed states for meta nodes: - * the ALL-ALLOCATED state and the ALL-FREE state. If a meta node is - * in either of these two states, all information contained underneath - * the node is considered stale. These states are used to optimize - * allocation and freeing operations. + * The bitmap field in each node directs the search for available blocks. + * For a leaf node, a bit is set if the corresponding block is free. For a + * meta node, a bit is set if the corresponding subtree contains a free + * block somewhere within it. The search at a meta node considers only + * children of that node that represent a range that includes a free block. * * The hinting greatly increases code efficiency for allocations while * the general radix structure optimizes both allocations and frees. The * radix tree should be able to operate well no matter how much * fragmentation there is and no matter how large a bitmap is used. * * The blist code wires all necessary memory at creation time. Neither * allocations nor frees require interaction with the memory subsystem. - * The non-blocking features of the blist code are used in the swap code - * (vm/swap_pager.c). + * The non-blocking nature of allocations and frees is required by swap + * code (vm/swap_pager.c). 
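As described above, each meta node's bm_bitmap has one bit per child, set only while that child's subtree still contains a free block, so a search can skip empty children by walking the set bits from the least significant one upward. A tiny standalone illustration of that scan pattern follows, using a 64-bit mask like u_daddr_t and the GCC/Clang __builtin_ctzll() builtin where the kernel code uses its own bitpos(); the names are hypothetical.

	#include <stdint.h>
	#include <stdio.h>

	/* Visit only the children whose subtree still has a free block. */
	static void
	visit_nonempty_children(uint64_t bm_bitmap)
	{
		uint64_t bit;
		int digit;

		while (bm_bitmap != 0) {
			bit = bm_bitmap & -bm_bitmap;	/* lowest set bit */
			digit = __builtin_ctzll(bit);	/* child index */
			printf("descend into child %d\n", digit);
			bm_bitmap ^= bit;		/* done with this child */
		}
	}

	int
	main(void)
	{
		visit_nonempty_children(0x15);	/* children 0, 2 and 4 are non-empty */
		return (0);
	}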
* - * LAYOUT: The radix tree is laid out recursively using a - * linear array. Each meta node is immediately followed (laid out - * sequentially in memory) by BLIST_META_RADIX lower level nodes. This - * is a recursive structure but one that can be easily scanned through - * a very simple 'skip' calculation. In order to support large radixes, - * portions of the tree may reside outside our memory allocation. We - * handle this with an early-termination optimization (when bighint is - * set to -1) on the scan. The memory allocation is only large enough - * to cover the number of blocks requested at creation time even if it - * must be encompassed in larger root-node radix. + * LAYOUT: The radix tree is laid out recursively using a linear array. + * Each meta node is immediately followed (laid out sequentially in + * memory) by BLIST_META_RADIX lower level nodes. This is a recursive + * structure but one that can be easily scanned through a very simple + * 'skip' calculation. The memory allocation is only large enough to + * cover the number of blocks requested at creation time. Nodes that + * represent blocks beyond that limit, nodes that would never be read + * or written, are not allocated, so that the last of the + * BLIST_META_RADIX lower level nodes of a some nodes may not be + * allocated. * * NOTE: the allocator cannot currently allocate more than * BLIST_BMAP_RADIX blocks per call. It will panic with 'allocation too * large' if you try. This is an area that could use improvement. The * radix is large enough that this restriction does not effect the swap * system, though. Currently only the allocation code is affected by * this algorithmic unfeature. The freeing code can handle arbitrary * ranges. * * This code can be compiled stand-alone for debugging. */ #include __FBSDID("$FreeBSD$"); #ifdef _KERNEL #include #include #include #include #include #include #include #include #include #else #ifndef BLIST_NO_DEBUG #define BLIST_DEBUG #endif +#include #include #include #include #include #include #include #include #include #include #define bitcount64(x) __bitcount64((uint64_t)(x)) #define malloc(a,b,c) calloc(a, 1) #define free(a,b) free(a) -static __inline int imax(int a, int b) { return (a > b ? a : b); } +#define ummin(a,b) ((a) < (b) ? (a) : (b)) #include void panic(const char *ctl, ...); #endif /* * static support functions */ static daddr_t blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count); static daddr_t blst_meta_alloc(blmeta_t *scan, daddr_t cursor, daddr_t count, u_daddr_t radix); static void blst_leaf_free(blmeta_t *scan, daddr_t relblk, int count); static void blst_meta_free(blmeta_t *scan, daddr_t freeBlk, daddr_t count, u_daddr_t radix); static void blst_copy(blmeta_t *scan, daddr_t blk, daddr_t radix, blist_t dest, daddr_t count); static daddr_t blst_leaf_fill(blmeta_t *scan, daddr_t blk, int count); static daddr_t blst_meta_fill(blmeta_t *scan, daddr_t allocBlk, daddr_t count, u_daddr_t radix); #ifndef _KERNEL static void blst_radix_print(blmeta_t *scan, daddr_t blk, daddr_t radix, int tab); #endif #ifdef _KERNEL static MALLOC_DEFINE(M_SWAP, "SWAP", "Swap space"); #endif _Static_assert(BLIST_BMAP_RADIX % BLIST_META_RADIX == 0, "radix divisibility error"); #define BLIST_BMAP_MASK (BLIST_BMAP_RADIX - 1) #define BLIST_META_MASK (BLIST_META_RADIX - 1) /* * For a subtree that can represent the state of up to 'radix' blocks, the * number of leaf nodes of the subtree is L=radix/BLIST_BMAP_RADIX. 
If 'm' * is short for BLIST_META_RADIX, then for a tree of height h with L=m**h * leaf nodes, the total number of tree nodes is 1 + m + m**2 + ... + m**h, * or, equivalently, (m**(h+1)-1)/(m-1). This quantity is called 'skip' * in the 'meta' functions that process subtrees. Since integer division * discards remainders, we can express this computation as * skip = (m * m**h) / (m - 1) * skip = (m * (radix / BLIST_BMAP_RADIX)) / (m - 1) * and since m divides BLIST_BMAP_RADIX, we can simplify further to * skip = (radix / (BLIST_BMAP_RADIX / m)) / (m - 1) * skip = radix / ((BLIST_BMAP_RADIX / m) * (m - 1)) * so that simple integer division by a constant can safely be used for the * calculation. */ static inline daddr_t radix_to_skip(daddr_t radix) { return (radix / ((BLIST_BMAP_RADIX / BLIST_META_RADIX) * BLIST_META_MASK)); } /* + * Provide a mask with count bits set, starting as position n. + */ +static inline u_daddr_t +bitrange(int n, int count) +{ + + return (((u_daddr_t)-1 << n) & + ((u_daddr_t)-1 >> (BLIST_BMAP_RADIX - (n + count)))); +} + + +/* * Use binary search, or a faster method, to find the 1 bit in a u_daddr_t. * Assumes that the argument has only one bit set. */ static inline int bitpos(u_daddr_t mask) { int hi, lo, mid; switch (sizeof(mask)) { #ifdef HAVE_INLINE_FFSLL case sizeof(long long): return (ffsll(mask) - 1); #endif default: lo = 0; hi = BLIST_BMAP_RADIX; while (lo + 1 < hi) { mid = (lo + hi) >> 1; if ((mask >> mid) != 0) lo = mid; else hi = mid; } return (lo); } } /* * blist_create() - create a blist capable of handling up to the specified * number of blocks * * blocks - must be greater than 0 * flags - malloc flags * * The smallest blist consists of a single leaf node capable of * managing BLIST_BMAP_RADIX blocks. */ blist_t blist_create(daddr_t blocks, int flags) { blist_t bl; - daddr_t i, last_block; - u_daddr_t nodes, radix, skip; - int digit; + u_daddr_t nodes, radix; if (blocks == 0) panic("invalid block count"); /* * Calculate the radix and node count used for scanning. */ - last_block = blocks - 1; + nodes = 1; radix = BLIST_BMAP_RADIX; - while (radix < blocks) { - if (((last_block / radix + 1) & BLIST_META_MASK) != 0) - /* - * We must widen the blist to avoid partially - * filled nodes. - */ - last_block |= radix - 1; + while (radix <= blocks) { + nodes += 1 + (blocks - 1) / radix; radix *= BLIST_META_RADIX; } - /* - * Count the meta-nodes in the expanded tree, including the final - * terminator, from the bottom level up to the root. - */ - nodes = 1; - if (radix - blocks >= BLIST_BMAP_RADIX) - nodes++; - last_block /= BLIST_BMAP_RADIX; - while (last_block > 0) { - nodes += last_block + 1; - last_block /= BLIST_META_RADIX; - } bl = malloc(offsetof(struct blist, bl_root[nodes]), M_SWAP, flags | M_ZERO); if (bl == NULL) return (NULL); bl->bl_blocks = blocks; bl->bl_radix = radix; - bl->bl_cursor = 0; - /* - * Initialize the empty tree by filling in root values, then initialize - * just the terminators in the rest of the tree. - */ - bl->bl_root[0].bm_bighint = 0; - if (radix == BLIST_BMAP_RADIX) - bl->bl_root[0].u.bmu_bitmap = 0; - else - bl->bl_root[0].u.bmu_avail = 0; - last_block = blocks - 1; - i = 0; - while (radix > BLIST_BMAP_RADIX) { - radix /= BLIST_META_RADIX; - skip = radix_to_skip(radix); - digit = last_block / radix; - i += 1 + digit * skip; - if (digit != BLIST_META_MASK) { - /* - * Add a terminator. 
- */ - bl->bl_root[i + skip].bm_bighint = (daddr_t)-1; - bl->bl_root[i + skip].u.bmu_bitmap = 0; - } - last_block %= radix; - } - #if defined(BLIST_DEBUG) printf( "BLIST representing %lld blocks (%lld MB of swap)" ", requiring %lldK of ram\n", (long long)bl->bl_blocks, (long long)bl->bl_blocks * 4 / 1024, (long long)(nodes * sizeof(blmeta_t) + 1023) / 1024 ); printf("BLIST raw radix tree contains %lld records\n", (long long)nodes); #endif return (bl); } void blist_destroy(blist_t bl) { free(bl, M_SWAP); } /* * blist_alloc() - reserve space in the block bitmap. Return the base * of a contiguous region or SWAPBLK_NONE if space could * not be allocated. */ daddr_t blist_alloc(blist_t bl, daddr_t count) { daddr_t blk; + if (count > BLIST_MAX_ALLOC) + panic("allocation too large"); + /* * This loop iterates at most twice. An allocation failure in the * first iteration leads to a second iteration only if the cursor was * non-zero. When the cursor is zero, an allocation failure will * reduce the hint, stopping further iterations. */ while (count <= bl->bl_root->bm_bighint) { blk = blst_meta_alloc(bl->bl_root, bl->bl_cursor, count, bl->bl_radix); if (blk != SWAPBLK_NONE) { + bl->bl_avail -= count; bl->bl_cursor = blk + count; if (bl->bl_cursor == bl->bl_blocks) bl->bl_cursor = 0; return (blk); - } else if (bl->bl_cursor != 0) - bl->bl_cursor = 0; + } + bl->bl_cursor = 0; } return (SWAPBLK_NONE); } /* * blist_avail() - return the number of free blocks. */ daddr_t blist_avail(blist_t bl) { - if (bl->bl_radix == BLIST_BMAP_RADIX) - return (bitcount64(bl->bl_root->u.bmu_bitmap)); - else - return (bl->bl_root->u.bmu_avail); + return (bl->bl_avail); } /* * blist_free() - free up space in the block bitmap. Return the base * of a contiguous region. Panic if an inconsistancy is * found. */ void blist_free(blist_t bl, daddr_t blkno, daddr_t count) { + if (blkno < 0 || blkno + count > bl->bl_blocks) + panic("freeing invalid range"); blst_meta_free(bl->bl_root, blkno, count, bl->bl_radix); + bl->bl_avail += count; } /* * blist_fill() - mark a region in the block bitmap as off-limits * to the allocator (i.e. allocate it), ignoring any * existing allocations. Return the number of blocks * actually filled that were free before the call. */ daddr_t blist_fill(blist_t bl, daddr_t blkno, daddr_t count) { + daddr_t filled; - return (blst_meta_fill(bl->bl_root, blkno, count, bl->bl_radix)); + if (blkno < 0 || blkno + count > bl->bl_blocks) + panic("filling invalid range"); + filled = blst_meta_fill(bl->bl_root, blkno, count, bl->bl_radix); + bl->bl_avail -= filled; + return (filled); } /* * blist_resize() - resize an existing radix tree to handle the * specified number of blocks. This will reallocate * the tree and transfer the previous bitmap to the new * one. When extending the tree you can specify whether * the new blocks are to left allocated or freed. */ void blist_resize(blist_t *pbl, daddr_t count, int freenew, int flags) { blist_t newbl = blist_create(count, flags); blist_t save = *pbl; *pbl = newbl; if (count > save->bl_blocks) count = save->bl_blocks; blst_copy(save->bl_root, 0, save->bl_radix, newbl, count); /* * If resizing upwards, should we free the new space or not? 
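blist_alloc(), blist_free(), blist_fill() and blist_resize() above form the public interface; the swap pager is the main consumer. The fragment below shows only the typical call pattern. It is a hedged sketch of in-kernel usage: the header choices and the M_NOWAIT flag are assumptions about the kernel environment, and the fragment is meant to be read rather than dropped into a build.

	#include <sys/param.h>
	#include <sys/types.h>
	#include <sys/malloc.h>
	#include <sys/blist.h>		/* blist_t, SWAPBLK_NONE */

	static void
	blist_example(void)
	{
		blist_t bl;
		daddr_t blk;

		/* Manage 1M blocks; the second argument is malloc(9) flags. */
		bl = blist_create(1024 * 1024, M_NOWAIT);
		if (bl == NULL)
			return;

		blk = blist_alloc(bl, 16);		/* reserve 16 contiguous blocks */
		if (blk != SWAPBLK_NONE)
			blist_free(bl, blk, 16);	/* and give them back */

		(void)blist_fill(bl, 0, 64);		/* mark blocks 0..63 allocated */
		blist_destroy(bl);
	}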
*/ if (freenew && count < newbl->bl_blocks) { blist_free(newbl, count, newbl->bl_blocks - count); } blist_destroy(save); } #ifdef BLIST_DEBUG /* * blist_print() - dump radix tree */ void blist_print(blist_t bl) { - printf("BLIST cursor = %08jx {\n", (uintmax_t)bl->bl_cursor); - blst_radix_print(bl->bl_root, 0, bl->bl_radix, 4); + printf("BLIST avail = %jd, cursor = %08jx {\n", + (uintmax_t)bl->bl_avail, (uintmax_t)bl->bl_cursor); + + if (bl->bl_root->bm_bitmap != 0) + blst_radix_print(bl->bl_root, 0, bl->bl_radix, 4); printf("}\n"); } #endif static const u_daddr_t fib[] = { 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584, 4181, 6765, 10946, 17711, 28657, 46368, 75025, 121393, 196418, 317811, 514229, 832040, 1346269, 2178309, 3524578, }; /* * Use 'gap' to describe a maximal range of unallocated blocks/bits. */ struct gap_stats { daddr_t start; /* current gap start, or SWAPBLK_NONE */ daddr_t num; /* number of gaps observed */ daddr_t max; /* largest gap size */ daddr_t avg; /* average gap size */ daddr_t err; /* sum - num * avg */ daddr_t histo[nitems(fib)]; /* # gaps in each size range */ int max_bucket; /* last histo elt with nonzero val */ }; /* * gap_stats_counting() - is the state 'counting 1 bits'? * or 'skipping 0 bits'? */ static inline bool gap_stats_counting(const struct gap_stats *stats) { return (stats->start != SWAPBLK_NONE); } /* * init_gap_stats() - initialize stats on gap sizes */ static inline void init_gap_stats(struct gap_stats *stats) { bzero(stats, sizeof(*stats)); stats->start = SWAPBLK_NONE; } /* * update_gap_stats() - update stats on gap sizes */ static void update_gap_stats(struct gap_stats *stats, daddr_t posn) { daddr_t size; int hi, lo, mid; if (!gap_stats_counting(stats)) { stats->start = posn; return; } size = posn - stats->start; stats->start = SWAPBLK_NONE; if (size > stats->max) stats->max = size; /* * Find the fibonacci range that contains size, * expecting to find it in an early range. 
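update_gap_stats() above maintains an exact integer mean without keeping a potentially large running sum: avg and err together satisfy sum = num * avg + err after every update, with err being the remainder that has not yet been folded into avg. A standalone sketch of just that bookkeeping, with hypothetical names:

	#include <stdint.h>

	struct running_avg {
		int64_t num;	/* samples seen */
		int64_t avg;	/* truncated mean */
		int64_t err;	/* sum - num * avg */
	};

	static void
	running_avg_add(struct running_avg *ra, int64_t size)
	{
		ra->err += size - ra->avg;	/* account for the new sample */
		ra->num++;
		ra->avg += ra->err / ra->num;	/* fold whole units into avg */
		ra->err %= ra->num;		/* keep only the remainder */
	}

	int
	main(void)
	{
		struct running_avg ra = { 0, 0, 0 };

		running_avg_add(&ra, 5);
		running_avg_add(&ra, 8);	/* avg 6, err 1: 5 + 8 == 2 * 6 + 1 */
		return (0);
	}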
*/ lo = 0; hi = 1; while (hi < nitems(fib) && fib[hi] <= size) { lo = hi; hi *= 2; } if (hi >= nitems(fib)) hi = nitems(fib); while (lo + 1 != hi) { mid = (lo + hi) >> 1; if (fib[mid] <= size) lo = mid; else hi = mid; } stats->histo[lo]++; if (lo > stats->max_bucket) stats->max_bucket = lo; stats->err += size - stats->avg; stats->num++; stats->avg += stats->err / stats->num; stats->err %= stats->num; } /* * dump_gap_stats() - print stats on gap sizes */ static inline void dump_gap_stats(const struct gap_stats *stats, struct sbuf *s) { int i; sbuf_printf(s, "number of maximal free ranges: %jd\n", (intmax_t)stats->num); sbuf_printf(s, "largest free range: %jd\n", (intmax_t)stats->max); sbuf_printf(s, "average maximal free range size: %jd\n", (intmax_t)stats->avg); sbuf_printf(s, "number of maximal free ranges of different sizes:\n"); sbuf_printf(s, " count | size range\n"); sbuf_printf(s, " ----- | ----------\n"); for (i = 0; i < stats->max_bucket; i++) { if (stats->histo[i] != 0) { sbuf_printf(s, "%20jd | ", (intmax_t)stats->histo[i]); if (fib[i] != fib[i + 1] - 1) sbuf_printf(s, "%jd to %jd\n", (intmax_t)fib[i], (intmax_t)fib[i + 1] - 1); else sbuf_printf(s, "%jd\n", (intmax_t)fib[i]); } } sbuf_printf(s, "%20jd | ", (intmax_t)stats->histo[i]); if (stats->histo[i] > 1) sbuf_printf(s, "%jd to %jd\n", (intmax_t)fib[i], (intmax_t)stats->max); else sbuf_printf(s, "%jd\n", (intmax_t)stats->max); } /* * blist_stats() - dump radix tree stats */ void blist_stats(blist_t bl, struct sbuf *s) { struct gap_stats gstats; struct gap_stats *stats = &gstats; daddr_t i, nodes, radix; u_daddr_t bit, diff, mask; init_gap_stats(stats); nodes = 0; i = bl->bl_radix; while (i < bl->bl_radix + bl->bl_blocks) { /* * Find max size subtree starting at i. */ radix = BLIST_BMAP_RADIX; while (((i / radix) & BLIST_META_MASK) == 0) radix *= BLIST_META_RADIX; /* * Check for skippable subtrees starting at i. */ while (radix > BLIST_BMAP_RADIX) { - if (bl->bl_root[nodes].u.bmu_avail == 0) { + if (bl->bl_root[nodes].bm_bitmap == 0) { if (gap_stats_counting(stats)) update_gap_stats(stats, i); break; } - if (bl->bl_root[nodes].u.bmu_avail == radix) { - if (!gap_stats_counting(stats)) - update_gap_stats(stats, i); - break; - } /* * Skip subtree root. */ nodes++; radix /= BLIST_META_RADIX; } if (radix == BLIST_BMAP_RADIX) { /* * Scan leaf. */ - mask = bl->bl_root[nodes].u.bmu_bitmap; + mask = bl->bl_root[nodes].bm_bitmap; diff = mask ^ (mask << 1); if (gap_stats_counting(stats)) diff ^= 1; while (diff != 0) { bit = diff & -diff; update_gap_stats(stats, i + bitpos(bit)); diff ^= bit; } } nodes += radix_to_skip(radix); i += radix; } update_gap_stats(stats, i); dump_gap_stats(stats, s); } /************************************************************************ * ALLOCATION SUPPORT FUNCTIONS * ************************************************************************ * * These support functions do all the actual work. They may seem * rather longish, but that's because I've commented them up. The * actual code is straight forward. * */ /* - * blist_leaf_alloc() - allocate at a leaf in the radix tree (a bitmap). + * BLST_NEXT_LEAF_ALLOC() - allocate the first few blocks in the next leaf. * + * 'scan' is a leaf node, associated with a block containing 'blk'. + * The next leaf node could be adjacent, or several nodes away if the + * least common ancestor of 'scan' and its neighbor is several levels + * up. Use 'blk' to determine how many meta-nodes lie between the + * leaves. 
If the next leaf has enough initial bits set, clear them + * and clear the bits in the meta nodes on the path up to the least + * common ancestor to mark any subtrees made completely empty. + */ +static int +blst_next_leaf_alloc(blmeta_t *scan, daddr_t blk, int count) +{ + blmeta_t *next; + daddr_t skip; + u_daddr_t radix; + int digit; + + next = scan + 1; + blk += BLIST_BMAP_RADIX; + radix = BLIST_BMAP_RADIX; + while ((digit = ((blk / radix) & BLIST_META_MASK)) == 0 && + (next->bm_bitmap & 1) == 1) { + next++; + radix *= BLIST_META_RADIX; + } + if (((next->bm_bitmap + 1) & ~((u_daddr_t)-1 << count)) != 0) { + /* + * The next leaf doesn't have enough free blocks at the + * beginning to complete the spanning allocation. + */ + return (ENOMEM); + } + /* Clear the first 'count' bits in the next leaf to allocate. */ + next->bm_bitmap &= (u_daddr_t)-1 << count; + + /* + * Update bitmaps of next-ancestors, up to least common ancestor. + */ + skip = radix_to_skip(radix); + while (radix != BLIST_BMAP_RADIX && next->bm_bitmap == 0) { + (--next)->bm_bitmap ^= 1; + radix /= BLIST_META_RADIX; + } + if (next->bm_bitmap == 0) + scan[-digit * skip].bm_bitmap ^= (u_daddr_t)1 << digit; + return (0); +} + +/* + * BLST_LEAF_ALLOC() - allocate at a leaf in the radix tree (a bitmap). + * * This is the core of the allocator and is optimized for the * BLIST_BMAP_RADIX block allocation case. Otherwise, execution * time is proportional to log2(count) + bitpos time. */ static daddr_t blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count) { u_daddr_t mask; int count1, hi, lo, num_shifts, range1, range_ext; range1 = 0; count1 = count - 1; num_shifts = fls(count1); - mask = scan->u.bmu_bitmap; + mask = scan->bm_bitmap; while ((-mask & ~mask) != 0 && num_shifts > 0) { /* * If bit i is set in mask, then bits in [i, i+range1] are set - * in scan->u.bmu_bitmap. The value of range1 is equal to + * in scan->bm_bitmap. The value of range1 is equal to * count1 >> num_shifts. Grow range and reduce num_shifts to 0, * while preserving these invariants. The updates to mask leave * fewer bits set, but each bit that remains set represents a - * longer string of consecutive bits set in scan->u.bmu_bitmap. + * longer string of consecutive bits set in scan->bm_bitmap. * If more updates to mask cannot clear more bits, because mask * is partitioned with all 0 bits preceding all 1 bits, the loop * terminates immediately. */ num_shifts--; range_ext = range1 + ((count1 >> num_shifts) & 1); /* * mask is a signed quantity for the shift because when it is * shifted right, the sign bit should copied; when the last * block of the leaf is free, pretend, for a while, that all the * blocks that follow it are also free. */ mask &= (daddr_t)mask >> range_ext; range1 += range_ext; } if (mask == 0) { /* * Update bighint. There is no allocation bigger than range1 * starting in this leaf. */ scan->bm_bighint = range1; return (SWAPBLK_NONE); } /* Discard any candidates that appear before blk. */ mask &= (u_daddr_t)-1 << (blk & BLIST_BMAP_MASK); if (mask == 0) return (SWAPBLK_NONE); /* * The least significant set bit in mask marks the start of the first * available range of sufficient size. Clear all the bits but that one, * and then find its position. */ mask &= -mask; lo = bitpos(mask); hi = lo + count; if (hi > BLIST_BMAP_RADIX) { /* * An allocation within this leaf is impossible, so a successful * allocation depends on the next leaf providing some of the blocks. 
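The mask-folding loop in blst_leaf_alloc() above finds a run of 'count' consecutive set bits in O(log count) folding steps instead of scanning bit by bit. A simplified user-space sketch that finds such a run entirely within one 64-bit word; it omits the kernel's signed-shift trick for runs that spill into the next leaf, and __builtin_ctzll stands in for bitpos():

#include <stdint.h>

/*
 * Sketch: return the lowest bit position that begins a run of at least
 * 'count' consecutive 1-bits in 'bits', or -1 if none exists.  After each
 * folding step a set bit in 'mask' marks a run of (range1 + 1) set bits in
 * the original word, the same invariant documented in blst_leaf_alloc().
 */
static int
find_bit_run(uint64_t bits, int count)
{
        uint64_t mask;
        int count1, num_shifts, range1, range_ext;

        if (count < 1 || count > 64)
                return (-1);
        mask = bits;
        count1 = count - 1;
        for (num_shifts = 0; (count1 >> num_shifts) != 0; num_shifts++)
                ;                               /* num_shifts = fls(count1) */
        range1 = 0;
        while (mask != 0 && num_shifts > 0) {
                num_shifts--;
                range_ext = range1 + ((count1 >> num_shifts) & 1);
                mask &= mask >> range_ext;      /* keep only longer runs */
                range1 += range_ext;
        }
        if (mask == 0)
                return (-1);
        return (__builtin_ctzll(mask));         /* start of the first run */
}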
*/ - if (((blk / BLIST_BMAP_RADIX + 1) & BLIST_META_MASK) == 0) { + if (blst_next_leaf_alloc(scan, blk, hi - BLIST_BMAP_RADIX) != 0) /* - * The next leaf has a different meta-node parent, so it - * is not necessarily initialized. Update bighint, - * comparing the range found at the end of mask to the - * largest earlier range that could have been made to - * vanish in the initial processing of mask. + * The hint cannot be updated, because the same + * allocation request could be satisfied later, by this + * leaf, if the state of the next leaf changes, and + * without any changes to this leaf. */ - scan->bm_bighint = imax(BLIST_BMAP_RADIX - lo, range1); return (SWAPBLK_NONE); - } - hi -= BLIST_BMAP_RADIX; - if (((scan[1].u.bmu_bitmap + 1) & ~((u_daddr_t)-1 << hi)) != 0) { - /* - * The next leaf doesn't have enough free blocks at the - * beginning to complete the spanning allocation. The - * hint cannot be updated, because the same allocation - * request could be satisfied later, by this leaf, if - * the state of the next leaf changes, and without any - * changes to this leaf. - */ - return (SWAPBLK_NONE); - } - /* Clear the first 'hi' bits in the next leaf, allocating them. */ - scan[1].u.bmu_bitmap &= (u_daddr_t)-1 << hi; hi = BLIST_BMAP_RADIX; } /* Set the bits of mask at position 'lo' and higher. */ mask = -mask; if (hi == BLIST_BMAP_RADIX) { /* * Update bighint. There is no allocation bigger than range1 * available in this leaf after this allocation completes. */ scan->bm_bighint = range1; } else { /* Clear the bits of mask at position 'hi' and higher. */ mask &= (u_daddr_t)-1 >> (BLIST_BMAP_RADIX - hi); - /* If this allocation uses all the bits, clear the hint. */ - if (mask == scan->u.bmu_bitmap) - scan->bm_bighint = 0; } /* Clear the allocated bits from this leaf. */ - scan->u.bmu_bitmap &= ~mask; + scan->bm_bitmap &= ~mask; return ((blk & ~BLIST_BMAP_MASK) + lo); } /* * blist_meta_alloc() - allocate at a meta in the radix tree. * * Attempt to allocate at a meta node. If we can't, we update * bighint and return a failure. Updating bighint optimize future * calls that hit this node. We have to check for our collapse cases * and we have a few optimizations strewn in as well. */ static daddr_t blst_meta_alloc(blmeta_t *scan, daddr_t cursor, daddr_t count, u_daddr_t radix) { - daddr_t blk, i, next_skip, r, skip; - int child; + daddr_t blk, i, r, skip; + u_daddr_t bit, mask; bool scan_from_start; + int digit; if (radix == BLIST_BMAP_RADIX) return (blst_leaf_alloc(scan, cursor, count)); - if (scan->u.bmu_avail < count) { - /* - * The meta node's hint must be too large if the allocation - * exceeds the number of free blocks. Reduce the hint, and - * return failure. - */ - scan->bm_bighint = scan->u.bmu_avail; - return (SWAPBLK_NONE); - } blk = cursor & -radix; + scan_from_start = (cursor == blk); + radix /= BLIST_META_RADIX; skip = radix_to_skip(radix); - next_skip = skip / BLIST_META_RADIX; + mask = scan->bm_bitmap; + /* Discard any candidates that appear before cursor. */ + digit = (cursor / radix) & BLIST_META_MASK; + mask &= (u_daddr_t)-1 << digit; + /* - * An ALL-FREE meta node requires special handling before allocating - * any of its blocks. + * If the first try is for a block that includes the cursor, pre-undo + * the digit * radix offset in the first call; otherwise, ignore the + * cursor entirely. 
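blst_meta_alloc() now treats the meta node's bm_bitmap as a bitmap of nonempty children and, in the loop that follows, visits only the set bits. The idiom in standalone user-space form, with __builtin_ctzll playing the role of bitpos():

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the set-bit iteration used by blst_meta_alloc() and
 * blst_radix_print(): visit the position of every 1-bit in 'mask',
 * lowest first, clearing each bit as it is handled.
 */
static void
visit_set_bits(uint64_t mask)
{
        uint64_t bit;

        while (mask != 0) {
                bit = mask & -mask;             /* isolate the lowest set bit */
                printf("child %d is nonempty\n", __builtin_ctzll(bit));
                mask ^= bit;                    /* clear it and continue */
        }
}

int
main(void)
{
        visit_set_bits(0x29);   /* prints children 0, 3 and 5 */
        return (0);
}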
*/ - if (scan->u.bmu_avail == radix) { - radix /= BLIST_META_RADIX; + if (((mask >> digit) & 1) == 1) + cursor -= digit * radix; + else + cursor = blk; - /* - * Reinitialize each of the meta node's children. An ALL-FREE - * meta node cannot have a terminator in any subtree. - */ - for (i = 1; i < skip; i += next_skip) { - if (next_skip == 1) - scan[i].u.bmu_bitmap = (u_daddr_t)-1; - else - scan[i].u.bmu_avail = radix; - scan[i].bm_bighint = radix; - } - } else { - radix /= BLIST_META_RADIX; - } - - if (count > radix) { - /* - * The allocation exceeds the number of blocks that are - * managed by a subtree of this meta node. - */ - panic("allocation too large"); - } - scan_from_start = cursor == blk; - child = (cursor - blk) / radix; - blk += child * radix; - for (i = 1 + child * next_skip; i < skip; i += next_skip) { + /* + * Examine the nonempty subtree associated with each bit set in mask. + */ + do { + bit = mask & -mask; + digit = bitpos(bit); + i = 1 + digit * skip; if (count <= scan[i].bm_bighint) { /* * The allocation might fit beginning in the i'th subtree. */ - r = blst_meta_alloc(&scan[i], - cursor > blk ? cursor : blk, count, radix); + r = blst_meta_alloc(&scan[i], cursor + digit * radix, + count, radix); if (r != SWAPBLK_NONE) { - scan->u.bmu_avail -= count; + if (scan[i].bm_bitmap == 0) + scan->bm_bitmap ^= bit; return (r); } - } else if (scan[i].bm_bighint == (daddr_t)-1) { - /* - * Terminator - */ - break; } - blk += radix; - } + cursor = blk; + } while ((mask ^= bit) != 0); /* - * We couldn't allocate count in this subtree, update bighint. + * We couldn't allocate count in this subtree. If the whole tree was + * scanned, and the last tree node is allocated, update bighint. */ - if (scan_from_start && scan->bm_bighint >= count) + if (scan_from_start && !(digit == BLIST_META_RADIX - 1 && + scan[i].bm_bighint == BLIST_MAX_ALLOC)) scan->bm_bighint = count - 1; return (SWAPBLK_NONE); } /* * BLST_LEAF_FREE() - free allocated block from leaf bitmap * */ static void blst_leaf_free(blmeta_t *scan, daddr_t blk, int count) { u_daddr_t mask; - int n; /* * free some data in this bitmap * mask=0000111111111110000 * \_________/\__/ * count n */ - n = blk & BLIST_BMAP_MASK; - mask = ((u_daddr_t)-1 << n) & - ((u_daddr_t)-1 >> (BLIST_BMAP_RADIX - count - n)); - if (scan->u.bmu_bitmap & mask) + mask = bitrange(blk & BLIST_BMAP_MASK, count); + if (scan->bm_bitmap & mask) panic("freeing free block"); - scan->u.bmu_bitmap |= mask; - - /* - * We could probably do a better job here. We are required to make - * bighint at least as large as the biggest contiguous block of - * data. If we just shoehorn it, a little extra overhead will - * be incured on the next allocation (but only that one typically). - */ - scan->bm_bighint = BLIST_BMAP_RADIX; + scan->bm_bitmap |= mask; } /* * BLST_META_FREE() - free allocated blocks from radix tree meta info * * This support routine frees a range of blocks from the bitmap. * The range must be entirely enclosed by this radix node. If a * meta node, we break the range down recursively to free blocks * in subnodes (which means that this code can free an arbitrary * range whereas the allocation code cannot allocate an arbitrary * range). 
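blst_leaf_free() above (and blst_leaf_fill() further down) now build their masks with bitrange(), which is defined earlier in subr_blist.c and therefore not visible in this hunk. A plausible user-space equivalent for 64-bit leaves, shown only to make the hunk self-explanatory; the exact kernel definition may differ:

#include <stdint.h>

/*
 * Assumed sketch of bitrange(): a mask of 'count' consecutive 1-bits starting
 * at bit 'n'.  Valid for 0 <= n, 1 <= count and n + count <= 64, so every
 * shift amount stays below the word width.
 */
static inline uint64_t
bitrange(int n, int count)
{
        return (((uint64_t)-1 << n) &
            ((uint64_t)-1 >> (64 - (n + count))));
}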
*/ static void blst_meta_free(blmeta_t *scan, daddr_t freeBlk, daddr_t count, u_daddr_t radix) { - daddr_t blk, i, next_skip, skip, v; - int child; + daddr_t blk, endBlk, i, skip; + int digit, endDigit; - if (scan->bm_bighint == (daddr_t)-1) - panic("freeing invalid range"); - if (radix == BLIST_BMAP_RADIX) - return (blst_leaf_free(scan, freeBlk, count)); - skip = radix_to_skip(radix); - next_skip = skip / BLIST_META_RADIX; - - if (scan->u.bmu_avail == 0) { - /* - * ALL-ALLOCATED special case, with possible - * shortcut to ALL-FREE special case. - */ - scan->u.bmu_avail = count; - scan->bm_bighint = count; - - if (count != radix) { - for (i = 1; i < skip; i += next_skip) { - if (scan[i].bm_bighint == (daddr_t)-1) - break; - scan[i].bm_bighint = 0; - if (next_skip == 1) { - scan[i].u.bmu_bitmap = 0; - } else { - scan[i].u.bmu_avail = 0; - } - } - /* fall through */ - } - } else { - scan->u.bmu_avail += count; - /* scan->bm_bighint = radix; */ - } - /* - * ALL-FREE special case. + * We could probably do a better job here. We are required to make + * bighint at least as large as the biggest allocable block of data. + * If we just shoehorn it, a little extra overhead will be incurred + * on the next allocation (but only that one typically). */ + scan->bm_bighint = BLIST_MAX_ALLOC; - if (scan->u.bmu_avail == radix) - return; - if (scan->u.bmu_avail > radix) - panic("blst_meta_free: freeing already free blocks (%lld) %lld/%lld", - (long long)count, (long long)scan->u.bmu_avail, - (long long)radix); + if (radix == BLIST_BMAP_RADIX) + return (blst_leaf_free(scan, freeBlk, count)); - /* - * Break the free down into its components - */ - - blk = freeBlk & -radix; + endBlk = ummin(freeBlk + count, (freeBlk + radix) & -radix); radix /= BLIST_META_RADIX; - - child = (freeBlk - blk) / radix; - blk += child * radix; - i = 1 + child * next_skip; - while (i < skip && blk < freeBlk + count) { - v = blk + radix - freeBlk; - if (v > count) - v = count; - blst_meta_free(&scan[i], freeBlk, v, radix); - if (scan->bm_bighint < scan[i].bm_bighint) - scan->bm_bighint = scan[i].bm_bighint; - count -= v; - freeBlk += v; + skip = radix_to_skip(radix); + blk = freeBlk & -radix; + digit = (blk / radix) & BLIST_META_MASK; + endDigit = 1 + (((endBlk - 1) / radix) & BLIST_META_MASK); + scan->bm_bitmap |= bitrange(digit, endDigit - digit); + for (i = 1 + digit * skip; blk < endBlk; i += skip) { blk += radix; - i += next_skip; + count = ummin(blk, endBlk) - freeBlk; + blst_meta_free(&scan[i], freeBlk, count, radix); + freeBlk = blk; } } /* - * BLIST_RADIX_COPY() - copy one radix tree to another + * BLST_COPY() - copy one radix tree to another * * Locates free space in the source tree and frees it in the destination * tree. The space may not already be free in the destination. 
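The rewritten free path above clips each request to the subtree that contains it with power-of-two arithmetic: for a power-of-two radix, blk & -radix is the start of the radix-aligned chunk containing blk, and (blk + radix) & -radix is the first boundary strictly above blk, which is that chunk's end. A small sketch, assuming unsigned block numbers (the kernel uses the signed daddr_t, but the idea is the same):

#include <stdint.h>

/*
 * Sketch of the alignment arithmetic used by blst_meta_free() and
 * blst_meta_fill(); 'radix' must be a power of two.
 */
static inline uint64_t
chunk_start(uint64_t blk, uint64_t radix)
{
        return (blk & -radix);                  /* round down to a radix boundary */
}

static inline uint64_t
chunk_end(uint64_t blk, uint64_t radix)
{
        return ((blk + radix) & -radix);        /* first boundary strictly above blk */
}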
*/ static void blst_copy(blmeta_t *scan, daddr_t blk, daddr_t radix, blist_t dest, daddr_t count) { - daddr_t i, next_skip, skip; + daddr_t endBlk, i, skip; /* * Leaf node */ if (radix == BLIST_BMAP_RADIX) { - u_daddr_t v = scan->u.bmu_bitmap; + u_daddr_t v = scan->bm_bitmap; if (v == (u_daddr_t)-1) { blist_free(dest, blk, count); } else if (v != 0) { int i; - for (i = 0; i < BLIST_BMAP_RADIX && i < count; ++i) { + for (i = 0; i < count; ++i) { if (v & ((u_daddr_t)1 << i)) blist_free(dest, blk + i, 1); } } return; } /* * Meta node */ - if (scan->u.bmu_avail == 0) { + if (scan->bm_bitmap == 0) { /* * Source all allocated, leave dest allocated */ return; } - if (scan->u.bmu_avail == radix) { - /* - * Source all free, free entire dest - */ - if (count < radix) - blist_free(dest, blk, count); - else - blist_free(dest, blk, radix); - return; - } - - skip = radix_to_skip(radix); - next_skip = skip / BLIST_META_RADIX; + endBlk = blk + count; radix /= BLIST_META_RADIX; - - for (i = 1; count && i < skip; i += next_skip) { - if (scan[i].bm_bighint == (daddr_t)-1) - break; - - if (count >= radix) { - blst_copy(&scan[i], blk, radix, dest, radix); - count -= radix; - } else { - if (count) { - blst_copy(&scan[i], blk, radix, dest, count); - } - count = 0; - } + skip = radix_to_skip(radix); + for (i = 1; blk < endBlk; i += skip) { blk += radix; + count = radix; + if (blk >= endBlk) + count -= blk - endBlk; + blst_copy(&scan[i], blk - radix, radix, dest, count); } } /* * BLST_LEAF_FILL() - allocate specific blocks in leaf bitmap * * This routine allocates all blocks in the specified range * regardless of any existing allocations in that range. Returns * the number of blocks allocated by the call. */ static daddr_t blst_leaf_fill(blmeta_t *scan, daddr_t blk, int count) { daddr_t nblks; u_daddr_t mask; - int n; - n = blk & BLIST_BMAP_MASK; - mask = ((u_daddr_t)-1 << n) & - ((u_daddr_t)-1 >> (BLIST_BMAP_RADIX - count - n)); + mask = bitrange(blk & BLIST_BMAP_MASK, count); /* Count the number of blocks that we are allocating. */ - nblks = bitcount64(scan->u.bmu_bitmap & mask); + nblks = bitcount64(scan->bm_bitmap & mask); - scan->u.bmu_bitmap &= ~mask; + scan->bm_bitmap &= ~mask; return (nblks); } /* * BLIST_META_FILL() - allocate specific blocks at a meta node * * This routine allocates the specified range of blocks, * regardless of any existing allocations in the range. The * range must be within the extent of this node. Returns the * number of blocks allocated by the call. */ static daddr_t blst_meta_fill(blmeta_t *scan, daddr_t allocBlk, daddr_t count, u_daddr_t radix) { - daddr_t blk, i, nblks, next_skip, skip, v; - int child; + daddr_t blk, endBlk, i, nblks, skip; + int digit; - if (scan->bm_bighint == (daddr_t)-1) - panic("filling invalid range"); - if (count > radix) { - /* - * The allocation exceeds the number of blocks that are - * managed by this node. - */ - panic("fill too large"); - } if (radix == BLIST_BMAP_RADIX) return (blst_leaf_fill(scan, allocBlk, count)); - if (count == radix || scan->u.bmu_avail == 0) { - /* - * ALL-ALLOCATED special case - */ - nblks = scan->u.bmu_avail; - scan->u.bmu_avail = 0; - scan->bm_bighint = 0; - return (nblks); - } + + endBlk = ummin(allocBlk + count, (allocBlk + radix) & -radix); + radix /= BLIST_META_RADIX; skip = radix_to_skip(radix); - next_skip = skip / BLIST_META_RADIX; blk = allocBlk & -radix; - - /* - * An ALL-FREE meta node requires special handling before allocating - * any of its blocks. 
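blst_leaf_fill() above reports how many of the requested blocks were actually free by popcounting the bits it is about to clear. A user-space sketch of the same leaf operation, with the bitrange() mask written out inline and __builtin_popcountll standing in for bitcount64():

#include <stdint.h>

/*
 * Sketch of the leaf "fill" step: force 'count' blocks starting at bit 'pos'
 * to the allocated (0) state and return how many of them were free before.
 * Assumes 0 <= pos, 1 <= count and pos + count <= 64.
 */
static int
leaf_fill(uint64_t *bitmap, int pos, int count)
{
        uint64_t mask;
        int nblks;

        mask = ((uint64_t)-1 << pos) &
            ((uint64_t)-1 >> (64 - (pos + count)));     /* bitrange(pos, count) */
        nblks = __builtin_popcountll(*bitmap & mask);   /* free blocks being taken */
        *bitmap &= ~mask;                               /* mark them allocated */
        return (nblks);
}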
- */ - if (scan->u.bmu_avail == radix) { - radix /= BLIST_META_RADIX; - - /* - * Reinitialize each of the meta node's children. An ALL-FREE - * meta node cannot have a terminator in any subtree. - */ - for (i = 1; i < skip; i += next_skip) { - if (next_skip == 1) - scan[i].u.bmu_bitmap = (u_daddr_t)-1; - else - scan[i].u.bmu_avail = radix; - scan[i].bm_bighint = radix; - } - } else { - radix /= BLIST_META_RADIX; - } - nblks = 0; - child = (allocBlk - blk) / radix; - blk += child * radix; - i = 1 + child * next_skip; - while (i < skip && blk < allocBlk + count) { - v = blk + radix - allocBlk; - if (v > count) - v = count; - nblks += blst_meta_fill(&scan[i], allocBlk, v, radix); - count -= v; - allocBlk += v; + while (blk < endBlk) { + digit = (blk / radix) & BLIST_META_MASK; + i = 1 + digit * skip; blk += radix; - i += next_skip; + count = ummin(blk, endBlk) - allocBlk; + nblks += blst_meta_fill(&scan[i], allocBlk, count, radix); + if (scan[i].bm_bitmap == 0) + scan->bm_bitmap &= ~((u_daddr_t)1 << digit); + allocBlk = blk; } - scan->u.bmu_avail -= nblks; return (nblks); } #ifdef BLIST_DEBUG static void blst_radix_print(blmeta_t *scan, daddr_t blk, daddr_t radix, int tab) { - daddr_t i, next_skip, skip; + daddr_t skip; + u_daddr_t bit, mask; + int digit; if (radix == BLIST_BMAP_RADIX) { printf( - "%*.*s(%08llx,%lld): bitmap %016llx big=%lld\n", + "%*.*s(%08llx,%lld): bitmap %0*llx big=%lld\n", tab, tab, "", (long long)blk, (long long)radix, - (long long)scan->u.bmu_bitmap, + 1 + (BLIST_BMAP_RADIX - 1) / 4, + (long long)scan->bm_bitmap, (long long)scan->bm_bighint ); return; } - if (scan->u.bmu_avail == 0) { - printf( - "%*.*s(%08llx,%lld) ALL ALLOCATED\n", - tab, tab, "", - (long long)blk, - (long long)radix - ); - return; - } - if (scan->u.bmu_avail == radix) { - printf( - "%*.*s(%08llx,%lld) ALL FREE\n", - tab, tab, "", - (long long)blk, - (long long)radix - ); - return; - } - printf( - "%*.*s(%08llx,%lld): subtree (%lld/%lld) big=%lld {\n", + "%*.*s(%08llx): subtree (%lld/%lld) bitmap %0*llx big=%lld {\n", tab, tab, "", (long long)blk, (long long)radix, - (long long)scan->u.bmu_avail, (long long)radix, + 1 + (BLIST_META_RADIX - 1) / 4, + (long long)scan->bm_bitmap, (long long)scan->bm_bighint ); - skip = radix_to_skip(radix); - next_skip = skip / BLIST_META_RADIX; radix /= BLIST_META_RADIX; + skip = radix_to_skip(radix); tab += 4; - for (i = 1; i < skip; i += next_skip) { - if (scan[i].bm_bighint == (daddr_t)-1) { - printf( - "%*.*s(%08llx,%lld): Terminator\n", - tab, tab, "", - (long long)blk, (long long)radix - ); - break; - } - blst_radix_print(&scan[i], blk, radix, tab); - blk += radix; - } + mask = scan->bm_bitmap; + /* Examine the nonempty subtree associated with each bit set in mask */ + do { + bit = mask & -mask; + digit = bitpos(bit); + blst_radix_print(&scan[1 + digit * skip], blk + digit * radix, + radix, tab); + } while ((mask ^= bit) != 0); tab -= 4; printf( "%*.*s}\n", tab, tab, "" ); } #endif #ifdef BLIST_DEBUG int main(int ac, char **av) { - int size = 1024; + int size = BLIST_META_RADIX * BLIST_BMAP_RADIX; int i; blist_t bl; struct sbuf *s; for (i = 1; i < ac; ++i) { const char *ptr = av[i]; if (*ptr != '-') { size = strtol(ptr, NULL, 0); continue; } ptr += 2; fprintf(stderr, "Bad option: %s\n", ptr - 2); exit(1); } bl = blist_create(size, M_WAITOK); blist_free(bl, 0, size); for (;;) { char buf[1024]; long long da = 0; long long count = 0; printf("%lld/%lld/%lld> ", (long long)blist_avail(bl), (long long)size, (long long)bl->bl_radix); fflush(stdout); if (fgets(buf, 
sizeof(buf), stdin) == NULL) break; switch(buf[0]) { case 'r': if (sscanf(buf + 1, "%lld", &count) == 1) { blist_resize(&bl, count, 1, M_WAITOK); } else { printf("?\n"); } case 'p': blist_print(bl); break; case 's': s = sbuf_new_auto(); blist_stats(bl, s); sbuf_finish(s); printf("%s", sbuf_data(s)); sbuf_delete(s); break; case 'a': if (sscanf(buf + 1, "%lld", &count) == 1) { daddr_t blk = blist_alloc(bl, count); printf(" R=%08llx\n", (long long)blk); } else { printf("?\n"); } break; case 'f': if (sscanf(buf + 1, "%llx %lld", &da, &count) == 2) { blist_free(bl, da, count); } else { printf("?\n"); } break; case 'l': if (sscanf(buf + 1, "%llx %lld", &da, &count) == 2) { printf(" n=%jd\n", (intmax_t)blist_fill(bl, da, count)); } else { printf("?\n"); } break; case '?': case 'h': puts( "p -print\n" "s -stats\n" "a %d -allocate\n" "f %x %d -free\n" "l %x %d -fill\n" "r %d -resize\n" "h/? -help" ); break; default: printf("?\n"); break; } } return(0); } void panic(const char *ctl, ...) { va_list va; va_start(va, ctl); vfprintf(stderr, ctl, va); fprintf(stderr, "\n"); va_end(va); exit(1); } #endif Index: projects/clang700-import/sys/kern/subr_epoch.c =================================================================== --- projects/clang700-import/sys/kern/subr_epoch.c (revision 340426) +++ projects/clang700-import/sys/kern/subr_epoch.c (revision 340427) @@ -1,556 +1,672 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2018, Matthew Macy * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation"); +#ifdef __amd64__ +#define EPOCH_ALIGN CACHE_LINE_SIZE*2 +#else +#define EPOCH_ALIGN CACHE_LINE_SIZE +#endif + +TAILQ_HEAD (epoch_tdlist, epoch_tracker); +typedef struct epoch_record { + ck_epoch_record_t er_record; + volatile struct epoch_tdlist er_tdlist; + volatile uint32_t er_gen; + uint32_t er_cpuid; +} __aligned(EPOCH_ALIGN) *epoch_record_t; + +struct epoch { + struct ck_epoch e_epoch __aligned(EPOCH_ALIGN); + epoch_record_t e_pcpu_record; + int e_idx; + int e_flags; +}; + /* arbitrary --- needs benchmarking */ #define MAX_ADAPTIVE_SPIN 100 #define MAX_EPOCHS 64 CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context)); SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information"); SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats"); /* Stats. */ static counter_u64_t block_count; SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW, &block_count, "# of times a thread was in an epoch when epoch_wait was called"); static counter_u64_t migrate_count; SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW, &migrate_count, "# of times thread was migrated to another CPU in epoch_wait"); static counter_u64_t turnstile_count; SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW, &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait"); static counter_u64_t switch_count; SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW, &switch_count, "# of times a thread voluntarily context switched in epoch_wait"); static counter_u64_t epoch_call_count; SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW, &epoch_call_count, "# of times a callback was deferred"); static counter_u64_t epoch_call_task_count; SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW, &epoch_call_task_count, "# of times a callback task was run"); TAILQ_HEAD (threadlist, thread); CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry, ck_epoch_entry_container) epoch_t allepochs[MAX_EPOCHS]; DPCPU_DEFINE(struct grouptask, epoch_cb_task); DPCPU_DEFINE(int, epoch_cb_count); static __read_mostly int inited; static __read_mostly int epoch_count; __read_mostly epoch_t global_epoch; __read_mostly epoch_t global_epoch_preempt; static void epoch_call_task(void *context __unused); static uma_zone_t pcpu_zone_record; static void epoch_init(void *arg __unused) { int cpu; block_count = counter_u64_alloc(M_WAITOK); migrate_count = counter_u64_alloc(M_WAITOK); turnstile_count = counter_u64_alloc(M_WAITOK); switch_count = counter_u64_alloc(M_WAITOK); epoch_call_count = counter_u64_alloc(M_WAITOK); epoch_call_task_count = counter_u64_alloc(M_WAITOK); - pcpu_zone_record = uma_zcreate("epoch_record pcpu", sizeof(struct epoch_record), - NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU); + pcpu_zone_record = uma_zcreate("epoch_record pcpu", + sizeof(struct epoch_record), NULL, NULL, NULL, NULL, + UMA_ALIGN_PTR, UMA_ZONE_PCPU); CPU_FOREACH(cpu) { - GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0, epoch_call_task, NULL); - taskqgroup_attach_cpu(qgroup_softirq, DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, -1, "epoch call task"); + GROUPTASK_INIT(DPCPU_ID_PTR(cpu, 
epoch_cb_task), 0, + epoch_call_task, NULL); + taskqgroup_attach_cpu(qgroup_softirq, + DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, -1, + "epoch call task"); } inited = 1; global_epoch = epoch_alloc(0); global_epoch_preempt = epoch_alloc(EPOCH_PREEMPT); } SYSINIT(epoch, SI_SUB_TASKQ + 1, SI_ORDER_FIRST, epoch_init, NULL); #if !defined(EARLY_AP_STARTUP) static void epoch_init_smp(void *dummy __unused) { inited = 2; } SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL); #endif static void epoch_ctor(epoch_t epoch) { epoch_record_t er; int cpu; epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK); CPU_FOREACH(cpu) { er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu); bzero(er, sizeof(*er)); - ck_epoch_register(&epoch->e_epoch, &er->er_read_record, NULL); - ck_epoch_register(&epoch->e_epoch, &er->er_write_record, NULL); + ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL); TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist); er->er_cpuid = cpu; } } +static void +epoch_adjust_prio(struct thread *td, u_char prio) +{ + + thread_lock(td); + sched_prio(td, prio); + thread_unlock(td); +} + epoch_t epoch_alloc(int flags) { epoch_t epoch; if (__predict_false(!inited)) panic("%s called too early in boot", __func__); epoch = malloc(sizeof(struct epoch), M_EPOCH, M_ZERO | M_WAITOK); ck_epoch_init(&epoch->e_epoch); epoch_ctor(epoch); MPASS(epoch_count < MAX_EPOCHS - 2); epoch->e_flags = flags; epoch->e_idx = epoch_count; allepochs[epoch_count++] = epoch; return (epoch); } void epoch_free(epoch_t epoch) { #ifdef INVARIANTS struct epoch_record *er; int cpu; CPU_FOREACH(cpu) { er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu); MPASS(TAILQ_EMPTY(&er->er_tdlist)); } #endif allepochs[epoch->e_idx] = NULL; epoch_wait(global_epoch); uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record); free(epoch, M_EPOCH); } +static epoch_record_t +epoch_currecord(epoch_t epoch) +{ + + return (zpcpu_get_cpu(epoch->e_pcpu_record, curcpu)); +} + +#define INIT_CHECK(epoch) \ + do { \ + if (__predict_false((epoch) == NULL)) \ + return; \ + } while (0) + void -epoch_enter_preempt_KBI(epoch_t epoch, epoch_tracker_t et) +epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et) { + struct epoch_record *er; + struct thread *td; - epoch_enter_preempt(epoch, et); + MPASS(cold || epoch != NULL); + INIT_CHECK(epoch); + MPASS(epoch->e_flags & EPOCH_PREEMPT); +#ifdef EPOCH_TRACKER_DEBUG + et->et_magic_pre = EPOCH_MAGIC0; + et->et_magic_post = EPOCH_MAGIC1; +#endif + td = curthread; + et->et_td = td; + td->td_epochnest++; + critical_enter(); + sched_pin(); + + td->td_pre_epoch_prio = td->td_priority; + er = epoch_currecord(epoch); + TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link); + ck_epoch_begin(&er->er_record, &et->et_section); + critical_exit(); } void -epoch_exit_preempt_KBI(epoch_t epoch, epoch_tracker_t et) +epoch_enter(epoch_t epoch) { + struct thread *td; + epoch_record_t er; - epoch_exit_preempt(epoch, et); + MPASS(cold || epoch != NULL); + INIT_CHECK(epoch); + td = curthread; + + td->td_epochnest++; + critical_enter(); + er = epoch_currecord(epoch); + ck_epoch_begin(&er->er_record, NULL); } void -epoch_enter_KBI(epoch_t epoch) +epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et) { + struct epoch_record *er; + struct thread *td; - epoch_enter(epoch); + INIT_CHECK(epoch); + td = curthread; + critical_enter(); + sched_unpin(); + MPASS(td->td_epochnest); + td->td_epochnest--; + er = epoch_currecord(epoch); + MPASS(epoch->e_flags & EPOCH_PREEMPT); + MPASS(et != NULL); + MPASS(et->et_td == td); 
+#ifdef EPOCH_TRACKER_DEBUG + MPASS(et->et_magic_pre == EPOCH_MAGIC0); + MPASS(et->et_magic_post == EPOCH_MAGIC1); + et->et_magic_pre = 0; + et->et_magic_post = 0; +#endif +#ifdef INVARIANTS + et->et_td = (void*)0xDEADBEEF; +#endif + ck_epoch_end(&er->er_record, &et->et_section); + TAILQ_REMOVE(&er->er_tdlist, et, et_link); + er->er_gen++; + if (__predict_false(td->td_pre_epoch_prio != td->td_priority)) + epoch_adjust_prio(td, td->td_pre_epoch_prio); + critical_exit(); } void -epoch_exit_KBI(epoch_t epoch) +epoch_exit(epoch_t epoch) { + struct thread *td; + epoch_record_t er; - epoch_exit(epoch); + INIT_CHECK(epoch); + td = curthread; + MPASS(td->td_epochnest); + td->td_epochnest--; + er = epoch_currecord(epoch); + ck_epoch_end(&er->er_record, NULL); + critical_exit(); } /* - * epoch_block_handler_preempt is a callback from the ck code when another thread is - * currently in an epoch section. + * epoch_block_handler_preempt() is a callback from the CK code when another + * thread is currently in an epoch section. */ static void -epoch_block_handler_preempt(struct ck_epoch *global __unused, ck_epoch_record_t *cr, - void *arg __unused) +epoch_block_handler_preempt(struct ck_epoch *global __unused, + ck_epoch_record_t *cr, void *arg __unused) { epoch_record_t record; struct thread *td, *owner, *curwaittd; - struct epoch_thread *tdwait; + struct epoch_tracker *tdwait; struct turnstile *ts; struct lock_object *lock; int spincount, gen; int locksheld __unused; - record = __containerof(cr, struct epoch_record, er_read_record); + record = __containerof(cr, struct epoch_record, er_record); td = curthread; locksheld = td->td_locks; spincount = 0; counter_u64_add(block_count, 1); /* * We lost a race and there's no longer any threads * on the CPU in an epoch section. */ if (TAILQ_EMPTY(&record->er_tdlist)) return; if (record->er_cpuid != curcpu) { /* * If the head of the list is running, we can wait for it * to remove itself from the list and thus save us the * overhead of a migration */ gen = record->er_gen; thread_unlock(td); /* * We can't actually check if the waiting thread is running * so we simply poll for it to exit before giving up and * migrating. */ do { cpu_spinwait(); } while (!TAILQ_EMPTY(&record->er_tdlist) && gen == record->er_gen && spincount++ < MAX_ADAPTIVE_SPIN); thread_lock(td); /* * If the generation has changed we can poll again * otherwise we need to migrate. */ if (gen != record->er_gen) return; /* * Being on the same CPU as that of the record on which * we need to wait allows us access to the thread * list associated with that CPU. We can then examine the * oldest thread in the queue and wait on its turnstile * until it resumes and so on until a grace period * elapses. * */ counter_u64_add(migrate_count, 1); sched_bind(td, record->er_cpuid); /* * At this point we need to return to the ck code * to scan to see if a grace period has elapsed. * We can't move on to check the thread list, because * in the meantime new threads may have arrived that * in fact belong to a different epoch. */ return; } /* * Try to find a thread in an epoch section on this CPU * waiting on a turnstile. Otherwise find the lowest * priority thread (highest prio value) and drop our priority * to match to allow it to run. */ TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) { /* * Propagate our priority to any other waiters to prevent us * from starving them. They will have their original priority * restore on exit from epoch_wait(). 
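With this change the preemptible enter/exit pair threads a caller-supplied epoch_tracker through the section, which is what lets the per-CPU record keep its er_tdlist of threads currently inside the epoch. An illustrative kernel-style caller; my_epoch and the body are hypothetical, and only the stack-allocated tracker and the enter/exit calls mirror the API in this patch:

/*
 * Illustrative only: a preemptible read-side section using the reworked API.
 * 'my_epoch' is assumed to have been created with epoch_alloc(EPOCH_PREEMPT).
 */
static void
example_reader(epoch_t my_epoch)
{
        struct epoch_tracker et;        /* lives on the caller's stack */

        epoch_enter_preempt(my_epoch, &et);
        /* ... read epoch-protected data here; sleeping is not allowed ... */
        epoch_exit_preempt(my_epoch, &et);
}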
*/ curwaittd = tdwait->et_td; if (!TD_IS_INHIBITED(curwaittd) && curwaittd->td_priority > td->td_priority) { critical_enter(); thread_unlock(td); thread_lock(curwaittd); sched_prio(curwaittd, td->td_priority); thread_unlock(curwaittd); thread_lock(td); critical_exit(); } if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) && ((ts = curwaittd->td_blocked) != NULL)) { /* - * We unlock td to allow turnstile_wait to reacquire the - * the thread lock. Before unlocking it we enter a critical - * section to prevent preemption after we reenable interrupts - * by dropping the thread lock in order to prevent curwaittd - * from getting to run. + * We unlock td to allow turnstile_wait to reacquire + * the thread lock. Before unlocking it we enter a + * critical section to prevent preemption after we + * reenable interrupts by dropping the thread lock in + * order to prevent curwaittd from getting to run. */ critical_enter(); thread_unlock(td); owner = turnstile_lock(ts, &lock); /* - * The owner pointer indicates that the lock succeeded. Only - * in case we hold the lock and the turnstile we locked is still - * the one that curwaittd is blocked on can we continue. Otherwise - * The turnstile pointer has been changed out from underneath - * us, as in the case where the lock holder has signalled curwaittd, + * The owner pointer indicates that the lock succeeded. + * Only in case we hold the lock and the turnstile we + * locked is still the one that curwaittd is blocked on + * can we continue. Otherwise the turnstile pointer has + * been changed out from underneath us, as in the case + * where the lock holder has signalled curwaittd, * and we need to continue. */ if (owner != NULL && ts == curwaittd->td_blocked) { - MPASS(TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd)); + MPASS(TD_IS_INHIBITED(curwaittd) && + TD_ON_LOCK(curwaittd)); critical_exit(); turnstile_wait(ts, owner, curwaittd->td_tsqueue); counter_u64_add(turnstile_count, 1); thread_lock(td); return; } else if (owner != NULL) turnstile_unlock(ts, lock); thread_lock(td); critical_exit(); KASSERT(td->td_locks == locksheld, ("%d extra locks held", td->td_locks - locksheld)); } } /* * We didn't find any threads actually blocked on a lock * so we have nothing to do except context switch away. */ counter_u64_add(switch_count, 1); mi_switch(SW_VOL | SWT_RELINQUISH, NULL); /* * Release the thread lock while yielding to * allow other threads to acquire the lock * pointed to by TDQ_LOCKPTR(td). Else a * deadlock like situation might happen. 
(HPS) */ thread_unlock(td); thread_lock(td); } void epoch_wait_preempt(epoch_t epoch) { struct thread *td; int was_bound; int old_cpu; int old_pinned; u_char old_prio; int locks __unused; MPASS(cold || epoch != NULL); INIT_CHECK(epoch); td = curthread; #ifdef INVARIANTS locks = curthread->td_locks; MPASS(epoch->e_flags & EPOCH_PREEMPT); if ((epoch->e_flags & EPOCH_LOCKED) == 0) WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "epoch_wait() can be long running"); - KASSERT(!in_epoch(epoch), - ("epoch_wait_preempt() called in the middle " - "of an epoch section of the same epoch")); + KASSERT(!in_epoch(epoch), ("epoch_wait_preempt() called in the middle " + "of an epoch section of the same epoch")); #endif thread_lock(td); DROP_GIANT(); old_cpu = PCPU_GET(cpuid); old_pinned = td->td_pinned; old_prio = td->td_priority; was_bound = sched_is_bound(td); sched_unbind(td); td->td_pinned = 0; sched_bind(td, old_cpu); - ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt, NULL); + ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt, + NULL); /* restore CPU binding, if any */ if (was_bound != 0) { sched_bind(td, old_cpu); } else { /* get thread back to initial CPU, if any */ if (old_pinned != 0) sched_bind(td, old_cpu); sched_unbind(td); } /* restore pinned after bind */ td->td_pinned = old_pinned; /* restore thread priority */ sched_prio(td, old_prio); thread_unlock(td); PICKUP_GIANT(); KASSERT(td->td_locks == locks, ("%d residual locks held", td->td_locks - locks)); } static void epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused, void *arg __unused) { cpu_spinwait(); } void epoch_wait(epoch_t epoch) { MPASS(cold || epoch != NULL); INIT_CHECK(epoch); MPASS(epoch->e_flags == 0); critical_enter(); ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL); critical_exit(); } void epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t)) { epoch_record_t er; ck_epoch_entry_t *cb; cb = (void *)ctx; MPASS(callback); /* too early in boot to have epoch set up */ if (__predict_false(epoch == NULL)) goto boottime; #if !defined(EARLY_AP_STARTUP) if (__predict_false(inited < 2)) goto boottime; #endif critical_enter(); *DPCPU_PTR(epoch_cb_count) += 1; er = epoch_currecord(epoch); - ck_epoch_call(&er->er_write_record, cb, (ck_epoch_cb_t *)callback); + ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback); critical_exit(); return; boottime: callback(ctx); } static void epoch_call_task(void *arg __unused) { ck_stack_entry_t *cursor, *head, *next; ck_epoch_record_t *record; epoch_record_t er; epoch_t epoch; ck_stack_t cb_stack; int i, npending, total; ck_stack_init(&cb_stack); critical_enter(); epoch_enter(global_epoch); for (total = i = 0; i < epoch_count; i++) { if (__predict_false((epoch = allepochs[i]) == NULL)) continue; er = epoch_currecord(epoch); - record = &er->er_write_record; + record = &er->er_record; if ((npending = record->n_pending) == 0) continue; ck_epoch_poll_deferred(record, &cb_stack); total += npending - record->n_pending; } epoch_exit(global_epoch); *DPCPU_PTR(epoch_cb_count) -= total; critical_exit(); counter_u64_add(epoch_call_count, total); counter_u64_add(epoch_call_task_count, 1); head = ck_stack_batch_pop_npsc(&cb_stack); for (cursor = head; cursor != NULL; cursor = next) { struct ck_epoch_entry *entry = - ck_epoch_entry_container(cursor); + ck_epoch_entry_container(cursor); next = CK_STACK_NEXT(cursor); entry->function(entry); } } int in_epoch_verbose(epoch_t epoch, int 
dump_onfail) { - struct epoch_thread *tdwait; + struct epoch_tracker *tdwait; struct thread *td; epoch_record_t er; td = curthread; if (td->td_epochnest == 0) return (0); if (__predict_false((epoch) == NULL)) return (0); critical_enter(); er = epoch_currecord(epoch); TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link) if (tdwait->et_td == td) { critical_exit(); return (1); } #ifdef INVARIANTS if (dump_onfail) { MPASS(td->td_pinned); printf("cpu: %d id: %d\n", curcpu, td->td_tid); TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link) printf("td_tid: %d ", tdwait->et_td->td_tid); printf("\n"); } #endif critical_exit(); return (0); } int in_epoch(epoch_t epoch) { return (in_epoch_verbose(epoch, 0)); } void -epoch_adjust_prio(struct thread *td, u_char prio) +epoch_thread_init(struct thread *td) { - thread_lock(td); - sched_prio(td, prio); - thread_unlock(td); + + td->td_et = malloc(sizeof(struct epoch_tracker), M_EPOCH, M_WAITOK); +} + +void +epoch_thread_fini(struct thread *td) +{ + + free(td->td_et, M_EPOCH); } Index: projects/clang700-import/sys/net/if.c =================================================================== --- projects/clang700-import/sys/net/if.c (revision 340426) +++ projects/clang700-import/sys/net/if.c (revision 340427) @@ -1,4593 +1,4587 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1980, 1986, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)if.c 8.5 (Berkeley) 1/9/95 * $FreeBSD$ */ #include "opt_inet6.h" #include "opt_inet.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(INET) || defined(INET6) #include #include #include #include #include #ifdef INET #include #include #endif /* INET */ #ifdef INET6 #include #include #endif /* INET6 */ #endif /* INET || INET6 */ #include /* * Consumers of struct ifreq such as tcpdump assume no pad between ifr_name * and ifr_ifru when it is used in SIOCGIFCONF. */ _Static_assert(sizeof(((struct ifreq *)0)->ifr_name) == offsetof(struct ifreq, ifr_ifru), "gap between ifr_name and ifr_ifru"); __read_mostly epoch_t net_epoch_preempt; __read_mostly epoch_t net_epoch; #ifdef COMPAT_FREEBSD32 #include #include struct ifreq_buffer32 { uint32_t length; /* (size_t) */ uint32_t buffer; /* (void *) */ }; /* * Interface request structure used for socket * ioctl's. All interface ioctl's must have parameter * definitions which begin with ifr_name. The * remainder may be interface specific. */ struct ifreq32 { char ifr_name[IFNAMSIZ]; /* if name, e.g. "en0" */ union { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; struct ifreq_buffer32 ifru_buffer; short ifru_flags[2]; short ifru_index; int ifru_jid; int ifru_metric; int ifru_mtu; int ifru_phys; int ifru_media; uint32_t ifru_data; int ifru_cap[2]; u_int ifru_fib; u_char ifru_vlan_pcp; } ifr_ifru; }; CTASSERT(sizeof(struct ifreq) == sizeof(struct ifreq32)); CTASSERT(__offsetof(struct ifreq, ifr_ifru) == __offsetof(struct ifreq32, ifr_ifru)); struct ifgroupreq32 { char ifgr_name[IFNAMSIZ]; u_int ifgr_len; union { char ifgru_group[IFNAMSIZ]; uint32_t ifgru_groups; } ifgr_ifgru; }; struct ifmediareq32 { char ifm_name[IFNAMSIZ]; int ifm_current; int ifm_mask; int ifm_status; int ifm_active; int ifm_count; uint32_t ifm_ulist; /* (int *) */ }; #define SIOCGIFMEDIA32 _IOC_NEWTYPE(SIOCGIFMEDIA, struct ifmediareq32) #define SIOCGIFXMEDIA32 _IOC_NEWTYPE(SIOCGIFXMEDIA, struct ifmediareq32) #define _CASE_IOC_IFGROUPREQ_32(cmd) \ case _IOC_NEWTYPE((cmd), struct ifgroupreq32): #else /* !COMPAT_FREEBSD32 */ #define _CASE_IOC_IFGROUPREQ_32(cmd) #endif /* !COMPAT_FREEBSD32 */ #define CASE_IOC_IFGROUPREQ(cmd) \ _CASE_IOC_IFGROUPREQ_32(cmd) \ case (cmd) union ifreq_union { struct ifreq ifr; #ifdef COMPAT_FREEBSD32 struct ifreq32 ifr32; #endif }; union ifgroupreq_union { struct ifgroupreq ifgr; #ifdef COMPAT_FREEBSD32 struct ifgroupreq32 ifgr32; #endif }; SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers"); SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management"); SYSCTL_INT(_net_link, OID_AUTO, ifqmaxlen, CTLFLAG_RDTUN, &ifqmaxlen, 0, "max send queue size"); /* Log link state change events */ static int log_link_state_change = 1; SYSCTL_INT(_net_link, OID_AUTO, log_link_state_change, CTLFLAG_RW, &log_link_state_change, 0, "log interface link state change events"); /* Log promiscuous mode change events */ static int log_promisc_mode_change = 1; SYSCTL_INT(_net_link, OID_AUTO, log_promisc_mode_change, CTLFLAG_RDTUN, &log_promisc_mode_change, 1, "log promiscuous mode change events"); /* Interface description */ static unsigned int ifdescr_maxlen = 1024; 
SYSCTL_UINT(_net, OID_AUTO, ifdescr_maxlen, CTLFLAG_RW, &ifdescr_maxlen, 0, "administrative maximum length for interface description"); static MALLOC_DEFINE(M_IFDESCR, "ifdescr", "ifnet descriptions"); /* global sx for non-critical path ifdescr */ static struct sx ifdescr_sx; SX_SYSINIT(ifdescr_sx, &ifdescr_sx, "ifnet descr"); void (*ng_ether_link_state_p)(struct ifnet *ifp, int state); void (*lagg_linkstate_p)(struct ifnet *ifp, int state); /* These are external hooks for CARP. */ void (*carp_linkstate_p)(struct ifnet *ifp); void (*carp_demote_adj_p)(int, char *); int (*carp_master_p)(struct ifaddr *); #if defined(INET) || defined(INET6) int (*carp_forus_p)(struct ifnet *ifp, u_char *dhost); int (*carp_output_p)(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa); int (*carp_ioctl_p)(struct ifreq *, u_long, struct thread *); int (*carp_attach_p)(struct ifaddr *, int); void (*carp_detach_p)(struct ifaddr *, bool); #endif #ifdef INET int (*carp_iamatch_p)(struct ifaddr *, uint8_t **); #endif #ifdef INET6 struct ifaddr *(*carp_iamatch6_p)(struct ifnet *ifp, struct in6_addr *taddr6); caddr_t (*carp_macmatch6_p)(struct ifnet *ifp, struct mbuf *m, const struct in6_addr *taddr); #endif struct mbuf *(*tbr_dequeue_ptr)(struct ifaltq *, int) = NULL; /* * XXX: Style; these should be sorted alphabetically, and unprototyped * static functions should be prototyped. Currently they are sorted by * declaration order. */ static void if_attachdomain(void *); static void if_attachdomain1(struct ifnet *); static int ifconf(u_long, caddr_t); static void *if_grow(void); static void if_input_default(struct ifnet *, struct mbuf *); static int if_requestencap_default(struct ifnet *, struct if_encap_req *); static void if_route(struct ifnet *, int flag, int fam); static int if_setflag(struct ifnet *, int, int, int *, int); static int if_transmit(struct ifnet *ifp, struct mbuf *m); static void if_unroute(struct ifnet *, int flag, int fam); static void link_rtrequest(int, struct rtentry *, struct rt_addrinfo *); static int if_delmulti_locked(struct ifnet *, struct ifmultiaddr *, int); static void do_link_state_change(void *, int); static int if_getgroup(struct ifgroupreq *, struct ifnet *); static int if_getgroupmembers(struct ifgroupreq *); static void if_delgroups(struct ifnet *); static void if_attach_internal(struct ifnet *, int, struct if_clone *); static int if_detach_internal(struct ifnet *, int, struct if_clone **); #ifdef VIMAGE static void if_vmove(struct ifnet *, struct vnet *); #endif #ifdef INET6 /* * XXX: declare here to avoid to include many inet6 related files.. * should be more generalized? */ extern void nd6_setmtu(struct ifnet *); #endif /* ipsec helper hooks */ VNET_DEFINE(struct hhook_head *, ipsec_hhh_in[HHOOK_IPSEC_COUNT]); VNET_DEFINE(struct hhook_head *, ipsec_hhh_out[HHOOK_IPSEC_COUNT]); VNET_DEFINE(int, if_index); int ifqmaxlen = IFQ_MAXLEN; VNET_DEFINE(struct ifnethead, ifnet); /* depend on static init XXX */ VNET_DEFINE(struct ifgrouphead, ifg_head); VNET_DEFINE_STATIC(int, if_indexlim) = 8; /* Table of ifnet by index. */ VNET_DEFINE(struct ifnet **, ifindex_table); #define V_if_indexlim VNET(if_indexlim) #define V_ifindex_table VNET(ifindex_table) /* * The global network interface list (V_ifnet) and related state (such as * if_index, if_indexlim, and ifindex_table) are protected by an sxlock and * an rwlock. Either may be acquired shared to stablize the list, but both * must be acquired writable to modify the list. 
This model allows us to * both stablize the interface list during interrupt thread processing, but * also to stablize it over long-running ioctls, without introducing priority * inversions and deadlocks. */ struct rwlock ifnet_rwlock; RW_SYSINIT_FLAGS(ifnet_rw, &ifnet_rwlock, "ifnet_rw", RW_RECURSE); struct sx ifnet_sxlock; SX_SYSINIT_FLAGS(ifnet_sx, &ifnet_sxlock, "ifnet_sx", SX_RECURSE); /* * The allocation of network interfaces is a rather non-atomic affair; we * need to select an index before we are ready to expose the interface for * use, so will use this pointer value to indicate reservation. */ #define IFNET_HOLD (void *)(uintptr_t)(-1) static if_com_alloc_t *if_com_alloc[256]; static if_com_free_t *if_com_free[256]; static MALLOC_DEFINE(M_IFNET, "ifnet", "interface internals"); MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address"); MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address"); struct ifnet * ifnet_byindex_locked(u_short idx) { if (idx > V_if_index) return (NULL); if (V_ifindex_table[idx] == IFNET_HOLD) return (NULL); return (V_ifindex_table[idx]); } struct ifnet * ifnet_byindex(u_short idx) { struct ifnet *ifp; ifp = ifnet_byindex_locked(idx); return (ifp); } struct ifnet * ifnet_byindex_ref(u_short idx) { struct ifnet *ifp; IFNET_RLOCK_NOSLEEP(); ifp = ifnet_byindex_locked(idx); if (ifp == NULL || (ifp->if_flags & IFF_DYING)) { IFNET_RUNLOCK_NOSLEEP(); return (NULL); } if_ref(ifp); IFNET_RUNLOCK_NOSLEEP(); return (ifp); } /* * Allocate an ifindex array entry; return 0 on success or an error on * failure. */ static u_short ifindex_alloc(void **old) { u_short idx; IFNET_WLOCK_ASSERT(); /* * Try to find an empty slot below V_if_index. If we fail, take the * next slot. */ for (idx = 1; idx <= V_if_index; idx++) { if (V_ifindex_table[idx] == NULL) break; } /* Catch if_index overflow. */ if (idx >= V_if_indexlim) { *old = if_grow(); return (USHRT_MAX); } if (idx > V_if_index) V_if_index = idx; return (idx); } static void ifindex_free_locked(u_short idx) { IFNET_WLOCK_ASSERT(); V_ifindex_table[idx] = NULL; while (V_if_index > 0 && V_ifindex_table[V_if_index] == NULL) V_if_index--; } static void ifindex_free(u_short idx) { IFNET_WLOCK(); ifindex_free_locked(idx); IFNET_WUNLOCK(); } static void ifnet_setbyindex(u_short idx, struct ifnet *ifp) { V_ifindex_table[idx] = ifp; } struct ifaddr * ifaddr_byindex(u_short idx) { struct ifnet *ifp; struct ifaddr *ifa = NULL; IFNET_RLOCK_NOSLEEP(); ifp = ifnet_byindex_locked(idx); if (ifp != NULL && (ifa = ifp->if_addr) != NULL) ifa_ref(ifa); IFNET_RUNLOCK_NOSLEEP(); return (ifa); } /* * Network interface utility routines. * * Routines with ifa_ifwith* names take sockaddr *'s as * parameters. 
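ifnet_byindex_ref() above bundles the lookup, the IFF_DYING check and the reference acquisition under the read lock, so a caller only has to pair it with if_rele() (defined later in this file). A kernel-style illustration; the function name and body are hypothetical:

/*
 * Illustrative only: the lookup-and-hold pattern offered by
 * ifnet_byindex_ref(); the reference obtained here is dropped with if_rele().
 */
static void
example_use_ifnet(u_short idx)
{
        struct ifnet *ifp;

        ifp = ifnet_byindex_ref(idx);           /* NULL if absent or dying */
        if (ifp == NULL)
                return;
        /* ... safe to use ifp here; the reference pins it ... */
        if_rele(ifp);                           /* may schedule deferred destruction */
}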
*/ static void vnet_if_init(const void *unused __unused) { void *old; CK_STAILQ_INIT(&V_ifnet); CK_STAILQ_INIT(&V_ifg_head); IFNET_WLOCK(); old = if_grow(); /* create initial table */ IFNET_WUNLOCK(); epoch_wait_preempt(net_epoch_preempt); free(old, M_IFNET); vnet_if_clone_init(); } VNET_SYSINIT(vnet_if_init, SI_SUB_INIT_IF, SI_ORDER_SECOND, vnet_if_init, NULL); #ifdef VIMAGE static void vnet_if_uninit(const void *unused __unused) { VNET_ASSERT(CK_STAILQ_EMPTY(&V_ifnet), ("%s:%d tailq &V_ifnet=%p " "not empty", __func__, __LINE__, &V_ifnet)); VNET_ASSERT(CK_STAILQ_EMPTY(&V_ifg_head), ("%s:%d tailq &V_ifg_head=%p " "not empty", __func__, __LINE__, &V_ifg_head)); free((caddr_t)V_ifindex_table, M_IFNET); } VNET_SYSUNINIT(vnet_if_uninit, SI_SUB_INIT_IF, SI_ORDER_FIRST, vnet_if_uninit, NULL); static void vnet_if_return(const void *unused __unused) { struct ifnet *ifp, *nifp; /* Return all inherited interfaces to their parent vnets. */ CK_STAILQ_FOREACH_SAFE(ifp, &V_ifnet, if_link, nifp) { if (ifp->if_home_vnet != ifp->if_vnet) if_vmove(ifp, ifp->if_home_vnet); } } VNET_SYSUNINIT(vnet_if_return, SI_SUB_VNET_DONE, SI_ORDER_ANY, vnet_if_return, NULL); #endif static void * if_grow(void) { int oldlim; u_int n; struct ifnet **e; void *old; old = NULL; IFNET_WLOCK_ASSERT(); oldlim = V_if_indexlim; IFNET_WUNLOCK(); n = (oldlim << 1) * sizeof(*e); e = malloc(n, M_IFNET, M_WAITOK | M_ZERO); IFNET_WLOCK(); if (V_if_indexlim != oldlim) { free(e, M_IFNET); return (NULL); } if (V_ifindex_table != NULL) { memcpy((caddr_t)e, (caddr_t)V_ifindex_table, n/2); old = V_ifindex_table; } V_if_indexlim <<= 1; V_ifindex_table = e; return (old); } /* * Allocate a struct ifnet and an index for an interface. A layer 2 * common structure will also be allocated if an allocation routine is * registered for the passed type. */ struct ifnet * if_alloc(u_char type) { struct ifnet *ifp; u_short idx; void *old; ifp = malloc(sizeof(struct ifnet), M_IFNET, M_WAITOK|M_ZERO); restart: IFNET_WLOCK(); idx = ifindex_alloc(&old); if (__predict_false(idx == USHRT_MAX)) { IFNET_WUNLOCK(); epoch_wait_preempt(net_epoch_preempt); free(old, M_IFNET); goto restart; } ifnet_setbyindex(idx, IFNET_HOLD); IFNET_WUNLOCK(); ifp->if_index = idx; ifp->if_type = type; ifp->if_alloctype = type; #ifdef VIMAGE ifp->if_vnet = curvnet; #endif if (if_com_alloc[type] != NULL) { ifp->if_l2com = if_com_alloc[type](type, ifp); if (ifp->if_l2com == NULL) { free(ifp, M_IFNET); ifindex_free(idx); return (NULL); } } IF_ADDR_LOCK_INIT(ifp); TASK_INIT(&ifp->if_linktask, 0, do_link_state_change, ifp); ifp->if_afdata_initialized = 0; IF_AFDATA_LOCK_INIT(ifp); CK_STAILQ_INIT(&ifp->if_addrhead); CK_STAILQ_INIT(&ifp->if_multiaddrs); CK_STAILQ_INIT(&ifp->if_groups); #ifdef MAC mac_ifnet_init(ifp); #endif ifq_init(&ifp->if_snd, ifp); refcount_init(&ifp->if_refcount, 1); /* Index reference. */ for (int i = 0; i < IFCOUNTERS; i++) ifp->if_counters[i] = counter_u64_alloc(M_WAITOK); ifp->if_get_counter = if_get_counter_default; ifp->if_pcp = IFNET_PCP_NONE; ifnet_setbyindex(ifp->if_index, ifp); return (ifp); } /* * Do the actual work of freeing a struct ifnet, and layer 2 common * structure. This call is made when the last reference to an * interface is released. 
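if_grow() and its callers above, vnet_if_init() and if_alloc(), follow a publish-then-reclaim discipline: the replacement ifindex table is installed under the write lock, and the old array is freed only after epoch_wait_preempt(net_epoch_preempt) guarantees that no reader can still be dereferencing it. In schematic, kernel-style form (the wrapper function is hypothetical; the lock, epoch and malloc-type names are the ones used above):

/*
 * Illustrative only: replace a reader-visible array and reclaim the old one
 * after a grace period, mirroring the if_grow()/free() sequence above.
 */
static void
example_replace_table(void)
{
        void *old;

        IFNET_WLOCK();
        old = if_grow();                        /* installs the larger table */
        IFNET_WUNLOCK();
        epoch_wait_preempt(net_epoch_preempt);  /* wait out current readers */
        free(old, M_IFNET);                     /* now safe to release */
}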
*/ static void if_free_internal(struct ifnet *ifp) { KASSERT((ifp->if_flags & IFF_DYING), ("if_free_internal: interface not dying")); if (if_com_free[ifp->if_alloctype] != NULL) if_com_free[ifp->if_alloctype](ifp->if_l2com, ifp->if_alloctype); #ifdef MAC mac_ifnet_destroy(ifp); #endif /* MAC */ if (ifp->if_description != NULL) free(ifp->if_description, M_IFDESCR); IF_AFDATA_DESTROY(ifp); IF_ADDR_LOCK_DESTROY(ifp); ifq_delete(&ifp->if_snd); for (int i = 0; i < IFCOUNTERS; i++) counter_u64_free(ifp->if_counters[i]); free(ifp, M_IFNET); } static void if_destroy(epoch_context_t ctx) { struct ifnet *ifp; ifp = __containerof(ctx, struct ifnet, if_epoch_ctx); if_free_internal(ifp); } /* * Deregister an interface and free the associated storage. */ void if_free(struct ifnet *ifp) { ifp->if_flags |= IFF_DYING; /* XXX: Locking */ CURVNET_SET_QUIET(ifp->if_vnet); IFNET_WLOCK(); KASSERT(ifp == ifnet_byindex_locked(ifp->if_index), ("%s: freeing unallocated ifnet", ifp->if_xname)); ifindex_free_locked(ifp->if_index); IFNET_WUNLOCK(); if (refcount_release(&ifp->if_refcount)) epoch_call(net_epoch_preempt, &ifp->if_epoch_ctx, if_destroy); CURVNET_RESTORE(); } /* * Interfaces to keep an ifnet type-stable despite the possibility of the * driver calling if_free(). If there are additional references, we defer * freeing the underlying data structure. */ void if_ref(struct ifnet *ifp) { /* We don't assert the ifnet list lock here, but arguably should. */ refcount_acquire(&ifp->if_refcount); } void if_rele(struct ifnet *ifp) { if (!refcount_release(&ifp->if_refcount)) return; epoch_call(net_epoch_preempt, &ifp->if_epoch_ctx, if_destroy); } void ifq_init(struct ifaltq *ifq, struct ifnet *ifp) { mtx_init(&ifq->ifq_mtx, ifp->if_xname, "if send queue", MTX_DEF); if (ifq->ifq_maxlen == 0) ifq->ifq_maxlen = ifqmaxlen; ifq->altq_type = 0; ifq->altq_disc = NULL; ifq->altq_flags &= ALTQF_CANTCHANGE; ifq->altq_tbr = NULL; ifq->altq_ifp = ifp; } void ifq_delete(struct ifaltq *ifq) { mtx_destroy(&ifq->ifq_mtx); } /* * Perform generic interface initialization tasks and attach the interface * to the list of "active" interfaces. If vmove flag is set on entry * to if_attach_internal(), perform only a limited subset of initialization * tasks, given that we are moving from one vnet to another an ifnet which * has already been fully initialized. * * Note that if_detach_internal() removes group membership unconditionally * even when vmove flag is set, and if_attach_internal() adds only IFG_ALL. * Thus, when if_vmove() is applied to a cloned interface, group membership * is lost while a cloned one always joins a group whose name is * ifc->ifc_name. To recover this after if_detach_internal() and * if_attach_internal(), the cloner should be specified to * if_attach_internal() via ifc. If it is non-NULL, if_attach_internal() * attempts to join a group whose name is ifc->ifc_name. * * XXX: * - The decision to return void and thus require this function to * succeed is questionable. * - We should probably do more sanity checking. For instance we don't * do anything to insure if_xname is unique or non-empty. */ void if_attach(struct ifnet *ifp) { if_attach_internal(ifp, 0, NULL); } /* * Compute the least common TSO limit. */ void if_hw_tsomax_common(if_t ifp, struct ifnet_hw_tsomax *pmax) { /* * 1) If there is no limit currently, take the limit from * the network adapter. * * 2) If the network adapter has a limit below the current * limit, apply it. 
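if_rele() and if_free() above show the deferred-destruction side of the epoch API: dropping the last reference does not free the ifnet directly but hands it to epoch_call(), which runs if_destroy() only after every thread currently inside net_epoch_preempt has left. The same pattern for an arbitrary epoch-protected object; struct obj, obj_ctx, obj_destroy and the use of M_TEMP are hypothetical:

/*
 * Illustrative only: defer freeing an epoch-protected object until after a
 * grace period, as if_rele()/if_destroy() do for struct ifnet.
 */
struct obj {
        /* ... fields read by lockless readers under the epoch ... */
        struct epoch_context obj_ctx;   /* storage used by epoch_call() */
};

static void
obj_destroy(epoch_context_t ctx)
{
        struct obj *o;

        o = __containerof(ctx, struct obj, obj_ctx);
        free(o, M_TEMP);                /* M_TEMP chosen arbitrarily for the sketch */
}

static void
obj_release(struct obj *o)
{
        /* Runs obj_destroy() once no reader can still see 'o'. */
        epoch_call(net_epoch_preempt, &o->obj_ctx, obj_destroy);
}

A caller would typically invoke obj_release() from the point where a reference count drops to zero, exactly as if_rele() does above.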
*/ if (pmax->tsomaxbytes == 0 || (ifp->if_hw_tsomax != 0 && ifp->if_hw_tsomax < pmax->tsomaxbytes)) { pmax->tsomaxbytes = ifp->if_hw_tsomax; } if (pmax->tsomaxsegcount == 0 || (ifp->if_hw_tsomaxsegcount != 0 && ifp->if_hw_tsomaxsegcount < pmax->tsomaxsegcount)) { pmax->tsomaxsegcount = ifp->if_hw_tsomaxsegcount; } if (pmax->tsomaxsegsize == 0 || (ifp->if_hw_tsomaxsegsize != 0 && ifp->if_hw_tsomaxsegsize < pmax->tsomaxsegsize)) { pmax->tsomaxsegsize = ifp->if_hw_tsomaxsegsize; } } /* * Update TSO limit of a network adapter. * * Returns zero if no change. Else non-zero. */ int if_hw_tsomax_update(if_t ifp, struct ifnet_hw_tsomax *pmax) { int retval = 0; if (ifp->if_hw_tsomax != pmax->tsomaxbytes) { ifp->if_hw_tsomax = pmax->tsomaxbytes; retval++; } if (ifp->if_hw_tsomaxsegsize != pmax->tsomaxsegsize) { ifp->if_hw_tsomaxsegsize = pmax->tsomaxsegsize; retval++; } if (ifp->if_hw_tsomaxsegcount != pmax->tsomaxsegcount) { ifp->if_hw_tsomaxsegcount = pmax->tsomaxsegcount; retval++; } return (retval); } static void if_attach_internal(struct ifnet *ifp, int vmove, struct if_clone *ifc) { unsigned socksize, ifasize; int namelen, masklen; struct sockaddr_dl *sdl; struct ifaddr *ifa; if (ifp->if_index == 0 || ifp != ifnet_byindex(ifp->if_index)) panic ("%s: BUG: if_attach called without if_alloc'd input()\n", ifp->if_xname); #ifdef VIMAGE ifp->if_vnet = curvnet; if (ifp->if_home_vnet == NULL) ifp->if_home_vnet = curvnet; #endif if_addgroup(ifp, IFG_ALL); /* Restore group membership for cloned interfaces. */ if (vmove && ifc != NULL) if_clone_addgroup(ifp, ifc); getmicrotime(&ifp->if_lastchange); ifp->if_epoch = time_uptime; KASSERT((ifp->if_transmit == NULL && ifp->if_qflush == NULL) || (ifp->if_transmit != NULL && ifp->if_qflush != NULL), ("transmit and qflush must both either be set or both be NULL")); if (ifp->if_transmit == NULL) { ifp->if_transmit = if_transmit; ifp->if_qflush = if_qflush; } if (ifp->if_input == NULL) ifp->if_input = if_input_default; if (ifp->if_requestencap == NULL) ifp->if_requestencap = if_requestencap_default; if (!vmove) { #ifdef MAC mac_ifnet_create(ifp); #endif /* * Create a Link Level name for this device. */ namelen = strlen(ifp->if_xname); /* * Always save enough space for any possiable name so we * can do a rename in place later. */ masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + IFNAMSIZ; socksize = masklen + ifp->if_addrlen; if (socksize < sizeof(*sdl)) socksize = sizeof(*sdl); socksize = roundup2(socksize, sizeof(long)); ifasize = sizeof(*ifa) + 2 * socksize; ifa = ifa_alloc(ifasize, M_WAITOK); sdl = (struct sockaddr_dl *)(ifa + 1); sdl->sdl_len = socksize; sdl->sdl_family = AF_LINK; bcopy(ifp->if_xname, sdl->sdl_data, namelen); sdl->sdl_nlen = namelen; sdl->sdl_index = ifp->if_index; sdl->sdl_type = ifp->if_type; ifp->if_addr = ifa; ifa->ifa_ifp = ifp; ifa->ifa_rtrequest = link_rtrequest; ifa->ifa_addr = (struct sockaddr *)sdl; sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl); ifa->ifa_netmask = (struct sockaddr *)sdl; sdl->sdl_len = masklen; while (namelen != 0) sdl->sdl_data[--namelen] = 0xff; CK_STAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link); /* Reliably crash if used uninitialized. 
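		 * (Note: the link-level ifaddr built above reserves IFNAMSIZ
		 * bytes of name space in both the address and the all-ones
		 * netmask, which is what allows SIOCSIFNAME further down to
		 * rename the interface in place without reallocating.)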
*/ ifp->if_broadcastaddr = NULL; if (ifp->if_type == IFT_ETHER) { ifp->if_hw_addr = malloc(ifp->if_addrlen, M_IFADDR, M_WAITOK | M_ZERO); } #if defined(INET) || defined(INET6) /* Use defaults for TSO, if nothing is set */ if (ifp->if_hw_tsomax == 0 && ifp->if_hw_tsomaxsegcount == 0 && ifp->if_hw_tsomaxsegsize == 0) { /* * The TSO defaults needs to be such that an * NFS mbuf list of 35 mbufs totalling just * below 64K works and that a chain of mbufs * can be defragged into at most 32 segments: */ ifp->if_hw_tsomax = min(IP_MAXPACKET, (32 * MCLBYTES) - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)); ifp->if_hw_tsomaxsegcount = 35; ifp->if_hw_tsomaxsegsize = 2048; /* 2K */ /* XXX some drivers set IFCAP_TSO after ethernet attach */ if (ifp->if_capabilities & IFCAP_TSO) { if_printf(ifp, "Using defaults for TSO: %u/%u/%u\n", ifp->if_hw_tsomax, ifp->if_hw_tsomaxsegcount, ifp->if_hw_tsomaxsegsize); } } #endif } #ifdef VIMAGE else { /* * Update the interface index in the link layer address * of the interface. */ for (ifa = ifp->if_addr; ifa != NULL; ifa = CK_STAILQ_NEXT(ifa, ifa_link)) { if (ifa->ifa_addr->sa_family == AF_LINK) { sdl = (struct sockaddr_dl *)ifa->ifa_addr; sdl->sdl_index = ifp->if_index; } } } #endif IFNET_WLOCK(); CK_STAILQ_INSERT_TAIL(&V_ifnet, ifp, if_link); #ifdef VIMAGE curvnet->vnet_ifcnt++; #endif IFNET_WUNLOCK(); if (domain_init_status >= 2) if_attachdomain1(ifp); EVENTHANDLER_INVOKE(ifnet_arrival_event, ifp); if (IS_DEFAULT_VNET(curvnet)) devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL); /* Announce the interface. */ rt_ifannouncemsg(ifp, IFAN_ARRIVAL); } static void if_epochalloc(void *dummy __unused) { net_epoch_preempt = epoch_alloc(EPOCH_PREEMPT); net_epoch = epoch_alloc(0); } SYSINIT(ifepochalloc, SI_SUB_TASKQ + 1, SI_ORDER_ANY, if_epochalloc, NULL); static void if_attachdomain(void *dummy) { struct ifnet *ifp; CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) if_attachdomain1(ifp); } SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_SECOND, if_attachdomain, NULL); static void if_attachdomain1(struct ifnet *ifp) { struct domain *dp; /* * Since dp->dom_ifattach calls malloc() with M_WAITOK, we * cannot lock ifp->if_afdata initialization, entirely. */ IF_AFDATA_LOCK(ifp); if (ifp->if_afdata_initialized >= domain_init_status) { IF_AFDATA_UNLOCK(ifp); log(LOG_WARNING, "%s called more than once on %s\n", __func__, ifp->if_xname); return; } ifp->if_afdata_initialized = domain_init_status; IF_AFDATA_UNLOCK(ifp); /* address family dependent data region */ bzero(ifp->if_afdata, sizeof(ifp->if_afdata)); for (dp = domains; dp; dp = dp->dom_next) { if (dp->dom_ifattach) ifp->if_afdata[dp->dom_family] = (*dp->dom_ifattach)(ifp); } } /* * Remove any unicast or broadcast network addresses from an interface. */ void if_purgeaddrs(struct ifnet *ifp) { struct ifaddr *ifa; while (1) { NET_EPOCH_ENTER(); CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != AF_LINK) break; } NET_EPOCH_EXIT(); if (ifa == NULL) break; #ifdef INET /* XXX: Ugly!! 
ad hoc just for INET */ if (ifa->ifa_addr->sa_family == AF_INET) { struct ifaliasreq ifr; bzero(&ifr, sizeof(ifr)); ifr.ifra_addr = *ifa->ifa_addr; if (ifa->ifa_dstaddr) ifr.ifra_broadaddr = *ifa->ifa_dstaddr; if (in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp, NULL) == 0) continue; } #endif /* INET */ #ifdef INET6 if (ifa->ifa_addr->sa_family == AF_INET6) { in6_purgeaddr(ifa); /* ifp_addrhead is already updated */ continue; } #endif /* INET6 */ IF_ADDR_WLOCK(ifp); CK_STAILQ_REMOVE(&ifp->if_addrhead, ifa, ifaddr, ifa_link); IF_ADDR_WUNLOCK(ifp); ifa_free(ifa); } } /* * Remove any multicast network addresses from an interface when an ifnet * is going away. */ static void if_purgemaddrs(struct ifnet *ifp) { struct ifmultiaddr *ifma; IF_ADDR_WLOCK(ifp); while (!CK_STAILQ_EMPTY(&ifp->if_multiaddrs)) { ifma = CK_STAILQ_FIRST(&ifp->if_multiaddrs); CK_STAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifmultiaddr, ifma_link); if_delmulti_locked(ifp, ifma, 1); } IF_ADDR_WUNLOCK(ifp); } /* * Detach an interface, removing it from the list of "active" interfaces. * If vmove flag is set on entry to if_detach_internal(), perform only a * limited subset of cleanup tasks, given that we are moving an ifnet from * one vnet to another, where it must be fully operational. * * XXXRW: There are some significant questions about event ordering, and * how to prevent things from starting to use the interface during detach. */ void if_detach(struct ifnet *ifp) { CURVNET_SET_QUIET(ifp->if_vnet); if_detach_internal(ifp, 0, NULL); CURVNET_RESTORE(); } /* * The vmove flag, if set, indicates that we are called from a callpath * that is moving an interface to a different vnet instance. * * The shutdown flag, if set, indicates that we are called in the * process of shutting down a vnet instance. Currently only the * vnet_if_return SYSUNINIT function sets it. Note: we can be called * on a vnet instance shutdown without this flag being set, e.g., when * the cloned interfaces are destoyed as first thing of teardown. */ static int if_detach_internal(struct ifnet *ifp, int vmove, struct if_clone **ifcp) { struct ifaddr *ifa; int i; struct domain *dp; struct ifnet *iter; int found = 0; #ifdef VIMAGE int shutdown; shutdown = (ifp->if_vnet->vnet_state > SI_SUB_VNET && ifp->if_vnet->vnet_state < SI_SUB_VNET_DONE) ? 1 : 0; #endif IFNET_WLOCK(); CK_STAILQ_FOREACH(iter, &V_ifnet, if_link) if (iter == ifp) { CK_STAILQ_REMOVE(&V_ifnet, ifp, ifnet, if_link); found = 1; break; } IFNET_WUNLOCK(); if (!found) { /* * While we would want to panic here, we cannot * guarantee that the interface is indeed still on * the list given we don't hold locks all the way. */ return (ENOENT); #if 0 if (vmove) panic("%s: ifp=%p not on the ifnet tailq %p", __func__, ifp, &V_ifnet); else return; /* XXX this should panic as well? */ #endif } /* * At this point we know the interface still was on the ifnet list * and we removed it so we are in a stable state. */ #ifdef VIMAGE curvnet->vnet_ifcnt--; #endif epoch_wait_preempt(net_epoch_preempt); /* * In any case (destroy or vmove) detach us from the groups * and remove/wait for pending events on the taskq. * XXX-BZ in theory an interface could still enqueue a taskq change? */ if_delgroups(ifp); taskqueue_drain(taskqueue_swi, &ifp->if_linktask); /* * Check if this is a cloned interface or not. Must do even if * shutting down as a if_vmove_reclaim() would move the ifp and * the if_clone_addgroup() will have a corrupted string overwise * from a gibberish pointer. 
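	 *
	 * (Note: at this point the ifnet has already been unlinked from
	 * V_ifnet and an epoch_wait_preempt() has completed, so no
	 * epoch-protected reader can still pick it up from the interface
	 * list while the teardown below proceeds.)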
*/ if (vmove && ifcp != NULL) *ifcp = if_clone_findifc(ifp); if_down(ifp); #ifdef VIMAGE /* * On VNET shutdown abort here as the stack teardown will do all * the work top-down for us. */ if (shutdown) { /* Give interface users the chance to clean up. */ EVENTHANDLER_INVOKE(ifnet_departure_event, ifp); /* * In case of a vmove we are done here without error. * If we would signal an error it would lead to the same * abort as if we did not find the ifnet anymore. * if_detach() calls us in void context and does not care * about an early abort notification, so life is splendid :) */ goto finish_vnet_shutdown; } #endif /* * At this point we are not tearing down a VNET and are either * going to destroy or vmove the interface and have to cleanup * accordingly. */ /* * Remove routes and flush queues. */ #ifdef ALTQ if (ALTQ_IS_ENABLED(&ifp->if_snd)) altq_disable(&ifp->if_snd); if (ALTQ_IS_ATTACHED(&ifp->if_snd)) altq_detach(&ifp->if_snd); #endif if_purgeaddrs(ifp); #ifdef INET in_ifdetach(ifp); #endif #ifdef INET6 /* * Remove all IPv6 kernel structs related to ifp. This should be done * before removing routing entries below, since IPv6 interface direct * routes are expected to be removed by the IPv6-specific kernel API. * Otherwise, the kernel will detect some inconsistency and bark it. */ in6_ifdetach(ifp); #endif if_purgemaddrs(ifp); /* Announce that the interface is gone. */ rt_ifannouncemsg(ifp, IFAN_DEPARTURE); EVENTHANDLER_INVOKE(ifnet_departure_event, ifp); if (IS_DEFAULT_VNET(curvnet)) devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL); if (!vmove) { /* * Prevent further calls into the device driver via ifnet. */ if_dead(ifp); /* * Remove link ifaddr pointer and maybe decrement if_index. * Clean up all addresses. */ free(ifp->if_hw_addr, M_IFADDR); ifp->if_hw_addr = NULL; ifp->if_addr = NULL; /* We can now free link ifaddr. */ IF_ADDR_WLOCK(ifp); if (!CK_STAILQ_EMPTY(&ifp->if_addrhead)) { ifa = CK_STAILQ_FIRST(&ifp->if_addrhead); CK_STAILQ_REMOVE(&ifp->if_addrhead, ifa, ifaddr, ifa_link); IF_ADDR_WUNLOCK(ifp); ifa_free(ifa); } else IF_ADDR_WUNLOCK(ifp); } rt_flushifroutes(ifp); #ifdef VIMAGE finish_vnet_shutdown: #endif /* * We cannot hold the lock over dom_ifdetach calls as they might * sleep, for example trying to drain a callout, thus open up the * theoretical race with re-attaching. */ IF_AFDATA_LOCK(ifp); i = ifp->if_afdata_initialized; ifp->if_afdata_initialized = 0; IF_AFDATA_UNLOCK(ifp); for (dp = domains; i > 0 && dp; dp = dp->dom_next) { if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family]) { (*dp->dom_ifdetach)(ifp, ifp->if_afdata[dp->dom_family]); ifp->if_afdata[dp->dom_family] = NULL; } } return (0); } #ifdef VIMAGE /* * if_vmove() performs a limited version of if_detach() in current * vnet and if_attach()es the ifnet to the vnet specified as 2nd arg. * An attempt is made to shrink if_index in current vnet, find an * unused if_index in target vnet and calls if_grow() if necessary, * and finally find an unused if_xname for the target vnet. */ static void if_vmove(struct ifnet *ifp, struct vnet *new_vnet) { struct if_clone *ifc; u_int bif_dlt, bif_hdrlen; void *old; int rc; /* * if_detach_internal() will call the eventhandler to notify * interface departure. That will detach if_bpf. We need to * safe the dlt and hdrlen so we can re-attach it later. */ bpf_get_bp_params(ifp->if_bpf, &bif_dlt, &bif_hdrlen); /* * Detach from current vnet, but preserve LLADDR info, do not * mark as dead etc. so that the ifnet can be reattached later. 
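	 *
	 * (Illustrative example only: this path is normally reached via the
	 * SIOCSIFVNET/SIOCSIFRVNET handlers below, i.e. from userland
	 * commands along the lines of
	 *	ifconfig epair0b vnet myjail
	 *	ifconfig epair0b -vnet myjail
	 * where "epair0b" and "myjail" are placeholder names.)
	 *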
* If we cannot find it, we lost the race to someone else. */ rc = if_detach_internal(ifp, 1, &ifc); if (rc != 0) return; /* * Unlink the ifnet from ifindex_table[] in current vnet, and shrink * the if_index for that vnet if possible. * * NOTE: IFNET_WLOCK/IFNET_WUNLOCK() are assumed to be unvirtualized, * or we'd lock on one vnet and unlock on another. */ IFNET_WLOCK(); ifindex_free_locked(ifp->if_index); IFNET_WUNLOCK(); /* * Perform interface-specific reassignment tasks, if provided by * the driver. */ if (ifp->if_reassign != NULL) ifp->if_reassign(ifp, new_vnet, NULL); /* * Switch to the context of the target vnet. */ CURVNET_SET_QUIET(new_vnet); restart: IFNET_WLOCK(); ifp->if_index = ifindex_alloc(&old); if (__predict_false(ifp->if_index == USHRT_MAX)) { IFNET_WUNLOCK(); epoch_wait_preempt(net_epoch_preempt); free(old, M_IFNET); goto restart; } ifnet_setbyindex(ifp->if_index, ifp); IFNET_WUNLOCK(); if_attach_internal(ifp, 1, ifc); if (ifp->if_bpf == NULL) bpfattach(ifp, bif_dlt, bif_hdrlen); CURVNET_RESTORE(); } /* * Move an ifnet to or from another child prison/vnet, specified by the jail id. */ static int if_vmove_loan(struct thread *td, struct ifnet *ifp, char *ifname, int jid) { struct prison *pr; struct ifnet *difp; int shutdown; /* Try to find the prison within our visibility. */ sx_slock(&allprison_lock); pr = prison_find_child(td->td_ucred->cr_prison, jid); sx_sunlock(&allprison_lock); if (pr == NULL) return (ENXIO); prison_hold_locked(pr); mtx_unlock(&pr->pr_mtx); /* Do not try to move the iface from and to the same prison. */ if (pr->pr_vnet == ifp->if_vnet) { prison_free(pr); return (EEXIST); } /* Make sure the named iface does not exists in the dst. prison/vnet. */ /* XXX Lock interfaces to avoid races. */ CURVNET_SET_QUIET(pr->pr_vnet); difp = ifunit(ifname); if (difp != NULL) { CURVNET_RESTORE(); prison_free(pr); return (EEXIST); } /* Make sure the VNET is stable. */ shutdown = (ifp->if_vnet->vnet_state > SI_SUB_VNET && ifp->if_vnet->vnet_state < SI_SUB_VNET_DONE) ? 1 : 0; if (shutdown) { CURVNET_RESTORE(); prison_free(pr); return (EBUSY); } CURVNET_RESTORE(); /* Move the interface into the child jail/vnet. */ if_vmove(ifp, pr->pr_vnet); /* Report the new if_xname back to the userland. */ sprintf(ifname, "%s", ifp->if_xname); prison_free(pr); return (0); } static int if_vmove_reclaim(struct thread *td, char *ifname, int jid) { struct prison *pr; struct vnet *vnet_dst; struct ifnet *ifp; int shutdown; /* Try to find the prison within our visibility. */ sx_slock(&allprison_lock); pr = prison_find_child(td->td_ucred->cr_prison, jid); sx_sunlock(&allprison_lock); if (pr == NULL) return (ENXIO); prison_hold_locked(pr); mtx_unlock(&pr->pr_mtx); /* Make sure the named iface exists in the source prison/vnet. */ CURVNET_SET(pr->pr_vnet); ifp = ifunit(ifname); /* XXX Lock to avoid races. */ if (ifp == NULL) { CURVNET_RESTORE(); prison_free(pr); return (ENXIO); } /* Do not try to move the iface from and to the same prison. */ vnet_dst = TD_TO_VNET(td); if (vnet_dst == ifp->if_vnet) { CURVNET_RESTORE(); prison_free(pr); return (EEXIST); } /* Make sure the VNET is stable. */ shutdown = (ifp->if_vnet->vnet_state > SI_SUB_VNET && ifp->if_vnet->vnet_state < SI_SUB_VNET_DONE) ? 1 : 0; if (shutdown) { CURVNET_RESTORE(); prison_free(pr); return (EBUSY); } /* Get interface back from child jail/vnet. */ if_vmove(ifp, vnet_dst); CURVNET_RESTORE(); /* Report the new if_xname back to the userland. 
*/ sprintf(ifname, "%s", ifp->if_xname); prison_free(pr); return (0); } #endif /* VIMAGE */ /* * Add a group to an interface */ int if_addgroup(struct ifnet *ifp, const char *groupname) { struct ifg_list *ifgl; struct ifg_group *ifg = NULL; struct ifg_member *ifgm; int new = 0; if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' && groupname[strlen(groupname) - 1] <= '9') return (EINVAL); IFNET_WLOCK(); CK_STAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) { IFNET_WUNLOCK(); return (EEXIST); } if ((ifgl = (struct ifg_list *)malloc(sizeof(struct ifg_list), M_TEMP, M_NOWAIT)) == NULL) { IFNET_WUNLOCK(); return (ENOMEM); } if ((ifgm = (struct ifg_member *)malloc(sizeof(struct ifg_member), M_TEMP, M_NOWAIT)) == NULL) { free(ifgl, M_TEMP); IFNET_WUNLOCK(); return (ENOMEM); } CK_STAILQ_FOREACH(ifg, &V_ifg_head, ifg_next) if (!strcmp(ifg->ifg_group, groupname)) break; if (ifg == NULL) { if ((ifg = (struct ifg_group *)malloc(sizeof(struct ifg_group), M_TEMP, M_NOWAIT)) == NULL) { free(ifgl, M_TEMP); free(ifgm, M_TEMP); IFNET_WUNLOCK(); return (ENOMEM); } strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group)); ifg->ifg_refcnt = 0; CK_STAILQ_INIT(&ifg->ifg_members); CK_STAILQ_INSERT_TAIL(&V_ifg_head, ifg, ifg_next); new = 1; } ifg->ifg_refcnt++; ifgl->ifgl_group = ifg; ifgm->ifgm_ifp = ifp; IF_ADDR_WLOCK(ifp); CK_STAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next); CK_STAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next); IF_ADDR_WUNLOCK(ifp); IFNET_WUNLOCK(); if (new) EVENTHANDLER_INVOKE(group_attach_event, ifg); EVENTHANDLER_INVOKE(group_change_event, groupname); return (0); } /* * Remove a group from an interface */ int if_delgroup(struct ifnet *ifp, const char *groupname) { struct ifg_list *ifgl; struct ifg_member *ifgm; int freeifgl; IFNET_WLOCK(); CK_STAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) break; if (ifgl == NULL) { IFNET_WUNLOCK(); return (ENOENT); } freeifgl = 0; IF_ADDR_WLOCK(ifp); CK_STAILQ_REMOVE(&ifp->if_groups, ifgl, ifg_list, ifgl_next); IF_ADDR_WUNLOCK(ifp); CK_STAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next) if (ifgm->ifgm_ifp == ifp) break; if (ifgm != NULL) CK_STAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifg_member, ifgm_next); if (--ifgl->ifgl_group->ifg_refcnt == 0) { CK_STAILQ_REMOVE(&V_ifg_head, ifgl->ifgl_group, ifg_group, ifg_next); freeifgl = 1; } IFNET_WUNLOCK(); epoch_wait_preempt(net_epoch_preempt); if (freeifgl) { EVENTHANDLER_INVOKE(group_detach_event, ifgl->ifgl_group); free(ifgl->ifgl_group, M_TEMP); } free(ifgm, M_TEMP); free(ifgl, M_TEMP); EVENTHANDLER_INVOKE(group_change_event, groupname); return (0); } /* * Remove an interface from all groups */ static void if_delgroups(struct ifnet *ifp) { struct ifg_list *ifgl; struct ifg_member *ifgm; char groupname[IFNAMSIZ]; int ifglfree; IFNET_WLOCK(); while (!CK_STAILQ_EMPTY(&ifp->if_groups)) { ifgl = CK_STAILQ_FIRST(&ifp->if_groups); strlcpy(groupname, ifgl->ifgl_group->ifg_group, IFNAMSIZ); IF_ADDR_WLOCK(ifp); CK_STAILQ_REMOVE(&ifp->if_groups, ifgl, ifg_list, ifgl_next); IF_ADDR_WUNLOCK(ifp); CK_STAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next) if (ifgm->ifgm_ifp == ifp) break; if (ifgm != NULL) CK_STAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifg_member, ifgm_next); ifglfree = 0; if (--ifgl->ifgl_group->ifg_refcnt == 0) { CK_STAILQ_REMOVE(&V_ifg_head, ifgl->ifgl_group, ifg_group, ifg_next); ifglfree = 1; } IFNET_WUNLOCK(); 
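		/*
		 * Note: the ifnet lock is dropped before the epoch wait
		 * below because epoch_wait_preempt() may sleep; the loop
		 * re-takes IFNET_WLOCK() after the group teardown events
		 * have been delivered.
		 */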
epoch_wait_preempt(net_epoch_preempt); free(ifgm, M_TEMP); if (ifglfree) { EVENTHANDLER_INVOKE(group_detach_event, ifgl->ifgl_group); free(ifgl->ifgl_group, M_TEMP); } EVENTHANDLER_INVOKE(group_change_event, groupname); IFNET_WLOCK(); } IFNET_WUNLOCK(); } static char * ifgr_group_get(void *ifgrp) { union ifgroupreq_union *ifgrup; ifgrup = ifgrp; #ifdef COMPAT_FREEBSD32 if (SV_CURPROC_FLAG(SV_ILP32)) return (&ifgrup->ifgr32.ifgr_ifgru.ifgru_group[0]); #endif return (&ifgrup->ifgr.ifgr_ifgru.ifgru_group[0]); } static struct ifg_req * ifgr_groups_get(void *ifgrp) { union ifgroupreq_union *ifgrup; ifgrup = ifgrp; #ifdef COMPAT_FREEBSD32 if (SV_CURPROC_FLAG(SV_ILP32)) return ((struct ifg_req *)(uintptr_t) ifgrup->ifgr32.ifgr_ifgru.ifgru_groups); #endif return (ifgrup->ifgr.ifgr_ifgru.ifgru_groups); } /* * Stores all groups from an interface in memory pointed to by ifgr. */ static int if_getgroup(struct ifgroupreq *ifgr, struct ifnet *ifp) { int len, error; struct ifg_list *ifgl; struct ifg_req ifgrq, *ifgp; if (ifgr->ifgr_len == 0) { IF_ADDR_RLOCK(ifp); CK_STAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) ifgr->ifgr_len += sizeof(struct ifg_req); IF_ADDR_RUNLOCK(ifp); return (0); } len = ifgr->ifgr_len; ifgp = ifgr_groups_get(ifgr); /* XXX: wire */ IF_ADDR_RLOCK(ifp); CK_STAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) { if (len < sizeof(ifgrq)) { IF_ADDR_RUNLOCK(ifp); return (EINVAL); } bzero(&ifgrq, sizeof ifgrq); strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group, sizeof(ifgrq.ifgrq_group)); if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req)))) { IF_ADDR_RUNLOCK(ifp); return (error); } len -= sizeof(ifgrq); ifgp++; } IF_ADDR_RUNLOCK(ifp); return (0); } /* * Stores all members of a group in memory pointed to by igfr */ static int if_getgroupmembers(struct ifgroupreq *ifgr) { struct ifg_group *ifg; struct ifg_member *ifgm; struct ifg_req ifgrq, *ifgp; int len, error; IFNET_RLOCK(); CK_STAILQ_FOREACH(ifg, &V_ifg_head, ifg_next) if (!strcmp(ifg->ifg_group, ifgr->ifgr_name)) break; if (ifg == NULL) { IFNET_RUNLOCK(); return (ENOENT); } if (ifgr->ifgr_len == 0) { CK_STAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) ifgr->ifgr_len += sizeof(ifgrq); IFNET_RUNLOCK(); return (0); } len = ifgr->ifgr_len; ifgp = ifgr_groups_get(ifgr); CK_STAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) { if (len < sizeof(ifgrq)) { IFNET_RUNLOCK(); return (EINVAL); } bzero(&ifgrq, sizeof ifgrq); strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname, sizeof(ifgrq.ifgrq_member)); if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req)))) { IFNET_RUNLOCK(); return (error); } len -= sizeof(ifgrq); ifgp++; } IFNET_RUNLOCK(); return (0); } /* * Return counter values from counter(9)s stored in ifnet. */ uint64_t if_get_counter_default(struct ifnet *ifp, ift_counter cnt) { KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt)); return (counter_u64_fetch(ifp->if_counters[cnt])); } /* * Increase an ifnet counter. Usually used for counters shared * between the stack and a driver, but function supports them all. */ void if_inc_counter(struct ifnet *ifp, ift_counter cnt, int64_t inc) { KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt)); counter_u64_add(ifp->if_counters[cnt], inc); } /* * Copy data from ifnet to userland API structure if_data. 
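 *
 * Example (illustrative sketch; "m" is a placeholder mbuf): drivers feed
 * the counter(9) statistics consumed here through the KPI above, e.g.
 *
 *	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
 *	if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
 *
 * or install their own if_get_counter method instead of the default.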
*/ void if_data_copy(struct ifnet *ifp, struct if_data *ifd) { ifd->ifi_type = ifp->if_type; ifd->ifi_physical = 0; ifd->ifi_addrlen = ifp->if_addrlen; ifd->ifi_hdrlen = ifp->if_hdrlen; ifd->ifi_link_state = ifp->if_link_state; ifd->ifi_vhid = 0; ifd->ifi_datalen = sizeof(struct if_data); ifd->ifi_mtu = ifp->if_mtu; ifd->ifi_metric = ifp->if_metric; ifd->ifi_baudrate = ifp->if_baudrate; ifd->ifi_hwassist = ifp->if_hwassist; ifd->ifi_epoch = ifp->if_epoch; ifd->ifi_lastchange = ifp->if_lastchange; ifd->ifi_ipackets = ifp->if_get_counter(ifp, IFCOUNTER_IPACKETS); ifd->ifi_ierrors = ifp->if_get_counter(ifp, IFCOUNTER_IERRORS); ifd->ifi_opackets = ifp->if_get_counter(ifp, IFCOUNTER_OPACKETS); ifd->ifi_oerrors = ifp->if_get_counter(ifp, IFCOUNTER_OERRORS); ifd->ifi_collisions = ifp->if_get_counter(ifp, IFCOUNTER_COLLISIONS); ifd->ifi_ibytes = ifp->if_get_counter(ifp, IFCOUNTER_IBYTES); ifd->ifi_obytes = ifp->if_get_counter(ifp, IFCOUNTER_OBYTES); ifd->ifi_imcasts = ifp->if_get_counter(ifp, IFCOUNTER_IMCASTS); ifd->ifi_omcasts = ifp->if_get_counter(ifp, IFCOUNTER_OMCASTS); ifd->ifi_iqdrops = ifp->if_get_counter(ifp, IFCOUNTER_IQDROPS); ifd->ifi_oqdrops = ifp->if_get_counter(ifp, IFCOUNTER_OQDROPS); ifd->ifi_noproto = ifp->if_get_counter(ifp, IFCOUNTER_NOPROTO); } /* * Wrapper functions for struct ifnet address list locking macros. These are * used by kernel modules to avoid encoding programming interface or binary * interface assumptions that may be violated when kernel-internal locking * approaches change. */ void if_addr_rlock(struct ifnet *ifp) { - MPASS(*(uint64_t *)&ifp->if_addr_et == 0); - epoch_enter_preempt(net_epoch_preempt, &ifp->if_addr_et); + + epoch_enter_preempt(net_epoch_preempt, curthread->td_et); } void if_addr_runlock(struct ifnet *ifp) { - epoch_exit_preempt(net_epoch_preempt, &ifp->if_addr_et); -#ifdef INVARIANTS - bzero(&ifp->if_addr_et, sizeof(struct epoch_tracker)); -#endif + + epoch_exit_preempt(net_epoch_preempt, curthread->td_et); } void if_maddr_rlock(if_t ifp) { - MPASS(*(uint64_t *)&ifp->if_maddr_et == 0); - epoch_enter_preempt(net_epoch_preempt, &ifp->if_maddr_et); + epoch_enter_preempt(net_epoch_preempt, curthread->td_et); } void if_maddr_runlock(if_t ifp) { - epoch_exit_preempt(net_epoch_preempt, &ifp->if_maddr_et); -#ifdef INVARIANTS - bzero(&ifp->if_maddr_et, sizeof(struct epoch_tracker)); -#endif + epoch_exit_preempt(net_epoch_preempt, curthread->td_et); } /* * Initialization, destruction and refcounting functions for ifaddrs. 
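 *
 * Example (illustrative sketch, not part of this change): with the change
 * above, if_addr_rlock()/if_addr_runlock() enter and exit the preemptible
 * net epoch on the current thread's tracker, so a module walking the
 * address list still looks like
 *
 *	if_addr_rlock(ifp);
 *	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
 *		if (ifa->ifa_addr->sa_family != AF_INET)
 *			continue;
 *		...
 *	}
 *	if_addr_runlock(ifp);
 *
 * and must not sleep, nor keep the ifaddr pointer past the unlock without
 * first taking a reference with ifa_ref().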
*/ struct ifaddr * ifa_alloc(size_t size, int flags) { struct ifaddr *ifa; KASSERT(size >= sizeof(struct ifaddr), ("%s: invalid size %zu", __func__, size)); ifa = malloc(size, M_IFADDR, M_ZERO | flags); if (ifa == NULL) return (NULL); if ((ifa->ifa_opackets = counter_u64_alloc(flags)) == NULL) goto fail; if ((ifa->ifa_ipackets = counter_u64_alloc(flags)) == NULL) goto fail; if ((ifa->ifa_obytes = counter_u64_alloc(flags)) == NULL) goto fail; if ((ifa->ifa_ibytes = counter_u64_alloc(flags)) == NULL) goto fail; refcount_init(&ifa->ifa_refcnt, 1); return (ifa); fail: /* free(NULL) is okay */ counter_u64_free(ifa->ifa_opackets); counter_u64_free(ifa->ifa_ipackets); counter_u64_free(ifa->ifa_obytes); counter_u64_free(ifa->ifa_ibytes); free(ifa, M_IFADDR); return (NULL); } void ifa_ref(struct ifaddr *ifa) { refcount_acquire(&ifa->ifa_refcnt); } static void ifa_destroy(epoch_context_t ctx) { struct ifaddr *ifa; ifa = __containerof(ctx, struct ifaddr, ifa_epoch_ctx); counter_u64_free(ifa->ifa_opackets); counter_u64_free(ifa->ifa_ipackets); counter_u64_free(ifa->ifa_obytes); counter_u64_free(ifa->ifa_ibytes); free(ifa, M_IFADDR); } void ifa_free(struct ifaddr *ifa) { if (refcount_release(&ifa->ifa_refcnt)) epoch_call(net_epoch_preempt, &ifa->ifa_epoch_ctx, ifa_destroy); } static int ifa_maintain_loopback_route(int cmd, const char *otype, struct ifaddr *ifa, struct sockaddr *ia) { int error; struct rt_addrinfo info; struct sockaddr_dl null_sdl; struct ifnet *ifp; ifp = ifa->ifa_ifp; bzero(&info, sizeof(info)); if (cmd != RTM_DELETE) info.rti_ifp = V_loif; info.rti_flags = ifa->ifa_flags | RTF_HOST | RTF_STATIC | RTF_PINNED; info.rti_info[RTAX_DST] = ia; info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&null_sdl; link_init_sdl(ifp, (struct sockaddr *)&null_sdl, ifp->if_type); error = rtrequest1_fib(cmd, &info, NULL, ifp->if_fib); if (error != 0 && !(cmd == RTM_ADD && error == EEXIST) && !(cmd == RTM_DELETE && error == ENOENT)) if_printf(ifp, "%s failed: %d\n", otype, error); return (error); } int ifa_add_loopback_route(struct ifaddr *ifa, struct sockaddr *ia) { return (ifa_maintain_loopback_route(RTM_ADD, "insertion", ifa, ia)); } int ifa_del_loopback_route(struct ifaddr *ifa, struct sockaddr *ia) { return (ifa_maintain_loopback_route(RTM_DELETE, "deletion", ifa, ia)); } int ifa_switch_loopback_route(struct ifaddr *ifa, struct sockaddr *ia) { return (ifa_maintain_loopback_route(RTM_CHANGE, "switch", ifa, ia)); } /* * XXX: Because sockaddr_dl has deeper structure than the sockaddr * structs used to represent other address families, it is necessary * to perform a different comparison. */ #define sa_dl_equal(a1, a2) \ ((((const struct sockaddr_dl *)(a1))->sdl_len == \ ((const struct sockaddr_dl *)(a2))->sdl_len) && \ (bcmp(CLLADDR((const struct sockaddr_dl *)(a1)), \ CLLADDR((const struct sockaddr_dl *)(a2)), \ ((const struct sockaddr_dl *)(a1))->sdl_alen) == 0)) /* * Locate an interface based on a complete address. 
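 *
 * Example (illustrative sketch): the lookup routines below expect the
 * caller to already be inside the preemptible net epoch, e.g.
 *
 *	NET_EPOCH_ENTER();
 *	ifa = ifa_ifwithaddr(sa);
 *	if (ifa != NULL)
 *		ifa_ref(ifa);
 *	NET_EPOCH_EXIT();
 *
 * where the ifa_ref() is only needed if the pointer is used after leaving
 * the epoch section.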
*/ /*ARGSUSED*/ struct ifaddr * ifa_ifwithaddr(const struct sockaddr *addr) { struct ifnet *ifp; struct ifaddr *ifa; MPASS(in_epoch(net_epoch_preempt)); CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) { CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != addr->sa_family) continue; if (sa_equal(addr, ifa->ifa_addr)) { goto done; } /* IP6 doesn't have broadcast */ if ((ifp->if_flags & IFF_BROADCAST) && ifa->ifa_broadaddr && ifa->ifa_broadaddr->sa_len != 0 && sa_equal(ifa->ifa_broadaddr, addr)) { goto done; } } } ifa = NULL; done: return (ifa); } int ifa_ifwithaddr_check(const struct sockaddr *addr) { int rc; NET_EPOCH_ENTER(); rc = (ifa_ifwithaddr(addr) != NULL); NET_EPOCH_EXIT(); return (rc); } /* * Locate an interface based on the broadcast address. */ /* ARGSUSED */ struct ifaddr * ifa_ifwithbroadaddr(const struct sockaddr *addr, int fibnum) { struct ifnet *ifp; struct ifaddr *ifa; MPASS(in_epoch(net_epoch_preempt)); CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) { if ((fibnum != RT_ALL_FIBS) && (ifp->if_fib != fibnum)) continue; CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != addr->sa_family) continue; if ((ifp->if_flags & IFF_BROADCAST) && ifa->ifa_broadaddr && ifa->ifa_broadaddr->sa_len != 0 && sa_equal(ifa->ifa_broadaddr, addr)) { goto done; } } } ifa = NULL; done: return (ifa); } /* * Locate the point to point interface with a given destination address. */ /*ARGSUSED*/ struct ifaddr * ifa_ifwithdstaddr(const struct sockaddr *addr, int fibnum) { struct ifnet *ifp; struct ifaddr *ifa; MPASS(in_epoch(net_epoch_preempt)); CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) { if ((ifp->if_flags & IFF_POINTOPOINT) == 0) continue; if ((fibnum != RT_ALL_FIBS) && (ifp->if_fib != fibnum)) continue; CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != addr->sa_family) continue; if (ifa->ifa_dstaddr != NULL && sa_equal(addr, ifa->ifa_dstaddr)) { goto done; } } } ifa = NULL; done: return (ifa); } /* * Find an interface on a specific network. If many, choice * is most specific found. */ struct ifaddr * ifa_ifwithnet(const struct sockaddr *addr, int ignore_ptp, int fibnum) { struct ifnet *ifp; struct ifaddr *ifa; struct ifaddr *ifa_maybe = NULL; u_int af = addr->sa_family; const char *addr_data = addr->sa_data, *cplim; MPASS(in_epoch(net_epoch_preempt)); /* * AF_LINK addresses can be looked up directly by their index number, * so do that if we can. */ if (af == AF_LINK) { const struct sockaddr_dl *sdl = (const struct sockaddr_dl *)addr; if (sdl->sdl_index && sdl->sdl_index <= V_if_index) return (ifaddr_byindex(sdl->sdl_index)); } /* * Scan though each interface, looking for ones that have addresses * in this address family and the requested fib. Maintain a reference * on ifa_maybe once we find one, as we release the IF_ADDR_RLOCK() that * kept it stable when we move onto the next interface. */ CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) { if ((fibnum != RT_ALL_FIBS) && (ifp->if_fib != fibnum)) continue; CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { const char *cp, *cp2, *cp3; if (ifa->ifa_addr->sa_family != af) next: continue; if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT && !ignore_ptp) { /* * This is a bit broken as it doesn't * take into account that the remote end may * be a single node in the network we are * looking for. * The trouble is that we don't know the * netmask for the remote end. 
*/ if (ifa->ifa_dstaddr != NULL && sa_equal(addr, ifa->ifa_dstaddr)) { goto done; } } else { /* * Scan all the bits in the ifa's address. * If a bit dissagrees with what we are * looking for, mask it with the netmask * to see if it really matters. * (A byte at a time) */ if (ifa->ifa_netmask == 0) continue; cp = addr_data; cp2 = ifa->ifa_addr->sa_data; cp3 = ifa->ifa_netmask->sa_data; cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask; while (cp3 < cplim) if ((*cp++ ^ *cp2++) & *cp3++) goto next; /* next address! */ /* * If the netmask of what we just found * is more specific than what we had before * (if we had one), or if the virtual status * of new prefix is better than of the old one, * then remember the new one before continuing * to search for an even better one. */ if (ifa_maybe == NULL || ifa_preferred(ifa_maybe, ifa) || rn_refines((caddr_t)ifa->ifa_netmask, (caddr_t)ifa_maybe->ifa_netmask)) { ifa_maybe = ifa; } } } } ifa = ifa_maybe; ifa_maybe = NULL; done: return (ifa); } /* * Find an interface address specific to an interface best matching * a given address. */ struct ifaddr * ifaof_ifpforaddr(const struct sockaddr *addr, struct ifnet *ifp) { struct ifaddr *ifa; const char *cp, *cp2, *cp3; char *cplim; struct ifaddr *ifa_maybe = NULL; u_int af = addr->sa_family; if (af >= AF_MAX) return (NULL); MPASS(in_epoch(net_epoch_preempt)); CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != af) continue; if (ifa_maybe == NULL) ifa_maybe = ifa; if (ifa->ifa_netmask == 0) { if (sa_equal(addr, ifa->ifa_addr) || (ifa->ifa_dstaddr && sa_equal(addr, ifa->ifa_dstaddr))) goto done; continue; } if (ifp->if_flags & IFF_POINTOPOINT) { if (sa_equal(addr, ifa->ifa_dstaddr)) goto done; } else { cp = addr->sa_data; cp2 = ifa->ifa_addr->sa_data; cp3 = ifa->ifa_netmask->sa_data; cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask; for (; cp3 < cplim; cp3++) if ((*cp++ ^ *cp2++) & *cp3) break; if (cp3 == cplim) goto done; } } ifa = ifa_maybe; done: return (ifa); } /* * See whether new ifa is better than current one: * 1) A non-virtual one is preferred over virtual. * 2) A virtual in master state preferred over any other state. * * Used in several address selecting functions. */ int ifa_preferred(struct ifaddr *cur, struct ifaddr *next) { return (cur->ifa_carp && (!next->ifa_carp || ((*carp_master_p)(next) && !(*carp_master_p)(cur)))); } #include /* * Default action when installing a route with a Link Level gateway. * Lookup an appropriate real ifa to point to. * This should be moved to /sys/net/link.c eventually. */ static void link_rtrequest(int cmd, struct rtentry *rt, struct rt_addrinfo *info) { struct ifaddr *ifa, *oifa; struct sockaddr *dst; struct ifnet *ifp; if (cmd != RTM_ADD || ((ifa = rt->rt_ifa) == NULL) || ((ifp = ifa->ifa_ifp) == NULL) || ((dst = rt_key(rt)) == NULL)) return; NET_EPOCH_ENTER(); ifa = ifaof_ifpforaddr(dst, ifp); if (ifa) { oifa = rt->rt_ifa; if (oifa != ifa) { ifa_free(oifa); ifa_ref(ifa); } rt->rt_ifa = ifa; if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest) ifa->ifa_rtrequest(cmd, rt, info); } NET_EPOCH_EXIT(); } struct sockaddr_dl * link_alloc_sdl(size_t size, int flags) { return (malloc(size, M_TEMP, flags)); } void link_free_sdl(struct sockaddr *sa) { free(sa, M_TEMP); } /* * Fills in given sdl with interface basic info. * Returns pointer to filled sdl. 
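 *
 * Example (illustrative sketch): callers usually hand in a stack-allocated
 * sockaddr_dl, as ifa_maintain_loopback_route() above does:
 *
 *	struct sockaddr_dl sdl;
 *
 *	link_init_sdl(ifp, (struct sockaddr *)&sdl, ifp->if_type);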
*/ struct sockaddr_dl * link_init_sdl(struct ifnet *ifp, struct sockaddr *paddr, u_char iftype) { struct sockaddr_dl *sdl; sdl = (struct sockaddr_dl *)paddr; memset(sdl, 0, sizeof(struct sockaddr_dl)); sdl->sdl_len = sizeof(struct sockaddr_dl); sdl->sdl_family = AF_LINK; sdl->sdl_index = ifp->if_index; sdl->sdl_type = iftype; return (sdl); } /* * Mark an interface down and notify protocols of * the transition. */ static void if_unroute(struct ifnet *ifp, int flag, int fam) { struct ifaddr *ifa; KASSERT(flag == IFF_UP, ("if_unroute: flag != IFF_UP")); ifp->if_flags &= ~flag; getmicrotime(&ifp->if_lastchange); CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) pfctlinput(PRC_IFDOWN, ifa->ifa_addr); ifp->if_qflush(ifp); if (ifp->if_carp) (*carp_linkstate_p)(ifp); rt_ifmsg(ifp); } /* * Mark an interface up and notify protocols of * the transition. */ static void if_route(struct ifnet *ifp, int flag, int fam) { struct ifaddr *ifa; KASSERT(flag == IFF_UP, ("if_route: flag != IFF_UP")); ifp->if_flags |= flag; getmicrotime(&ifp->if_lastchange); CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) pfctlinput(PRC_IFUP, ifa->ifa_addr); if (ifp->if_carp) (*carp_linkstate_p)(ifp); rt_ifmsg(ifp); #ifdef INET6 in6_if_up(ifp); #endif } void (*vlan_link_state_p)(struct ifnet *); /* XXX: private from if_vlan */ void (*vlan_trunk_cap_p)(struct ifnet *); /* XXX: private from if_vlan */ struct ifnet *(*vlan_trunkdev_p)(struct ifnet *); struct ifnet *(*vlan_devat_p)(struct ifnet *, uint16_t); int (*vlan_tag_p)(struct ifnet *, uint16_t *); int (*vlan_pcp_p)(struct ifnet *, uint16_t *); int (*vlan_setcookie_p)(struct ifnet *, void *); void *(*vlan_cookie_p)(struct ifnet *); /* * Handle a change in the interface link state. To avoid LORs * between driver lock and upper layer locks, as well as possible * recursions, we post event to taskqueue, and all job * is done in static do_link_state_change(). */ void if_link_state_change(struct ifnet *ifp, int link_state) { /* Return if state hasn't changed. */ if (ifp->if_link_state == link_state) return; ifp->if_link_state = link_state; taskqueue_enqueue(taskqueue_swi, &ifp->if_linktask); } static void do_link_state_change(void *arg, int pending) { struct ifnet *ifp = (struct ifnet *)arg; int link_state = ifp->if_link_state; CURVNET_SET(ifp->if_vnet); /* Notify that the link state has changed. */ rt_ifmsg(ifp); if (ifp->if_vlantrunk != NULL) (*vlan_link_state_p)(ifp); if ((ifp->if_type == IFT_ETHER || ifp->if_type == IFT_L2VLAN) && ifp->if_l2com != NULL) (*ng_ether_link_state_p)(ifp, link_state); if (ifp->if_carp) (*carp_linkstate_p)(ifp); if (ifp->if_bridge) ifp->if_bridge_linkstate(ifp); if (ifp->if_lagg) (*lagg_linkstate_p)(ifp, link_state); if (IS_DEFAULT_VNET(curvnet)) devctl_notify("IFNET", ifp->if_xname, (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", NULL); if (pending > 1) if_printf(ifp, "%d link states coalesced\n", pending); if (log_link_state_change) if_printf(ifp, "link state changed to %s\n", (link_state == LINK_STATE_UP) ? "UP" : "DOWN" ); EVENTHANDLER_INVOKE(ifnet_link_event, ifp, link_state); CURVNET_RESTORE(); } /* * Mark an interface down and notify protocols of * the transition. */ void if_down(struct ifnet *ifp) { EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_DOWN); if_unroute(ifp, IFF_UP, AF_UNSPEC); } /* * Mark an interface up and notify protocols of * the transition. 
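 *
 * Example (illustrative only): drivers do not call if_up()/if_down() for
 * carrier changes; those are reported through if_link_state_change()
 * above, typically from an interrupt or tick handler:
 *
 *	if_link_state_change(ifp, LINK_STATE_UP);
 *	...
 *	if_link_state_change(ifp, LINK_STATE_DOWN);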
*/ void if_up(struct ifnet *ifp) { if_route(ifp, IFF_UP, AF_UNSPEC); EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_UP); } /* * Flush an interface queue. */ void if_qflush(struct ifnet *ifp) { struct mbuf *m, *n; struct ifaltq *ifq; ifq = &ifp->if_snd; IFQ_LOCK(ifq); #ifdef ALTQ if (ALTQ_IS_ENABLED(ifq)) ALTQ_PURGE(ifq); #endif n = ifq->ifq_head; while ((m = n) != NULL) { n = m->m_nextpkt; m_freem(m); } ifq->ifq_head = 0; ifq->ifq_tail = 0; ifq->ifq_len = 0; IFQ_UNLOCK(ifq); } /* * Map interface name to interface structure pointer, with or without * returning a reference. */ struct ifnet * ifunit_ref(const char *name) { struct ifnet *ifp; IFNET_RLOCK_NOSLEEP(); CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) { if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0 && !(ifp->if_flags & IFF_DYING)) break; } if (ifp != NULL) if_ref(ifp); IFNET_RUNLOCK_NOSLEEP(); return (ifp); } struct ifnet * ifunit(const char *name) { struct ifnet *ifp; IFNET_RLOCK_NOSLEEP(); CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) { if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0) break; } IFNET_RUNLOCK_NOSLEEP(); return (ifp); } static void * ifr_buffer_get_buffer(void *data) { union ifreq_union *ifrup; ifrup = data; #ifdef COMPAT_FREEBSD32 if (SV_CURPROC_FLAG(SV_ILP32)) return ((void *)(uintptr_t) ifrup->ifr32.ifr_ifru.ifru_buffer.buffer); #endif return (ifrup->ifr.ifr_ifru.ifru_buffer.buffer); } static void ifr_buffer_set_buffer_null(void *data) { union ifreq_union *ifrup; ifrup = data; #ifdef COMPAT_FREEBSD32 if (SV_CURPROC_FLAG(SV_ILP32)) ifrup->ifr32.ifr_ifru.ifru_buffer.buffer = 0; else #endif ifrup->ifr.ifr_ifru.ifru_buffer.buffer = NULL; } static size_t ifr_buffer_get_length(void *data) { union ifreq_union *ifrup; ifrup = data; #ifdef COMPAT_FREEBSD32 if (SV_CURPROC_FLAG(SV_ILP32)) return (ifrup->ifr32.ifr_ifru.ifru_buffer.length); #endif return (ifrup->ifr.ifr_ifru.ifru_buffer.length); } static void ifr_buffer_set_length(void *data, size_t len) { union ifreq_union *ifrup; ifrup = data; #ifdef COMPAT_FREEBSD32 if (SV_CURPROC_FLAG(SV_ILP32)) ifrup->ifr32.ifr_ifru.ifru_buffer.length = len; else #endif ifrup->ifr.ifr_ifru.ifru_buffer.length = len; } void * ifr_data_get_ptr(void *ifrp) { union ifreq_union *ifrup; ifrup = ifrp; #ifdef COMPAT_FREEBSD32 if (SV_CURPROC_FLAG(SV_ILP32)) return ((void *)(uintptr_t) ifrup->ifr32.ifr_ifru.ifru_data); #endif return (ifrup->ifr.ifr_ifru.ifru_data); } /* * Hardware specific interface ioctls. */ int ifhwioctl(u_long cmd, struct ifnet *ifp, caddr_t data, struct thread *td) { struct ifreq *ifr; int error = 0, do_ifup = 0; int new_flags, temp_flags; size_t namelen, onamelen; size_t descrlen; char *descrbuf, *odescrbuf; char new_name[IFNAMSIZ]; struct ifaddr *ifa; struct sockaddr_dl *sdl; ifr = (struct ifreq *)data; switch (cmd) { case SIOCGIFINDEX: ifr->ifr_index = ifp->if_index; break; case SIOCGIFFLAGS: temp_flags = ifp->if_flags | ifp->if_drv_flags; ifr->ifr_flags = temp_flags & 0xffff; ifr->ifr_flagshigh = temp_flags >> 16; break; case SIOCGIFCAP: ifr->ifr_reqcap = ifp->if_capabilities; ifr->ifr_curcap = ifp->if_capenable; break; #ifdef MAC case SIOCGIFMAC: error = mac_ifnet_ioctl_get(td->td_ucred, ifr, ifp); break; #endif case SIOCGIFMETRIC: ifr->ifr_metric = ifp->if_metric; break; case SIOCGIFMTU: ifr->ifr_mtu = ifp->if_mtu; break; case SIOCGIFPHYS: /* XXXGL: did this ever worked? 
*/ ifr->ifr_phys = 0; break; case SIOCGIFDESCR: error = 0; sx_slock(&ifdescr_sx); if (ifp->if_description == NULL) error = ENOMSG; else { /* space for terminating nul */ descrlen = strlen(ifp->if_description) + 1; if (ifr_buffer_get_length(ifr) < descrlen) ifr_buffer_set_buffer_null(ifr); else error = copyout(ifp->if_description, ifr_buffer_get_buffer(ifr), descrlen); ifr_buffer_set_length(ifr, descrlen); } sx_sunlock(&ifdescr_sx); break; case SIOCSIFDESCR: error = priv_check(td, PRIV_NET_SETIFDESCR); if (error) return (error); /* * Copy only (length-1) bytes to make sure that * if_description is always nul terminated. The * length parameter is supposed to count the * terminating nul in. */ if (ifr_buffer_get_length(ifr) > ifdescr_maxlen) return (ENAMETOOLONG); else if (ifr_buffer_get_length(ifr) == 0) descrbuf = NULL; else { descrbuf = malloc(ifr_buffer_get_length(ifr), M_IFDESCR, M_WAITOK | M_ZERO); error = copyin(ifr_buffer_get_buffer(ifr), descrbuf, ifr_buffer_get_length(ifr) - 1); if (error) { free(descrbuf, M_IFDESCR); break; } } sx_xlock(&ifdescr_sx); odescrbuf = ifp->if_description; ifp->if_description = descrbuf; sx_xunlock(&ifdescr_sx); getmicrotime(&ifp->if_lastchange); free(odescrbuf, M_IFDESCR); break; case SIOCGIFFIB: ifr->ifr_fib = ifp->if_fib; break; case SIOCSIFFIB: error = priv_check(td, PRIV_NET_SETIFFIB); if (error) return (error); if (ifr->ifr_fib >= rt_numfibs) return (EINVAL); ifp->if_fib = ifr->ifr_fib; break; case SIOCSIFFLAGS: error = priv_check(td, PRIV_NET_SETIFFLAGS); if (error) return (error); /* * Currently, no driver owned flags pass the IFF_CANTCHANGE * check, so we don't need special handling here yet. */ new_flags = (ifr->ifr_flags & 0xffff) | (ifr->ifr_flagshigh << 16); if (ifp->if_flags & IFF_UP && (new_flags & IFF_UP) == 0) { if_down(ifp); } else if (new_flags & IFF_UP && (ifp->if_flags & IFF_UP) == 0) { do_ifup = 1; } /* See if permanently promiscuous mode bit is about to flip */ if ((ifp->if_flags ^ new_flags) & IFF_PPROMISC) { if (new_flags & IFF_PPROMISC) ifp->if_flags |= IFF_PROMISC; else if (ifp->if_pcount == 0) ifp->if_flags &= ~IFF_PROMISC; if (log_promisc_mode_change) if_printf(ifp, "permanently promiscuous mode %s\n", ((new_flags & IFF_PPROMISC) ? "enabled" : "disabled")); } ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) | (new_flags &~ IFF_CANTCHANGE); if (ifp->if_ioctl) { (void) (*ifp->if_ioctl)(ifp, cmd, data); } if (do_ifup) if_up(ifp); getmicrotime(&ifp->if_lastchange); break; case SIOCSIFCAP: error = priv_check(td, PRIV_NET_SETIFCAP); if (error) return (error); if (ifp->if_ioctl == NULL) return (EOPNOTSUPP); if (ifr->ifr_reqcap & ~ifp->if_capabilities) return (EINVAL); error = (*ifp->if_ioctl)(ifp, cmd, data); if (error == 0) getmicrotime(&ifp->if_lastchange); break; #ifdef MAC case SIOCSIFMAC: error = mac_ifnet_ioctl_set(td->td_ucred, ifr, ifp); break; #endif case SIOCSIFNAME: error = priv_check(td, PRIV_NET_SETIFNAME); if (error) return (error); error = copyinstr(ifr_data_get_ptr(ifr), new_name, IFNAMSIZ, NULL); if (error != 0) return (error); if (new_name[0] == '\0') return (EINVAL); if (new_name[IFNAMSIZ-1] != '\0') { new_name[IFNAMSIZ-1] = '\0'; if (strlen(new_name) == IFNAMSIZ-1) return (EINVAL); } if (ifunit(new_name) != NULL) return (EEXIST); /* * XXX: Locking. Nothing else seems to lock if_flags, * and there are numerous other races with the * ifunit() checks not being atomic with namespace * changes (renames, vmoves, if_attach, etc). */ ifp->if_flags |= IFF_RENAMING; /* Announce the departure of the interface. 
*/ rt_ifannouncemsg(ifp, IFAN_DEPARTURE); EVENTHANDLER_INVOKE(ifnet_departure_event, ifp); if_printf(ifp, "changing name to '%s'\n", new_name); IF_ADDR_WLOCK(ifp); strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname)); ifa = ifp->if_addr; sdl = (struct sockaddr_dl *)ifa->ifa_addr; namelen = strlen(new_name); onamelen = sdl->sdl_nlen; /* * Move the address if needed. This is safe because we * allocate space for a name of length IFNAMSIZ when we * create this in if_attach(). */ if (namelen != onamelen) { bcopy(sdl->sdl_data + onamelen, sdl->sdl_data + namelen, sdl->sdl_alen); } bcopy(new_name, sdl->sdl_data, namelen); sdl->sdl_nlen = namelen; sdl = (struct sockaddr_dl *)ifa->ifa_netmask; bzero(sdl->sdl_data, onamelen); while (namelen != 0) sdl->sdl_data[--namelen] = 0xff; IF_ADDR_WUNLOCK(ifp); EVENTHANDLER_INVOKE(ifnet_arrival_event, ifp); /* Announce the return of the interface. */ rt_ifannouncemsg(ifp, IFAN_ARRIVAL); ifp->if_flags &= ~IFF_RENAMING; break; #ifdef VIMAGE case SIOCSIFVNET: error = priv_check(td, PRIV_NET_SETIFVNET); if (error) return (error); error = if_vmove_loan(td, ifp, ifr->ifr_name, ifr->ifr_jid); break; #endif case SIOCSIFMETRIC: error = priv_check(td, PRIV_NET_SETIFMETRIC); if (error) return (error); ifp->if_metric = ifr->ifr_metric; getmicrotime(&ifp->if_lastchange); break; case SIOCSIFPHYS: error = priv_check(td, PRIV_NET_SETIFPHYS); if (error) return (error); if (ifp->if_ioctl == NULL) return (EOPNOTSUPP); error = (*ifp->if_ioctl)(ifp, cmd, data); if (error == 0) getmicrotime(&ifp->if_lastchange); break; case SIOCSIFMTU: { u_long oldmtu = ifp->if_mtu; error = priv_check(td, PRIV_NET_SETIFMTU); if (error) return (error); if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU) return (EINVAL); if (ifp->if_ioctl == NULL) return (EOPNOTSUPP); error = (*ifp->if_ioctl)(ifp, cmd, data); if (error == 0) { getmicrotime(&ifp->if_lastchange); rt_ifmsg(ifp); #ifdef INET NETDUMP_REINIT(ifp); #endif } /* * If the link MTU changed, do network layer specific procedure. */ if (ifp->if_mtu != oldmtu) { #ifdef INET6 nd6_setmtu(ifp); #endif rt_updatemtu(ifp); } break; } case SIOCADDMULTI: case SIOCDELMULTI: if (cmd == SIOCADDMULTI) error = priv_check(td, PRIV_NET_ADDMULTI); else error = priv_check(td, PRIV_NET_DELMULTI); if (error) return (error); /* Don't allow group membership on non-multicast interfaces. */ if ((ifp->if_flags & IFF_MULTICAST) == 0) return (EOPNOTSUPP); /* Don't let users screw up protocols' entries. */ if (ifr->ifr_addr.sa_family != AF_LINK) return (EINVAL); if (cmd == SIOCADDMULTI) { struct ifmultiaddr *ifma; /* * Userland is only permitted to join groups once * via the if_addmulti() KPI, because it cannot hold * struct ifmultiaddr * between calls. It may also * lose a race while we check if the membership * already exists. 
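			 *
			 * Example (illustrative sketch only): in-kernel
			 * consumers, by contrast, may keep the returned
			 * ifmultiaddr and later drop the membership with the
			 * same sockaddr, roughly:
			 *
			 *	error = if_addmulti(ifp, sa, &ifma);
			 *	...
			 *	if (error == 0)
			 *		(void)if_delmulti(ifp, sa);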
*/ IF_ADDR_RLOCK(ifp); ifma = if_findmulti(ifp, &ifr->ifr_addr); IF_ADDR_RUNLOCK(ifp); if (ifma != NULL) error = EADDRINUSE; else error = if_addmulti(ifp, &ifr->ifr_addr, &ifma); } else { error = if_delmulti(ifp, &ifr->ifr_addr); } if (error == 0) getmicrotime(&ifp->if_lastchange); break; case SIOCSIFPHYADDR: case SIOCDIFPHYADDR: #ifdef INET6 case SIOCSIFPHYADDR_IN6: #endif case SIOCSIFMEDIA: case SIOCSIFGENERIC: error = priv_check(td, PRIV_NET_HWIOCTL); if (error) return (error); if (ifp->if_ioctl == NULL) return (EOPNOTSUPP); error = (*ifp->if_ioctl)(ifp, cmd, data); if (error == 0) getmicrotime(&ifp->if_lastchange); break; case SIOCGIFSTATUS: case SIOCGIFPSRCADDR: case SIOCGIFPDSTADDR: case SIOCGIFMEDIA: case SIOCGIFXMEDIA: case SIOCGIFGENERIC: case SIOCGIFRSSKEY: case SIOCGIFRSSHASH: if (ifp->if_ioctl == NULL) return (EOPNOTSUPP); error = (*ifp->if_ioctl)(ifp, cmd, data); break; case SIOCSIFLLADDR: error = priv_check(td, PRIV_NET_SETLLADDR); if (error) return (error); error = if_setlladdr(ifp, ifr->ifr_addr.sa_data, ifr->ifr_addr.sa_len); break; case SIOCGHWADDR: error = if_gethwaddr(ifp, ifr); break; CASE_IOC_IFGROUPREQ(SIOCAIFGROUP): error = priv_check(td, PRIV_NET_ADDIFGROUP); if (error) return (error); if ((error = if_addgroup(ifp, ifgr_group_get((struct ifgroupreq *)data)))) return (error); break; CASE_IOC_IFGROUPREQ(SIOCGIFGROUP): if ((error = if_getgroup((struct ifgroupreq *)data, ifp))) return (error); break; CASE_IOC_IFGROUPREQ(SIOCDIFGROUP): error = priv_check(td, PRIV_NET_DELIFGROUP); if (error) return (error); if ((error = if_delgroup(ifp, ifgr_group_get((struct ifgroupreq *)data)))) return (error); break; default: error = ENOIOCTL; break; } return (error); } #ifdef COMPAT_FREEBSD32 struct ifconf32 { int32_t ifc_len; union { uint32_t ifcu_buf; uint32_t ifcu_req; } ifc_ifcu; }; #define SIOCGIFCONF32 _IOWR('i', 36, struct ifconf32) #endif #ifdef COMPAT_FREEBSD32 static void ifmr_init(struct ifmediareq *ifmr, caddr_t data) { struct ifmediareq32 *ifmr32; ifmr32 = (struct ifmediareq32 *)data; memcpy(ifmr->ifm_name, ifmr32->ifm_name, sizeof(ifmr->ifm_name)); ifmr->ifm_current = ifmr32->ifm_current; ifmr->ifm_mask = ifmr32->ifm_mask; ifmr->ifm_status = ifmr32->ifm_status; ifmr->ifm_active = ifmr32->ifm_active; ifmr->ifm_count = ifmr32->ifm_count; ifmr->ifm_ulist = (int *)(uintptr_t)ifmr32->ifm_ulist; } static void ifmr_update(const struct ifmediareq *ifmr, caddr_t data) { struct ifmediareq32 *ifmr32; ifmr32 = (struct ifmediareq32 *)data; ifmr32->ifm_current = ifmr->ifm_current; ifmr32->ifm_mask = ifmr->ifm_mask; ifmr32->ifm_status = ifmr->ifm_status; ifmr32->ifm_active = ifmr->ifm_active; ifmr32->ifm_count = ifmr->ifm_count; } #endif /* * Interface ioctls. */ int ifioctl(struct socket *so, u_long cmd, caddr_t data, struct thread *td) { #ifdef COMPAT_FREEBSD32 caddr_t saved_data = NULL; struct ifmediareq ifmr; struct ifmediareq *ifmrp; #endif struct ifnet *ifp; struct ifreq *ifr; int error; int oif_flags; #ifdef VIMAGE int shutdown; #endif CURVNET_SET(so->so_vnet); #ifdef VIMAGE /* Make sure the VNET is stable. */ shutdown = (so->so_vnet->vnet_state > SI_SUB_VNET && so->so_vnet->vnet_state < SI_SUB_VNET_DONE) ? 
1 : 0; if (shutdown) { CURVNET_RESTORE(); return (EBUSY); } #endif switch (cmd) { case SIOCGIFCONF: error = ifconf(cmd, data); CURVNET_RESTORE(); return (error); #ifdef COMPAT_FREEBSD32 case SIOCGIFCONF32: { struct ifconf32 *ifc32; struct ifconf ifc; ifc32 = (struct ifconf32 *)data; ifc.ifc_len = ifc32->ifc_len; ifc.ifc_buf = PTRIN(ifc32->ifc_buf); error = ifconf(SIOCGIFCONF, (void *)&ifc); CURVNET_RESTORE(); if (error == 0) ifc32->ifc_len = ifc.ifc_len; return (error); } #endif } #ifdef COMPAT_FREEBSD32 ifmrp = NULL; switch (cmd) { case SIOCGIFMEDIA32: case SIOCGIFXMEDIA32: ifmrp = &ifmr; ifmr_init(ifmrp, data); cmd = _IOC_NEWTYPE(cmd, struct ifmediareq); saved_data = data; data = (caddr_t)ifmrp; } #endif ifr = (struct ifreq *)data; switch (cmd) { #ifdef VIMAGE case SIOCSIFRVNET: error = priv_check(td, PRIV_NET_SETIFVNET); if (error == 0) error = if_vmove_reclaim(td, ifr->ifr_name, ifr->ifr_jid); goto out_noref; #endif case SIOCIFCREATE: case SIOCIFCREATE2: error = priv_check(td, PRIV_NET_IFCREATE); if (error == 0) error = if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name), cmd == SIOCIFCREATE2 ? ifr_data_get_ptr(ifr) : NULL); goto out_noref; case SIOCIFDESTROY: error = priv_check(td, PRIV_NET_IFDESTROY); if (error == 0) error = if_clone_destroy(ifr->ifr_name); goto out_noref; case SIOCIFGCLONERS: error = if_clone_list((struct if_clonereq *)data); goto out_noref; CASE_IOC_IFGROUPREQ(SIOCGIFGMEMB): error = if_getgroupmembers((struct ifgroupreq *)data); goto out_noref; #if defined(INET) || defined(INET6) case SIOCSVH: case SIOCGVH: if (carp_ioctl_p == NULL) error = EPROTONOSUPPORT; else error = (*carp_ioctl_p)(ifr, cmd, td); goto out_noref; #endif } ifp = ifunit_ref(ifr->ifr_name); if (ifp == NULL) { error = ENXIO; goto out_noref; } error = ifhwioctl(cmd, ifp, data, td); if (error != ENOIOCTL) goto out_ref; oif_flags = ifp->if_flags; if (so->so_proto == NULL) { error = EOPNOTSUPP; goto out_ref; } /* * Pass the request on to the socket control method, and if the * latter returns EOPNOTSUPP, directly to the interface. * * Make an exception for the legacy SIOCSIF* requests. Drivers * trust SIOCSIFADDR et al to come from an already privileged * layer, and do not perform any credentials checks or input * validation. */ error = ((*so->so_proto->pr_usrreqs->pru_control)(so, cmd, data, ifp, td)); if (error == EOPNOTSUPP && ifp != NULL && ifp->if_ioctl != NULL && cmd != SIOCSIFADDR && cmd != SIOCSIFBRDADDR && cmd != SIOCSIFDSTADDR && cmd != SIOCSIFNETMASK) error = (*ifp->if_ioctl)(ifp, cmd, data); if ((oif_flags ^ ifp->if_flags) & IFF_UP) { #ifdef INET6 if (ifp->if_flags & IFF_UP) in6_if_up(ifp); #endif } out_ref: if_rele(ifp); out_noref: #ifdef COMPAT_FREEBSD32 if (ifmrp != NULL) { KASSERT((cmd == SIOCGIFMEDIA || cmd == SIOCGIFXMEDIA), ("ifmrp non-NULL, but cmd is not an ifmedia req 0x%lx", cmd)); data = saved_data; ifmr_update(ifmrp, data); } #endif CURVNET_RESTORE(); return (error); } /* * The code common to handling reference counted flags, * e.g., in ifpromisc() and if_allmulti(). * The "pflag" argument can specify a permanent mode flag to check, * such as IFF_PPROMISC for promiscuous mode; should be 0 if none. * * Only to be used on stack-owned flags, not driver-owned flags. 
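 *
 * Example (illustrative sketch): the reference counting lets consumers
 * simply bracket their use, as bpf(4)-style consumers do for promiscuous
 * mode:
 *
 *	error = ifpromisc(ifp, 1);	(first caller sets IFF_PROMISC)
 *	...
 *	error = ifpromisc(ifp, 0);	(last caller clears it again)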
*/ static int if_setflag(struct ifnet *ifp, int flag, int pflag, int *refcount, int onswitch) { struct ifreq ifr; int error; int oldflags, oldcount; /* Sanity checks to catch programming errors */ KASSERT((flag & (IFF_DRV_OACTIVE|IFF_DRV_RUNNING)) == 0, ("%s: setting driver-owned flag %d", __func__, flag)); if (onswitch) KASSERT(*refcount >= 0, ("%s: increment negative refcount %d for flag %d", __func__, *refcount, flag)); else KASSERT(*refcount > 0, ("%s: decrement non-positive refcount %d for flag %d", __func__, *refcount, flag)); /* In case this mode is permanent, just touch refcount */ if (ifp->if_flags & pflag) { *refcount += onswitch ? 1 : -1; return (0); } /* Save ifnet parameters for if_ioctl() may fail */ oldcount = *refcount; oldflags = ifp->if_flags; /* * See if we aren't the only and touching refcount is enough. * Actually toggle interface flag if we are the first or last. */ if (onswitch) { if ((*refcount)++) return (0); ifp->if_flags |= flag; } else { if (--(*refcount)) return (0); ifp->if_flags &= ~flag; } /* Call down the driver since we've changed interface flags */ if (ifp->if_ioctl == NULL) { error = EOPNOTSUPP; goto recover; } ifr.ifr_flags = ifp->if_flags & 0xffff; ifr.ifr_flagshigh = ifp->if_flags >> 16; error = (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr); if (error) goto recover; /* Notify userland that interface flags have changed */ rt_ifmsg(ifp); return (0); recover: /* Recover after driver error */ *refcount = oldcount; ifp->if_flags = oldflags; return (error); } /* * Set/clear promiscuous mode on interface ifp based on the truth value * of pswitch. The calls are reference counted so that only the first * "on" request actually has an effect, as does the final "off" request. * Results are undefined if the "off" and "on" requests are not matched. */ int ifpromisc(struct ifnet *ifp, int pswitch) { int error; int oldflags = ifp->if_flags; error = if_setflag(ifp, IFF_PROMISC, IFF_PPROMISC, &ifp->if_pcount, pswitch); /* If promiscuous mode status has changed, log a message */ if (error == 0 && ((ifp->if_flags ^ oldflags) & IFF_PROMISC) && log_promisc_mode_change) if_printf(ifp, "promiscuous mode %s\n", (ifp->if_flags & IFF_PROMISC) ? "enabled" : "disabled"); return (error); } /* * Return interface configuration * of system. List may be used * in later ioctl's (above) to get * other information. */ /*ARGSUSED*/ static int ifconf(u_long cmd, caddr_t data) { struct ifconf *ifc = (struct ifconf *)data; struct ifnet *ifp; struct ifaddr *ifa; struct ifreq ifr; struct sbuf *sb; int error, full = 0, valid_len, max_len; /* Limit initial buffer size to MAXPHYS to avoid DoS from userspace. */ max_len = MAXPHYS - 1; /* Prevent hostile input from being able to crash the system */ if (ifc->ifc_len <= 0) return (EINVAL); again: if (ifc->ifc_len <= max_len) { max_len = ifc->ifc_len; full = 1; } sb = sbuf_new(NULL, NULL, max_len + 1, SBUF_FIXEDLEN); max_len = 0; valid_len = 0; IFNET_RLOCK(); CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) { int addrs; /* * Zero the ifr to make sure we don't disclose the contents * of the stack. 
*/ memset(&ifr, 0, sizeof(ifr)); if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name)) >= sizeof(ifr.ifr_name)) { sbuf_delete(sb); IFNET_RUNLOCK(); return (ENAMETOOLONG); } addrs = 0; IF_ADDR_RLOCK(ifp); CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { struct sockaddr *sa = ifa->ifa_addr; if (prison_if(curthread->td_ucred, sa) != 0) continue; addrs++; if (sa->sa_len <= sizeof(*sa)) { if (sa->sa_len < sizeof(*sa)) { memset(&ifr.ifr_ifru.ifru_addr, 0, sizeof(ifr.ifr_ifru.ifru_addr)); memcpy(&ifr.ifr_ifru.ifru_addr, sa, sa->sa_len); } else ifr.ifr_ifru.ifru_addr = *sa; sbuf_bcat(sb, &ifr, sizeof(ifr)); max_len += sizeof(ifr); } else { sbuf_bcat(sb, &ifr, offsetof(struct ifreq, ifr_addr)); max_len += offsetof(struct ifreq, ifr_addr); sbuf_bcat(sb, sa, sa->sa_len); max_len += sa->sa_len; } if (sbuf_error(sb) == 0) valid_len = sbuf_len(sb); } IF_ADDR_RUNLOCK(ifp); if (addrs == 0) { sbuf_bcat(sb, &ifr, sizeof(ifr)); max_len += sizeof(ifr); if (sbuf_error(sb) == 0) valid_len = sbuf_len(sb); } } IFNET_RUNLOCK(); /* * If we didn't allocate enough space (uncommon), try again. If * we have already allocated as much space as we are allowed, * return what we've got. */ if (valid_len != max_len && !full) { sbuf_delete(sb); goto again; } ifc->ifc_len = valid_len; sbuf_finish(sb); error = copyout(sbuf_data(sb), ifc->ifc_req, ifc->ifc_len); sbuf_delete(sb); return (error); } /* * Just like ifpromisc(), but for all-multicast-reception mode. */ int if_allmulti(struct ifnet *ifp, int onswitch) { return (if_setflag(ifp, IFF_ALLMULTI, 0, &ifp->if_amcount, onswitch)); } struct ifmultiaddr * if_findmulti(struct ifnet *ifp, const struct sockaddr *sa) { struct ifmultiaddr *ifma; IF_ADDR_LOCK_ASSERT(ifp); CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (sa->sa_family == AF_LINK) { if (sa_dl_equal(ifma->ifma_addr, sa)) break; } else { if (sa_equal(ifma->ifma_addr, sa)) break; } } return ifma; } /* * Allocate a new ifmultiaddr and initialize based on passed arguments. We * make copies of passed sockaddrs. The ifmultiaddr will not be added to * the ifnet multicast address list here, so the caller must do that and * other setup work (such as notifying the device driver). The reference * count is initialized to 1. */ static struct ifmultiaddr * if_allocmulti(struct ifnet *ifp, struct sockaddr *sa, struct sockaddr *llsa, int mflags) { struct ifmultiaddr *ifma; struct sockaddr *dupsa; ifma = malloc(sizeof *ifma, M_IFMADDR, mflags | M_ZERO); if (ifma == NULL) return (NULL); dupsa = malloc(sa->sa_len, M_IFMADDR, mflags); if (dupsa == NULL) { free(ifma, M_IFMADDR); return (NULL); } bcopy(sa, dupsa, sa->sa_len); ifma->ifma_addr = dupsa; ifma->ifma_ifp = ifp; ifma->ifma_refcount = 1; ifma->ifma_protospec = NULL; if (llsa == NULL) { ifma->ifma_lladdr = NULL; return (ifma); } dupsa = malloc(llsa->sa_len, M_IFMADDR, mflags); if (dupsa == NULL) { free(ifma->ifma_addr, M_IFMADDR); free(ifma, M_IFMADDR); return (NULL); } bcopy(llsa, dupsa, llsa->sa_len); ifma->ifma_lladdr = dupsa; return (ifma); } /* * if_freemulti: free ifmultiaddr structure and possibly attached related * addresses. The caller is responsible for implementing reference * counting, notifying the driver, handling routing messages, and releasing * any dependent link layer state. 
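 *
 * A hedged sketch of the caller's side of that contract (compare
 * if_delmulti_locked() below); the unlinking and reference counting
 * happen in the caller, and the actual free is deferred to an epoch
 * callback:
 *
 *	if (--ifma->ifma_refcount == 0) {
 *		CK_STAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifmultiaddr,
 *		    ifma_link);
 *		if_freemulti(ifma);
 *	}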
*/ #ifdef MCAST_VERBOSE extern void kdb_backtrace(void); #endif static void if_freemulti_internal(struct ifmultiaddr *ifma) { KASSERT(ifma->ifma_refcount == 0, ("if_freemulti: refcount %d", ifma->ifma_refcount)); if (ifma->ifma_lladdr != NULL) free(ifma->ifma_lladdr, M_IFMADDR); #ifdef MCAST_VERBOSE kdb_backtrace(); printf("%s freeing ifma: %p\n", __func__, ifma); #endif free(ifma->ifma_addr, M_IFMADDR); free(ifma, M_IFMADDR); } static void if_destroymulti(epoch_context_t ctx) { struct ifmultiaddr *ifma; ifma = __containerof(ctx, struct ifmultiaddr, ifma_epoch_ctx); if_freemulti_internal(ifma); } void if_freemulti(struct ifmultiaddr *ifma) { KASSERT(ifma->ifma_refcount == 0, ("if_freemulti_epoch: refcount %d", ifma->ifma_refcount)); epoch_call(net_epoch_preempt, &ifma->ifma_epoch_ctx, if_destroymulti); } /* * Register an additional multicast address with a network interface. * * - If the address is already present, bump the reference count on the * address and return. * - If the address is not link-layer, look up a link layer address. * - Allocate address structures for one or both addresses, and attach to the * multicast address list on the interface. If automatically adding a link * layer address, the protocol address will own a reference to the link * layer address, to be freed when it is freed. * - Notify the network device driver of an addition to the multicast address * list. * * 'sa' points to caller-owned memory with the desired multicast address. * * 'retifma' will be used to return a pointer to the resulting multicast * address reference, if desired. */ int if_addmulti(struct ifnet *ifp, struct sockaddr *sa, struct ifmultiaddr **retifma) { struct ifmultiaddr *ifma, *ll_ifma; struct sockaddr *llsa; struct sockaddr_dl sdl; int error; #ifdef INET IN_MULTI_LIST_UNLOCK_ASSERT(); #endif #ifdef INET6 IN6_MULTI_LIST_UNLOCK_ASSERT(); #endif /* * If the address is already present, return a new reference to it; * otherwise, allocate storage and set up a new address. */ IF_ADDR_WLOCK(ifp); ifma = if_findmulti(ifp, sa); if (ifma != NULL) { ifma->ifma_refcount++; if (retifma != NULL) *retifma = ifma; IF_ADDR_WUNLOCK(ifp); return (0); } /* * The address isn't already present; resolve the protocol address * into a link layer address, and then look that up, bump its * refcount or allocate an ifma for that also. * Most link layer resolving functions returns address data which * fits inside default sockaddr_dl structure. However callback * can allocate another sockaddr structure, in that case we need to * free it later. */ llsa = NULL; ll_ifma = NULL; if (ifp->if_resolvemulti != NULL) { /* Provide called function with buffer size information */ sdl.sdl_len = sizeof(sdl); llsa = (struct sockaddr *)&sdl; error = ifp->if_resolvemulti(ifp, &llsa, sa); if (error) goto unlock_out; } /* * Allocate the new address. Don't hook it up yet, as we may also * need to allocate a link layer multicast address. */ ifma = if_allocmulti(ifp, sa, llsa, M_NOWAIT); if (ifma == NULL) { error = ENOMEM; goto free_llsa_out; } /* * If a link layer address is found, we'll need to see if it's * already present in the address list, or allocate is as well. * When this block finishes, the link layer address will be on the * list. 
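 *
 * For illustration, the linkage this function ends up building:
 *
 *	ifma->ifma_addr		private copy of 'sa' (protocol address)
 *	ifma->ifma_lladdr	private copy of 'llsa', if any
 *	ifma->ifma_llifma	ll_ifma, whose ifma_addr is the AF_LINK
 *				address; its refcount accounts for this
 *				protocol-layer ifma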
*/ if (llsa != NULL) { ll_ifma = if_findmulti(ifp, llsa); if (ll_ifma == NULL) { ll_ifma = if_allocmulti(ifp, llsa, NULL, M_NOWAIT); if (ll_ifma == NULL) { --ifma->ifma_refcount; if_freemulti(ifma); error = ENOMEM; goto free_llsa_out; } ll_ifma->ifma_flags |= IFMA_F_ENQUEUED; CK_STAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ll_ifma, ifma_link); } else ll_ifma->ifma_refcount++; ifma->ifma_llifma = ll_ifma; } /* * We now have a new multicast address, ifma, and possibly a new or * referenced link layer address. Add the primary address to the * ifnet address list. */ ifma->ifma_flags |= IFMA_F_ENQUEUED; CK_STAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link); if (retifma != NULL) *retifma = ifma; /* * Must generate the message while holding the lock so that 'ifma' * pointer is still valid. */ rt_newmaddrmsg(RTM_NEWMADDR, ifma); IF_ADDR_WUNLOCK(ifp); /* * We are certain we have added something, so call down to the * interface to let them know about it. */ if (ifp->if_ioctl != NULL) { (void) (*ifp->if_ioctl)(ifp, SIOCADDMULTI, 0); } if ((llsa != NULL) && (llsa != (struct sockaddr *)&sdl)) link_free_sdl(llsa); return (0); free_llsa_out: if ((llsa != NULL) && (llsa != (struct sockaddr *)&sdl)) link_free_sdl(llsa); unlock_out: IF_ADDR_WUNLOCK(ifp); return (error); } /* * Delete a multicast group membership by network-layer group address. * * Returns ENOENT if the entry could not be found. If ifp no longer * exists, results are undefined. This entry point should only be used * from subsystems which do appropriate locking to hold ifp for the * duration of the call. * Network-layer protocol domains must use if_delmulti_ifma(). */ int if_delmulti(struct ifnet *ifp, struct sockaddr *sa) { struct ifmultiaddr *ifma; int lastref; #ifdef INVARIANTS struct ifnet *oifp; IFNET_RLOCK_NOSLEEP(); CK_STAILQ_FOREACH(oifp, &V_ifnet, if_link) if (ifp == oifp) break; if (ifp != oifp) ifp = NULL; IFNET_RUNLOCK_NOSLEEP(); KASSERT(ifp != NULL, ("%s: ifnet went away", __func__)); #endif if (ifp == NULL) return (ENOENT); IF_ADDR_WLOCK(ifp); lastref = 0; ifma = if_findmulti(ifp, sa); if (ifma != NULL) lastref = if_delmulti_locked(ifp, ifma, 0); IF_ADDR_WUNLOCK(ifp); if (ifma == NULL) return (ENOENT); if (lastref && ifp->if_ioctl != NULL) { (void)(*ifp->if_ioctl)(ifp, SIOCDELMULTI, 0); } return (0); } /* * Delete all multicast group membership for an interface. * Should be used to quickly flush all multicast filters. */ void if_delallmulti(struct ifnet *ifp) { struct ifmultiaddr *ifma; struct ifmultiaddr *next; IF_ADDR_WLOCK(ifp); CK_STAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next) if_delmulti_locked(ifp, ifma, 0); IF_ADDR_WUNLOCK(ifp); } void if_delmulti_ifma(struct ifmultiaddr *ifma) { if_delmulti_ifma_flags(ifma, 0); } /* * Delete a multicast group membership by group membership pointer. * Network-layer protocol domains must use this routine. * * It is safe to call this routine if the ifp disappeared. */ void if_delmulti_ifma_flags(struct ifmultiaddr *ifma, int flags) { struct ifnet *ifp; int lastref; MCDPRINTF("%s freeing ifma: %p\n", __func__, ifma); #ifdef INET IN_MULTI_LIST_UNLOCK_ASSERT(); #endif ifp = ifma->ifma_ifp; #ifdef DIAGNOSTIC if (ifp == NULL) { printf("%s: ifma_ifp seems to be detached\n", __func__); } else { struct ifnet *oifp; IFNET_RLOCK_NOSLEEP(); CK_STAILQ_FOREACH(oifp, &V_ifnet, if_link) if (ifp == oifp) break; if (ifp != oifp) ifp = NULL; IFNET_RUNLOCK_NOSLEEP(); } #endif /* * If and only if the ifnet instance exists: Acquire the address lock. 
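 *
 * The NULL check matters because network-layer code may legitimately
 * still hold an ifma whose interface has already been detached; a
 * hedged example of such a caller, where 'inm' stands for some
 * protocol-level membership structure:
 *
 *	if_delmulti_ifma(inm->inm_ifma);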
*/ if (ifp != NULL) IF_ADDR_WLOCK(ifp); lastref = if_delmulti_locked(ifp, ifma, flags); if (ifp != NULL) { /* * If and only if the ifnet instance exists: * Release the address lock. * If the group was left: update the hardware hash filter. */ IF_ADDR_WUNLOCK(ifp); if (lastref && ifp->if_ioctl != NULL) { (void)(*ifp->if_ioctl)(ifp, SIOCDELMULTI, 0); } } } /* * Perform deletion of network-layer and/or link-layer multicast address. * * Return 0 if the reference count was decremented. * Return 1 if the final reference was released, indicating that the * hardware hash filter should be reprogrammed. */ static int if_delmulti_locked(struct ifnet *ifp, struct ifmultiaddr *ifma, int detaching) { struct ifmultiaddr *ll_ifma; if (ifp != NULL && ifma->ifma_ifp != NULL) { KASSERT(ifma->ifma_ifp == ifp, ("%s: inconsistent ifp %p", __func__, ifp)); IF_ADDR_WLOCK_ASSERT(ifp); } ifp = ifma->ifma_ifp; MCDPRINTF("%s freeing %p from %s \n", __func__, ifma, ifp ? ifp->if_xname : ""); /* * If the ifnet is detaching, null out references to ifnet, * so that upper protocol layers will notice, and not attempt * to obtain locks for an ifnet which no longer exists. The * routing socket announcement must happen before the ifnet * instance is detached from the system. */ if (detaching) { #ifdef DIAGNOSTIC printf("%s: detaching ifnet instance %p\n", __func__, ifp); #endif /* * ifp may already be nulled out if we are being reentered * to delete the ll_ifma. */ if (ifp != NULL) { rt_newmaddrmsg(RTM_DELMADDR, ifma); ifma->ifma_ifp = NULL; } } if (--ifma->ifma_refcount > 0) return 0; if (ifp != NULL && detaching == 0 && (ifma->ifma_flags & IFMA_F_ENQUEUED)) { CK_STAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifmultiaddr, ifma_link); ifma->ifma_flags &= ~IFMA_F_ENQUEUED; } /* * If this ifma is a network-layer ifma, a link-layer ifma may * have been associated with it. Release it first if so. */ ll_ifma = ifma->ifma_llifma; if (ll_ifma != NULL) { KASSERT(ifma->ifma_lladdr != NULL, ("%s: llifma w/o lladdr", __func__)); if (detaching) ll_ifma->ifma_ifp = NULL; /* XXX */ if (--ll_ifma->ifma_refcount == 0) { if (ifp != NULL) { if (ll_ifma->ifma_flags & IFMA_F_ENQUEUED) { CK_STAILQ_REMOVE(&ifp->if_multiaddrs, ll_ifma, ifmultiaddr, ifma_link); ll_ifma->ifma_flags &= ~IFMA_F_ENQUEUED; } } if_freemulti(ll_ifma); } } #ifdef INVARIANTS if (ifp) { struct ifmultiaddr *ifmatmp; CK_STAILQ_FOREACH(ifmatmp, &ifp->if_multiaddrs, ifma_link) MPASS(ifma != ifmatmp); } #endif if_freemulti(ifma); /* * The last reference to this instance of struct ifmultiaddr * was released; the hardware should be notified of this change. */ return 1; } /* * Set the link layer address on an interface. * * At this time we only support certain types of interfaces, * and we don't allow the length of the address to change. * * Set noinline to be dtrace-friendly */ __noinline int if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len) { struct sockaddr_dl *sdl; struct ifaddr *ifa; struct ifreq ifr; int rc; rc = 0; NET_EPOCH_ENTER(); ifa = ifp->if_addr; if (ifa == NULL) { rc = EINVAL; goto out; } sdl = (struct sockaddr_dl *)ifa->ifa_addr; if (sdl == NULL) { rc = EINVAL; goto out; } if (len != sdl->sdl_alen) { /* don't allow length to change */ rc = EINVAL; goto out; } switch (ifp->if_type) { case IFT_ETHER: case IFT_XETHER: case IFT_L2VLAN: case IFT_BRIDGE: case IFT_IEEE8023ADLAG: bcopy(lladdr, LLADDR(sdl), len); break; default: rc = ENODEV; goto out; } /* * If the interface is already up, we need * to re-init it in order to reprogram its * address filter. 
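 *
 * A hedged illustration of why: a typical driver init routine loads its
 * unicast filter from the current link-level address, e.g.
 *
 *	bcopy(IF_LLADDR(ifp), hw_macaddr, ETHER_ADDR_LEN);
 *
 * ('hw_macaddr' is hypothetical), so the down/up cycle issued below is
 * what makes the new address take effect in hardware.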
*/ NET_EPOCH_EXIT(); if ((ifp->if_flags & IFF_UP) != 0) { if (ifp->if_ioctl) { ifp->if_flags &= ~IFF_UP; ifr.ifr_flags = ifp->if_flags & 0xffff; ifr.ifr_flagshigh = ifp->if_flags >> 16; (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr); ifp->if_flags |= IFF_UP; ifr.ifr_flags = ifp->if_flags & 0xffff; ifr.ifr_flagshigh = ifp->if_flags >> 16; (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr); } } EVENTHANDLER_INVOKE(iflladdr_event, ifp); return (0); out: NET_EPOCH_EXIT(); return (rc); } /* * Compat function for handling basic encapsulation requests. * Not converted stacks (FDDI, IB, ..) supports traditional * output model: ARP (and other similar L2 protocols) are handled * inside output routine, arpresolve/nd6_resolve() returns MAC * address instead of full prepend. * * This function creates calculated header==MAC for IPv4/IPv6 and * returns EAFNOSUPPORT (which is then handled in ARP code) for other * address families. */ static int if_requestencap_default(struct ifnet *ifp, struct if_encap_req *req) { if (req->rtype != IFENCAP_LL) return (EOPNOTSUPP); if (req->bufsize < req->lladdr_len) return (ENOMEM); switch (req->family) { case AF_INET: case AF_INET6: break; default: return (EAFNOSUPPORT); } /* Copy lladdr to storage as is */ memmove(req->buf, req->lladdr, req->lladdr_len); req->bufsize = req->lladdr_len; req->lladdr_off = 0; return (0); } /* * Tunnel interfaces can nest, also they may cause infinite recursion * calls when misconfigured. We'll prevent this by detecting loops. * High nesting level may cause stack exhaustion. We'll prevent this * by introducing upper limit. * * Return 0, if tunnel nesting count is equal or less than limit. */ int if_tunnel_check_nesting(struct ifnet *ifp, struct mbuf *m, uint32_t cookie, int limit) { struct m_tag *mtag; int count; count = 1; mtag = NULL; while ((mtag = m_tag_locate(m, cookie, 0, mtag)) != NULL) { if (*(struct ifnet **)(mtag + 1) == ifp) { log(LOG_NOTICE, "%s: loop detected\n", if_name(ifp)); return (EIO); } count++; } if (count > limit) { log(LOG_NOTICE, "%s: if_output recursively called too many times(%d)\n", if_name(ifp), count); return (EIO); } mtag = m_tag_alloc(cookie, 0, sizeof(struct ifnet *), M_NOWAIT); if (mtag == NULL) return (ENOMEM); *(struct ifnet **)(mtag + 1) = ifp; m_tag_prepend(m, mtag); return (0); } /* * Get the link layer address that was read from the hardware at attach. * * This is only set by Ethernet NICs (IFT_ETHER), but laggX interfaces re-type * their component interfaces as IFT_IEEE8023ADLAG. */ int if_gethwaddr(struct ifnet *ifp, struct ifreq *ifr) { if (ifp->if_hw_addr == NULL) return (ENODEV); switch (ifp->if_type) { case IFT_ETHER: case IFT_IEEE8023ADLAG: bcopy(ifp->if_hw_addr, ifr->ifr_addr.sa_data, ifp->if_addrlen); return (0); default: return (ENODEV); } } /* * The name argument must be a pointer to storage which will last as * long as the interface does. For physical devices, the result of * device_get_name(dev) is a good choice and for pseudo-devices a * static string works well. */ void if_initname(struct ifnet *ifp, const char *name, int unit) { ifp->if_dname = name; ifp->if_dunit = unit; if (unit != IF_DUNIT_NONE) snprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit); else strlcpy(ifp->if_xname, name, IFNAMSIZ); } int if_printf(struct ifnet *ifp, const char *fmt, ...) 
{ char if_fmt[256]; va_list ap; snprintf(if_fmt, sizeof(if_fmt), "%s: %s", ifp->if_xname, fmt); va_start(ap, fmt); vlog(LOG_INFO, if_fmt, ap); va_end(ap); return (0); } void if_start(struct ifnet *ifp) { (*(ifp)->if_start)(ifp); } /* * Backwards compatibility interface for drivers * that have not implemented it */ static int if_transmit(struct ifnet *ifp, struct mbuf *m) { int error; IFQ_HANDOFF(ifp, m, error); return (error); } static void if_input_default(struct ifnet *ifp __unused, struct mbuf *m) { m_freem(m); } int if_handoff(struct ifqueue *ifq, struct mbuf *m, struct ifnet *ifp, int adjust) { int active = 0; IF_LOCK(ifq); if (_IF_QFULL(ifq)) { IF_UNLOCK(ifq); if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1); m_freem(m); return (0); } if (ifp != NULL) { if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len + adjust); if (m->m_flags & (M_BCAST|M_MCAST)) if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); active = ifp->if_drv_flags & IFF_DRV_OACTIVE; } _IF_ENQUEUE(ifq, m); IF_UNLOCK(ifq); if (ifp != NULL && !active) (*(ifp)->if_start)(ifp); return (1); } void if_register_com_alloc(u_char type, if_com_alloc_t *a, if_com_free_t *f) { KASSERT(if_com_alloc[type] == NULL, ("if_register_com_alloc: %d already registered", type)); KASSERT(if_com_free[type] == NULL, ("if_register_com_alloc: %d free already registered", type)); if_com_alloc[type] = a; if_com_free[type] = f; } void if_deregister_com_alloc(u_char type) { KASSERT(if_com_alloc[type] != NULL, ("if_deregister_com_alloc: %d not registered", type)); KASSERT(if_com_free[type] != NULL, ("if_deregister_com_alloc: %d free not registered", type)); if_com_alloc[type] = NULL; if_com_free[type] = NULL; } /* API for driver access to network stack owned ifnet.*/ uint64_t if_setbaudrate(struct ifnet *ifp, uint64_t baudrate) { uint64_t oldbrate; oldbrate = ifp->if_baudrate; ifp->if_baudrate = baudrate; return (oldbrate); } uint64_t if_getbaudrate(if_t ifp) { return (((struct ifnet *)ifp)->if_baudrate); } int if_setcapabilities(if_t ifp, int capabilities) { ((struct ifnet *)ifp)->if_capabilities = capabilities; return (0); } int if_setcapabilitiesbit(if_t ifp, int setbit, int clearbit) { ((struct ifnet *)ifp)->if_capabilities |= setbit; ((struct ifnet *)ifp)->if_capabilities &= ~clearbit; return (0); } int if_getcapabilities(if_t ifp) { return ((struct ifnet *)ifp)->if_capabilities; } int if_setcapenable(if_t ifp, int capabilities) { ((struct ifnet *)ifp)->if_capenable = capabilities; return (0); } int if_setcapenablebit(if_t ifp, int setcap, int clearcap) { if(setcap) ((struct ifnet *)ifp)->if_capenable |= setcap; if(clearcap) ((struct ifnet *)ifp)->if_capenable &= ~clearcap; return (0); } const char * if_getdname(if_t ifp) { return ((struct ifnet *)ifp)->if_dname; } int if_togglecapenable(if_t ifp, int togglecap) { ((struct ifnet *)ifp)->if_capenable ^= togglecap; return (0); } int if_getcapenable(if_t ifp) { return ((struct ifnet *)ifp)->if_capenable; } /* * This is largely undesirable because it ties ifnet to a device, but does * provide flexiblity for an embedded product vendor. Should be used with * the understanding that it violates the interface boundaries, and should be * a last resort only. 
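 *
 * The rest of this accessor KPI is the preferred way for a driver to
 * touch its ifnet.  A hedged sketch of typical attach-time use, with
 * flag and capability values that are illustrative only:
 *
 *	if_setsoftc(ifp, sc);
 *	if_setflagbits(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST, 0);
 *	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM, 0);
 *	if_setcapenable(ifp, if_getcapabilities(ifp));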
*/ int if_setdev(if_t ifp, void *dev) { return (0); } int if_setdrvflagbits(if_t ifp, int set_flags, int clear_flags) { ((struct ifnet *)ifp)->if_drv_flags |= set_flags; ((struct ifnet *)ifp)->if_drv_flags &= ~clear_flags; return (0); } int if_getdrvflags(if_t ifp) { return ((struct ifnet *)ifp)->if_drv_flags; } int if_setdrvflags(if_t ifp, int flags) { ((struct ifnet *)ifp)->if_drv_flags = flags; return (0); } int if_setflags(if_t ifp, int flags) { ((struct ifnet *)ifp)->if_flags = flags; return (0); } int if_setflagbits(if_t ifp, int set, int clear) { ((struct ifnet *)ifp)->if_flags |= set; ((struct ifnet *)ifp)->if_flags &= ~clear; return (0); } int if_getflags(if_t ifp) { return ((struct ifnet *)ifp)->if_flags; } int if_clearhwassist(if_t ifp) { ((struct ifnet *)ifp)->if_hwassist = 0; return (0); } int if_sethwassistbits(if_t ifp, int toset, int toclear) { ((struct ifnet *)ifp)->if_hwassist |= toset; ((struct ifnet *)ifp)->if_hwassist &= ~toclear; return (0); } int if_sethwassist(if_t ifp, int hwassist_bit) { ((struct ifnet *)ifp)->if_hwassist = hwassist_bit; return (0); } int if_gethwassist(if_t ifp) { return ((struct ifnet *)ifp)->if_hwassist; } int if_setmtu(if_t ifp, int mtu) { ((struct ifnet *)ifp)->if_mtu = mtu; return (0); } int if_getmtu(if_t ifp) { return ((struct ifnet *)ifp)->if_mtu; } int if_getmtu_family(if_t ifp, int family) { struct domain *dp; for (dp = domains; dp; dp = dp->dom_next) { if (dp->dom_family == family && dp->dom_ifmtu != NULL) return (dp->dom_ifmtu((struct ifnet *)ifp)); } return (((struct ifnet *)ifp)->if_mtu); } int if_setsoftc(if_t ifp, void *softc) { ((struct ifnet *)ifp)->if_softc = softc; return (0); } void * if_getsoftc(if_t ifp) { return ((struct ifnet *)ifp)->if_softc; } void if_setrcvif(struct mbuf *m, if_t ifp) { m->m_pkthdr.rcvif = (struct ifnet *)ifp; } void if_setvtag(struct mbuf *m, uint16_t tag) { m->m_pkthdr.ether_vtag = tag; } uint16_t if_getvtag(struct mbuf *m) { return (m->m_pkthdr.ether_vtag); } int if_sendq_empty(if_t ifp) { return IFQ_DRV_IS_EMPTY(&((struct ifnet *)ifp)->if_snd); } struct ifaddr * if_getifaddr(if_t ifp) { return ((struct ifnet *)ifp)->if_addr; } int if_getamcount(if_t ifp) { return ((struct ifnet *)ifp)->if_amcount; } int if_setsendqready(if_t ifp) { IFQ_SET_READY(&((struct ifnet *)ifp)->if_snd); return (0); } int if_setsendqlen(if_t ifp, int tx_desc_count) { IFQ_SET_MAXLEN(&((struct ifnet *)ifp)->if_snd, tx_desc_count); ((struct ifnet *)ifp)->if_snd.ifq_drv_maxlen = tx_desc_count; return (0); } int if_vlantrunkinuse(if_t ifp) { return ((struct ifnet *)ifp)->if_vlantrunk != NULL?1:0; } int if_input(if_t ifp, struct mbuf* sendmp) { (*((struct ifnet *)ifp)->if_input)((struct ifnet *)ifp, sendmp); return (0); } /* XXX */ #ifndef ETH_ADDR_LEN #define ETH_ADDR_LEN 6 #endif int if_setupmultiaddr(if_t ifp, void *mta, int *cnt, int max) { struct ifmultiaddr *ifma; uint8_t *lmta = (uint8_t *)mta; int mcnt = 0; CK_STAILQ_FOREACH(ifma, &((struct ifnet *)ifp)->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (mcnt == max) break; bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), &lmta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN); mcnt++; } *cnt = mcnt; return (0); } int if_multiaddr_array(if_t ifp, void *mta, int *cnt, int max) { int error; if_maddr_rlock(ifp); error = if_setupmultiaddr(ifp, mta, cnt, max); if_maddr_runlock(ifp); return (error); } int if_multiaddr_count(if_t ifp, int max) { struct ifmultiaddr *ifma; int count; count = 0; if_maddr_rlock(ifp); CK_STAILQ_FOREACH(ifma, &((struct ifnet 
*)ifp)->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; count++; if (count == max) break; } if_maddr_runlock(ifp); return (count); } int if_multi_apply(struct ifnet *ifp, int (*filter)(void *, struct ifmultiaddr *, int), void *arg) { struct ifmultiaddr *ifma; int cnt = 0; if_maddr_rlock(ifp); CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) cnt += filter(arg, ifma, cnt); if_maddr_runlock(ifp); return (cnt); } struct mbuf * if_dequeue(if_t ifp) { struct mbuf *m; IFQ_DRV_DEQUEUE(&((struct ifnet *)ifp)->if_snd, m); return (m); } int if_sendq_prepend(if_t ifp, struct mbuf *m) { IFQ_DRV_PREPEND(&((struct ifnet *)ifp)->if_snd, m); return (0); } int if_setifheaderlen(if_t ifp, int len) { ((struct ifnet *)ifp)->if_hdrlen = len; return (0); } caddr_t if_getlladdr(if_t ifp) { return (IF_LLADDR((struct ifnet *)ifp)); } void * if_gethandle(u_char type) { return (if_alloc(type)); } void if_bpfmtap(if_t ifh, struct mbuf *m) { struct ifnet *ifp = (struct ifnet *)ifh; BPF_MTAP(ifp, m); } void if_etherbpfmtap(if_t ifh, struct mbuf *m) { struct ifnet *ifp = (struct ifnet *)ifh; ETHER_BPF_MTAP(ifp, m); } void if_vlancap(if_t ifh) { struct ifnet *ifp = (struct ifnet *)ifh; VLAN_CAPABILITIES(ifp); } int if_sethwtsomax(if_t ifp, u_int if_hw_tsomax) { ((struct ifnet *)ifp)->if_hw_tsomax = if_hw_tsomax; return (0); } int if_sethwtsomaxsegcount(if_t ifp, u_int if_hw_tsomaxsegcount) { ((struct ifnet *)ifp)->if_hw_tsomaxsegcount = if_hw_tsomaxsegcount; return (0); } int if_sethwtsomaxsegsize(if_t ifp, u_int if_hw_tsomaxsegsize) { ((struct ifnet *)ifp)->if_hw_tsomaxsegsize = if_hw_tsomaxsegsize; return (0); } u_int if_gethwtsomax(if_t ifp) { return (((struct ifnet *)ifp)->if_hw_tsomax); } u_int if_gethwtsomaxsegcount(if_t ifp) { return (((struct ifnet *)ifp)->if_hw_tsomaxsegcount); } u_int if_gethwtsomaxsegsize(if_t ifp) { return (((struct ifnet *)ifp)->if_hw_tsomaxsegsize); } void if_setinitfn(if_t ifp, void (*init_fn)(void *)) { ((struct ifnet *)ifp)->if_init = init_fn; } void if_setioctlfn(if_t ifp, int (*ioctl_fn)(if_t, u_long, caddr_t)) { ((struct ifnet *)ifp)->if_ioctl = (void *)ioctl_fn; } void if_setstartfn(if_t ifp, void (*start_fn)(if_t)) { ((struct ifnet *)ifp)->if_start = (void *)start_fn; } void if_settransmitfn(if_t ifp, if_transmit_fn_t start_fn) { ((struct ifnet *)ifp)->if_transmit = start_fn; } void if_setqflushfn(if_t ifp, if_qflush_fn_t flush_fn) { ((struct ifnet *)ifp)->if_qflush = flush_fn; } void if_setgetcounterfn(if_t ifp, if_get_counter_t fn) { ifp->if_get_counter = fn; } /* Revisit these - These are inline functions originally. */ int drbr_inuse_drv(if_t ifh, struct buf_ring *br) { return drbr_inuse(ifh, br); } struct mbuf* drbr_dequeue_drv(if_t ifh, struct buf_ring *br) { return drbr_dequeue(ifh, br); } int drbr_needs_enqueue_drv(if_t ifh, struct buf_ring *br) { return drbr_needs_enqueue(ifh, br); } int drbr_enqueue_drv(if_t ifh, struct buf_ring *br, struct mbuf *m) { return drbr_enqueue(ifh, br, m); } Index: projects/clang700-import/sys/net/if_var.h =================================================================== --- projects/clang700-import/sys/net/if_var.h (revision 340426) +++ projects/clang700-import/sys/net/if_var.h (revision 340427) @@ -1,782 +1,780 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * From: @(#)if.h 8.1 (Berkeley) 6/10/93 * $FreeBSD$ */ #ifndef _NET_IF_VAR_H_ #define _NET_IF_VAR_H_ /* * Structures defining a network interface, providing a packet * transport mechanism (ala level 0 of the PUP protocols). * * Each interface accepts output datagrams of a specified maximum * length, and provides higher level routines with input datagrams * received from its medium. * * Output occurs when the routine if_output is called, with three parameters: * (*ifp->if_output)(ifp, m, dst, rt) * Here m is the mbuf chain to be sent and dst is the destination address. * The output routine encapsulates the supplied datagram if necessary, * and then transmits it on its medium. * * On input, each interface unwraps the data received by it, and either * places it on the input queue of an internetwork datagram routine * and posts the associated software interrupt, or passes the datagram to a raw * packet input routine. * * Routines exist for locating interfaces by their addresses * or for locating an interface on a certain network, as well as more general * routing and gateway routines maintaining information used to locate * interfaces. These routines live in the files if.c and route.c */ struct rtentry; /* ifa_rtrequest */ struct rt_addrinfo; /* ifa_rtrequest */ struct socket; struct carp_if; struct carp_softc; struct ifvlantrunk; struct route; /* if_output */ struct vnet; struct ifmedia; struct netmap_adapter; struct netdump_methods; #ifdef _KERNEL #include /* ifqueue only? 
*/ #include #include #endif /* _KERNEL */ #include #include #include #include /* XXX */ #include /* struct ifqueue */ #include /* XXX */ #include /* XXX */ #include /* if_link_task */ #define IF_DUNIT_NONE -1 #include CK_STAILQ_HEAD(ifnethead, ifnet); /* we use TAILQs so that the order of */ CK_STAILQ_HEAD(ifaddrhead, ifaddr); /* instantiation is preserved in the list */ CK_STAILQ_HEAD(ifmultihead, ifmultiaddr); CK_STAILQ_HEAD(ifgrouphead, ifg_group); #ifdef _KERNEL VNET_DECLARE(struct pfil_head, link_pfil_hook); /* packet filter hooks */ #define V_link_pfil_hook VNET(link_pfil_hook) #define HHOOK_IPSEC_INET 0 #define HHOOK_IPSEC_INET6 1 #define HHOOK_IPSEC_COUNT 2 VNET_DECLARE(struct hhook_head *, ipsec_hhh_in[HHOOK_IPSEC_COUNT]); VNET_DECLARE(struct hhook_head *, ipsec_hhh_out[HHOOK_IPSEC_COUNT]); #define V_ipsec_hhh_in VNET(ipsec_hhh_in) #define V_ipsec_hhh_out VNET(ipsec_hhh_out) extern epoch_t net_epoch_preempt; extern epoch_t net_epoch; #endif /* _KERNEL */ typedef enum { IFCOUNTER_IPACKETS = 0, IFCOUNTER_IERRORS, IFCOUNTER_OPACKETS, IFCOUNTER_OERRORS, IFCOUNTER_COLLISIONS, IFCOUNTER_IBYTES, IFCOUNTER_OBYTES, IFCOUNTER_IMCASTS, IFCOUNTER_OMCASTS, IFCOUNTER_IQDROPS, IFCOUNTER_OQDROPS, IFCOUNTER_NOPROTO, IFCOUNTERS /* Array size. */ } ift_counter; typedef struct ifnet * if_t; typedef void (*if_start_fn_t)(if_t); typedef int (*if_ioctl_fn_t)(if_t, u_long, caddr_t); typedef void (*if_init_fn_t)(void *); typedef void (*if_qflush_fn_t)(if_t); typedef int (*if_transmit_fn_t)(if_t, struct mbuf *); typedef uint64_t (*if_get_counter_t)(if_t, ift_counter); struct ifnet_hw_tsomax { u_int tsomaxbytes; /* TSO total burst length limit in bytes */ u_int tsomaxsegcount; /* TSO maximum segment count */ u_int tsomaxsegsize; /* TSO maximum segment size in bytes */ }; /* Interface encap request types */ typedef enum { IFENCAP_LL = 1 /* pre-calculate link-layer header */ } ife_type; /* * The structure below allows to request various pre-calculated L2/L3 headers * for different media. Requests varies by type (rtype field). * * IFENCAP_LL type: pre-calculates link header based on address family * and destination lladdr. * * Input data fields: * buf: pointer to destination buffer * bufsize: buffer size * flags: IFENCAP_FLAG_BROADCAST if destination is broadcast * family: address family defined by AF_ constant. * lladdr: pointer to link-layer address * lladdr_len: length of link-layer address * hdata: pointer to L3 header (optional, used for ARP requests). * Output data fields: * buf: encap data is stored here * bufsize: resulting encap length is stored here * lladdr_off: offset of link-layer address from encap hdr start * hdata: L3 header may be altered if necessary */ struct if_encap_req { u_char *buf; /* Destination buffer (w) */ size_t bufsize; /* size of provided buffer (r) */ ife_type rtype; /* request type (r) */ uint32_t flags; /* Request flags (r) */ int family; /* Address family AF_* (r) */ int lladdr_off; /* offset from header start (w) */ int lladdr_len; /* lladdr length (r) */ char *lladdr; /* link-level address pointer (r) */ char *hdata; /* Upper layer header data (rw) */ }; #define IFENCAP_FLAG_BROADCAST 0x02 /* Destination is broadcast */ /* * Network interface send tag support. The storage of "struct * m_snd_tag" comes from the network driver and it is free to allocate * as much additional space as it wants for its own use. 
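 *
 * A hedged allocation sketch (the parameter structures are defined just
 * below; the rate value, in bytes per second, and the mbuf 'm' that
 * supplies the flow id are illustrative only):
 *
 *	union if_snd_tag_alloc_params params = {
 *		.rate_limit.hdr.type = IF_SND_TAG_TYPE_RATE_LIMIT,
 *		.rate_limit.hdr.flowid = m->m_pkthdr.flowid,
 *		.rate_limit.hdr.flowtype = M_HASHTYPE_GET(m),
 *		.rate_limit.max_rate = 1000000,
 *	};
 *	struct m_snd_tag *mst;
 *	int error;
 *
 *	error = ifp->if_snd_tag_alloc(ifp, &params, &mst);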
*/ struct m_snd_tag; #define IF_SND_TAG_TYPE_RATE_LIMIT 0 #define IF_SND_TAG_TYPE_UNLIMITED 1 #define IF_SND_TAG_TYPE_MAX 2 struct if_snd_tag_alloc_header { uint32_t type; /* send tag type, see IF_SND_TAG_XXX */ uint32_t flowid; /* mbuf hash value */ uint32_t flowtype; /* mbuf hash type */ }; struct if_snd_tag_alloc_rate_limit { struct if_snd_tag_alloc_header hdr; uint64_t max_rate; /* in bytes/s */ }; struct if_snd_tag_rate_limit_params { uint64_t max_rate; /* in bytes/s */ uint32_t queue_level; /* 0 (empty) .. 65535 (full) */ #define IF_SND_QUEUE_LEVEL_MIN 0 #define IF_SND_QUEUE_LEVEL_MAX 65535 uint32_t reserved; /* padding */ }; union if_snd_tag_alloc_params { struct if_snd_tag_alloc_header hdr; struct if_snd_tag_alloc_rate_limit rate_limit; struct if_snd_tag_alloc_rate_limit unlimited; }; union if_snd_tag_modify_params { struct if_snd_tag_rate_limit_params rate_limit; struct if_snd_tag_rate_limit_params unlimited; }; union if_snd_tag_query_params { struct if_snd_tag_rate_limit_params rate_limit; struct if_snd_tag_rate_limit_params unlimited; }; typedef int (if_snd_tag_alloc_t)(struct ifnet *, union if_snd_tag_alloc_params *, struct m_snd_tag **); typedef int (if_snd_tag_modify_t)(struct m_snd_tag *, union if_snd_tag_modify_params *); typedef int (if_snd_tag_query_t)(struct m_snd_tag *, union if_snd_tag_query_params *); typedef void (if_snd_tag_free_t)(struct m_snd_tag *); /* * Structure defining a network interface. */ struct ifnet { /* General book keeping of interface lists. */ CK_STAILQ_ENTRY(ifnet) if_link; /* all struct ifnets are chained (CK_) */ LIST_ENTRY(ifnet) if_clones; /* interfaces of a cloner */ CK_STAILQ_HEAD(, ifg_list) if_groups; /* linked list of groups per if (CK_) */ /* protected by if_addr_lock */ u_char if_alloctype; /* if_type at time of allocation */ /* Driver and protocol specific information that remains stable. */ void *if_softc; /* pointer to driver state */ void *if_llsoftc; /* link layer softc */ void *if_l2com; /* pointer to protocol bits */ const char *if_dname; /* driver name */ int if_dunit; /* unit or IF_DUNIT_NONE */ u_short if_index; /* numeric abbreviation for this if */ short if_index_reserved; /* spare space to grow if_index */ char if_xname[IFNAMSIZ]; /* external name (name + unit) */ char *if_description; /* interface description */ /* Variable fields that are touched by the stack and drivers. */ int if_flags; /* up/down, broadcast, etc. */ int if_drv_flags; /* driver-managed status flags */ int if_capabilities; /* interface features & capabilities */ int if_capenable; /* enabled features & capabilities */ void *if_linkmib; /* link-type-specific MIB data */ size_t if_linkmiblen; /* length of above data */ u_int if_refcount; /* reference count */ /* These fields are shared with struct if_data. */ uint8_t if_type; /* ethernet, tokenring, etc */ uint8_t if_addrlen; /* media address length */ uint8_t if_hdrlen; /* media header length */ uint8_t if_link_state; /* current link state */ uint32_t if_mtu; /* maximum transmission unit */ uint32_t if_metric; /* routing metric (external only) */ uint64_t if_baudrate; /* linespeed */ uint64_t if_hwassist; /* HW offload capabilities, see IFCAP */ time_t if_epoch; /* uptime at attach or stat reset */ struct timeval if_lastchange; /* time of last administrative change */ struct ifaltq if_snd; /* output queue (includes altq) */ struct task if_linktask; /* task for link change events */ /* Addresses of different protocol families assigned to this if. 
*/ struct mtx if_addr_lock; /* lock to protect address lists */ /* * if_addrhead is the list of all addresses associated to * an interface. * Some code in the kernel assumes that first element * of the list has type AF_LINK, and contains sockaddr_dl * addresses which store the link-level address and the name * of the interface. * However, access to the AF_LINK address through this * field is deprecated. Use if_addr or ifaddr_byindex() instead. */ struct ifaddrhead if_addrhead; /* linked list of addresses per if */ struct ifmultihead if_multiaddrs; /* multicast addresses configured */ int if_amcount; /* number of all-multicast requests */ struct ifaddr *if_addr; /* pointer to link-level address */ void *if_hw_addr; /* hardware link-level address */ const u_int8_t *if_broadcastaddr; /* linklevel broadcast bytestring */ struct mtx if_afdata_lock; void *if_afdata[AF_MAX]; int if_afdata_initialized; /* Additional features hung off the interface. */ u_int if_fib; /* interface FIB */ struct vnet *if_vnet; /* pointer to network stack instance */ struct vnet *if_home_vnet; /* where this ifnet originates from */ struct ifvlantrunk *if_vlantrunk; /* pointer to 802.1q data */ struct bpf_if *if_bpf; /* packet filter structure */ int if_pcount; /* number of promiscuous listeners */ void *if_bridge; /* bridge glue */ void *if_lagg; /* lagg glue */ void *if_pf_kif; /* pf glue */ struct carp_if *if_carp; /* carp interface structure */ struct label *if_label; /* interface MAC label */ struct netmap_adapter *if_netmap; /* netmap(4) softc */ /* Various procedures of the layer2 encapsulation and drivers. */ int (*if_output) /* output routine (enqueue) */ (struct ifnet *, struct mbuf *, const struct sockaddr *, struct route *); void (*if_input) /* input routine (from h/w driver) */ (struct ifnet *, struct mbuf *); struct mbuf *(*if_bridge_input)(struct ifnet *, struct mbuf *); int (*if_bridge_output)(struct ifnet *, struct mbuf *, struct sockaddr *, struct rtentry *); void (*if_bridge_linkstate)(struct ifnet *ifp); if_start_fn_t if_start; /* initiate output routine */ if_ioctl_fn_t if_ioctl; /* ioctl routine */ if_init_fn_t if_init; /* Init routine */ int (*if_resolvemulti) /* validate/resolve multicast */ (struct ifnet *, struct sockaddr **, struct sockaddr *); if_qflush_fn_t if_qflush; /* flush any queue */ if_transmit_fn_t if_transmit; /* initiate output routine */ void (*if_reassign) /* reassign to vnet routine */ (struct ifnet *, struct vnet *, char *); if_get_counter_t if_get_counter; /* get counter values */ int (*if_requestencap) /* make link header from request */ (struct ifnet *, struct if_encap_req *); /* Statistics. */ counter_u64_t if_counters[IFCOUNTERS]; /* Stuff that's only temporary and doesn't belong here. */ /* * Network adapter TSO limits: * =========================== * * If the "if_hw_tsomax" field is zero the maximum segment * length limit does not apply. If the "if_hw_tsomaxsegcount" * or the "if_hw_tsomaxsegsize" field is zero the TSO segment * count limit does not apply. If all three fields are zero, * there is no TSO limit. * * NOTE: The TSO limits should reflect the values used in the * BUSDMA tag a network adapter is using to load a mbuf chain * for transmission. The TCP/IP network stack will subtract * space for all linklevel and protocol level headers and * ensure that the full mbuf chain passed to the network * adapter fits within the given limits. 
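 *
 * A hedged driver-side sketch of advertising such limits (the numbers
 * are illustrative only):
 *
 *	struct ifnet_hw_tsomax tsomax = {
 *		.tsomaxbytes = 65536,
 *		.tsomaxsegcount = 32,
 *		.tsomaxsegsize = 4096,
 *	};
 *
 *	if_hw_tsomax_update(ifp, &tsomax);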
*/ u_int if_hw_tsomax; /* TSO maximum size in bytes */ u_int if_hw_tsomaxsegcount; /* TSO maximum segment count */ u_int if_hw_tsomaxsegsize; /* TSO maximum segment size in bytes */ /* * Network adapter send tag support: */ if_snd_tag_alloc_t *if_snd_tag_alloc; if_snd_tag_modify_t *if_snd_tag_modify; if_snd_tag_query_t *if_snd_tag_query; if_snd_tag_free_t *if_snd_tag_free; /* Ethernet PCP */ uint8_t if_pcp; /* * Netdump hooks to be called while dumping. */ struct netdump_methods *if_netdump_methods; struct epoch_context if_epoch_ctx; - struct epoch_tracker if_addr_et; - struct epoch_tracker if_maddr_et; /* * Spare fields to be added before branching a stable branch, so * that structure can be enhanced without changing the kernel * binary interface. */ int if_ispare[4]; /* general use */ }; /* for compatibility with other BSDs */ #define if_name(ifp) ((ifp)->if_xname) /* * Locks for address lists on the network interface. */ #define IF_ADDR_LOCK_INIT(if) mtx_init(&(if)->if_addr_lock, "if_addr_lock", NULL, MTX_DEF) #define IF_ADDR_LOCK_DESTROY(if) mtx_destroy(&(if)->if_addr_lock) #define IF_ADDR_RLOCK(if) struct epoch_tracker if_addr_et; epoch_enter_preempt(net_epoch_preempt, &if_addr_et); #define IF_ADDR_RUNLOCK(if) epoch_exit_preempt(net_epoch_preempt, &if_addr_et); #define IF_ADDR_WLOCK(if) mtx_lock(&(if)->if_addr_lock) #define IF_ADDR_WUNLOCK(if) mtx_unlock(&(if)->if_addr_lock) #define IF_ADDR_LOCK_ASSERT(if) MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(if)->if_addr_lock)) #define IF_ADDR_WLOCK_ASSERT(if) mtx_assert(&(if)->if_addr_lock, MA_OWNED) #define NET_EPOCH_ENTER() struct epoch_tracker nep_et; epoch_enter_preempt(net_epoch_preempt, &nep_et) #define NET_EPOCH_ENTER_ET(et) epoch_enter_preempt(net_epoch_preempt, &(et)) #define NET_EPOCH_EXIT() epoch_exit_preempt(net_epoch_preempt, &nep_et) #define NET_EPOCH_EXIT_ET(et) epoch_exit_preempt(net_epoch_preempt, &(et)) #define NET_EPOCH_WAIT() epoch_wait_preempt(net_epoch_preempt) /* * Function variations on locking macros intended to be used by loadable * kernel modules in order to divorce them from the internals of address list * locking. 
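 *
 * A hedged example of the pattern these wrappers support from a module:
 *
 *	struct ifaddr *ifa;
 *
 *	if_addr_rlock(ifp);
 *	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
 *		...inspect ifa->ifa_addr...
 *	if_addr_runlock(ifp);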
*/ void if_addr_rlock(struct ifnet *ifp); /* if_addrhead */ void if_addr_runlock(struct ifnet *ifp); /* if_addrhead */ void if_maddr_rlock(if_t ifp); /* if_multiaddrs */ void if_maddr_runlock(if_t ifp); /* if_multiaddrs */ #ifdef _KERNEL #ifdef _SYS_EVENTHANDLER_H_ /* interface link layer address change event */ typedef void (*iflladdr_event_handler_t)(void *, struct ifnet *); EVENTHANDLER_DECLARE(iflladdr_event, iflladdr_event_handler_t); /* interface address change event */ typedef void (*ifaddr_event_handler_t)(void *, struct ifnet *); EVENTHANDLER_DECLARE(ifaddr_event, ifaddr_event_handler_t); typedef void (*ifaddr_event_ext_handler_t)(void *, struct ifnet *, struct ifaddr *, int); EVENTHANDLER_DECLARE(ifaddr_event_ext, ifaddr_event_ext_handler_t); #define IFADDR_EVENT_ADD 0 #define IFADDR_EVENT_DEL 1 /* new interface arrival event */ typedef void (*ifnet_arrival_event_handler_t)(void *, struct ifnet *); EVENTHANDLER_DECLARE(ifnet_arrival_event, ifnet_arrival_event_handler_t); /* interface departure event */ typedef void (*ifnet_departure_event_handler_t)(void *, struct ifnet *); EVENTHANDLER_DECLARE(ifnet_departure_event, ifnet_departure_event_handler_t); /* Interface link state change event */ typedef void (*ifnet_link_event_handler_t)(void *, struct ifnet *, int); EVENTHANDLER_DECLARE(ifnet_link_event, ifnet_link_event_handler_t); /* Interface up/down event */ #define IFNET_EVENT_UP 0 #define IFNET_EVENT_DOWN 1 #define IFNET_EVENT_PCP 2 /* priority code point, PCP */ typedef void (*ifnet_event_fn)(void *, struct ifnet *ifp, int event); EVENTHANDLER_DECLARE(ifnet_event, ifnet_event_fn); #endif /* _SYS_EVENTHANDLER_H_ */ /* * interface groups */ struct ifg_group { char ifg_group[IFNAMSIZ]; u_int ifg_refcnt; void *ifg_pf_kif; CK_STAILQ_HEAD(, ifg_member) ifg_members; /* (CK_) */ CK_STAILQ_ENTRY(ifg_group) ifg_next; /* (CK_) */ }; struct ifg_member { CK_STAILQ_ENTRY(ifg_member) ifgm_next; /* (CK_) */ struct ifnet *ifgm_ifp; }; struct ifg_list { struct ifg_group *ifgl_group; CK_STAILQ_ENTRY(ifg_list) ifgl_next; /* (CK_) */ }; #ifdef _SYS_EVENTHANDLER_H_ /* group attach event */ typedef void (*group_attach_event_handler_t)(void *, struct ifg_group *); EVENTHANDLER_DECLARE(group_attach_event, group_attach_event_handler_t); /* group detach event */ typedef void (*group_detach_event_handler_t)(void *, struct ifg_group *); EVENTHANDLER_DECLARE(group_detach_event, group_detach_event_handler_t); /* group change event */ typedef void (*group_change_event_handler_t)(void *, const char *); EVENTHANDLER_DECLARE(group_change_event, group_change_event_handler_t); #endif /* _SYS_EVENTHANDLER_H_ */ #define IF_AFDATA_LOCK_INIT(ifp) \ mtx_init(&(ifp)->if_afdata_lock, "if_afdata", NULL, MTX_DEF) #define IF_AFDATA_WLOCK(ifp) mtx_lock(&(ifp)->if_afdata_lock) #define IF_AFDATA_RLOCK(ifp) struct epoch_tracker if_afdata_et; epoch_enter_preempt(net_epoch_preempt, &if_afdata_et) #define IF_AFDATA_WUNLOCK(ifp) mtx_unlock(&(ifp)->if_afdata_lock) #define IF_AFDATA_RUNLOCK(ifp) epoch_exit_preempt(net_epoch_preempt, &if_afdata_et) #define IF_AFDATA_LOCK(ifp) IF_AFDATA_WLOCK(ifp) #define IF_AFDATA_UNLOCK(ifp) IF_AFDATA_WUNLOCK(ifp) #define IF_AFDATA_TRYLOCK(ifp) mtx_trylock(&(ifp)->if_afdata_lock) #define IF_AFDATA_DESTROY(ifp) mtx_destroy(&(ifp)->if_afdata_lock) #define IF_AFDATA_LOCK_ASSERT(ifp) MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(ifp)->if_afdata_lock)) #define IF_AFDATA_RLOCK_ASSERT(ifp) MPASS(in_epoch(net_epoch_preempt)); #define IF_AFDATA_WLOCK_ASSERT(ifp) mtx_assert(&(ifp)->if_afdata_lock, MA_OWNED) 
#define IF_AFDATA_UNLOCK_ASSERT(ifp) mtx_assert(&(ifp)->if_afdata_lock, MA_NOTOWNED) /* * 72 was chosen below because it is the size of a TCP/IP * header (40) + the minimum mss (32). */ #define IF_MINMTU 72 #define IF_MAXMTU 65535 #define TOEDEV(ifp) ((ifp)->if_llsoftc) /* * The ifaddr structure contains information about one address * of an interface. They are maintained by the different address families, * are allocated and attached when an address is set, and are linked * together so all addresses for an interface can be located. * * NOTE: a 'struct ifaddr' is always at the beginning of a larger * chunk of malloc'ed memory, where we store the three addresses * (ifa_addr, ifa_dstaddr and ifa_netmask) referenced here. */ struct ifaddr { struct sockaddr *ifa_addr; /* address of interface */ struct sockaddr *ifa_dstaddr; /* other end of p-to-p link */ #define ifa_broadaddr ifa_dstaddr /* broadcast address interface */ struct sockaddr *ifa_netmask; /* used to determine subnet */ struct ifnet *ifa_ifp; /* back-pointer to interface */ struct carp_softc *ifa_carp; /* pointer to CARP data */ CK_STAILQ_ENTRY(ifaddr) ifa_link; /* queue macro glue */ void (*ifa_rtrequest) /* check or clean routes (+ or -)'d */ (int, struct rtentry *, struct rt_addrinfo *); u_short ifa_flags; /* mostly rt_flags for cloning */ #define IFA_ROUTE RTF_UP /* route installed */ #define IFA_RTSELF RTF_HOST /* loopback route to self installed */ u_int ifa_refcnt; /* references to this structure */ counter_u64_t ifa_ipackets; counter_u64_t ifa_opackets; counter_u64_t ifa_ibytes; counter_u64_t ifa_obytes; struct epoch_context ifa_epoch_ctx; }; struct ifaddr * ifa_alloc(size_t size, int flags); void ifa_free(struct ifaddr *ifa); void ifa_ref(struct ifaddr *ifa); /* * Multicast address structure. This is analogous to the ifaddr * structure except that it keeps track of multicast addresses. */ #define IFMA_F_ENQUEUED 0x1 struct ifmultiaddr { CK_STAILQ_ENTRY(ifmultiaddr) ifma_link; /* queue macro glue */ struct sockaddr *ifma_addr; /* address this membership is for */ struct sockaddr *ifma_lladdr; /* link-layer translation, if any */ struct ifnet *ifma_ifp; /* back-pointer to interface */ u_int ifma_refcount; /* reference count */ int ifma_flags; void *ifma_protospec; /* protocol-specific state, if any */ struct ifmultiaddr *ifma_llifma; /* pointer to ifma for ifma_lladdr */ struct epoch_context ifma_epoch_ctx; }; extern struct rwlock ifnet_rwlock; extern struct sx ifnet_sxlock; #define IFNET_WLOCK() do { \ sx_xlock(&ifnet_sxlock); \ rw_wlock(&ifnet_rwlock); \ } while (0) #define IFNET_WUNLOCK() do { \ rw_wunlock(&ifnet_rwlock); \ sx_xunlock(&ifnet_sxlock); \ } while (0) /* * To assert the ifnet lock, you must know not only whether it's for read or * write, but also whether it was acquired with sleep support or not. */ #define IFNET_RLOCK_ASSERT() sx_assert(&ifnet_sxlock, SA_SLOCKED) #define IFNET_RLOCK_NOSLEEP_ASSERT() MPASS(in_epoch(net_epoch_preempt)) #define IFNET_WLOCK_ASSERT() do { \ sx_assert(&ifnet_sxlock, SA_XLOCKED); \ rw_assert(&ifnet_rwlock, RA_WLOCKED); \ } while (0) #define IFNET_RLOCK() sx_slock(&ifnet_sxlock) #define IFNET_RLOCK_NOSLEEP() struct epoch_tracker ifnet_rlock_et; epoch_enter_preempt(net_epoch_preempt, &ifnet_rlock_et) #define IFNET_RUNLOCK() sx_sunlock(&ifnet_sxlock) #define IFNET_RUNLOCK_NOSLEEP() epoch_exit_preempt(net_epoch_preempt, &ifnet_rlock_et) /* * Look up an ifnet given its index; the _ref variant also acquires a * reference that must be freed using if_rele(). 
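 *
 * A hedged sketch of the intended pattern:
 *
 *	ifp = ifnet_byindex_ref(idx);
 *	if (ifp == NULL)
 *		return (ENXIO);
 *	...use ifp...
 *	if_rele(ifp);
 *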
It is almost always a bug * to call ifnet_byindex() instead of ifnet_byindex_ref(). */ struct ifnet *ifnet_byindex(u_short idx); struct ifnet *ifnet_byindex_locked(u_short idx); struct ifnet *ifnet_byindex_ref(u_short idx); /* * Given the index, ifaddr_byindex() returns the one and only * link-level ifaddr for the interface. You are not supposed to use * it to traverse the list of addresses associated to the interface. */ struct ifaddr *ifaddr_byindex(u_short idx); VNET_DECLARE(struct ifnethead, ifnet); VNET_DECLARE(struct ifgrouphead, ifg_head); VNET_DECLARE(int, if_index); VNET_DECLARE(struct ifnet *, loif); /* first loopback interface */ #define V_ifnet VNET(ifnet) #define V_ifg_head VNET(ifg_head) #define V_if_index VNET(if_index) #define V_loif VNET(loif) #ifdef MCAST_VERBOSE #define MCDPRINTF printf #else #define MCDPRINTF(...) #endif int if_addgroup(struct ifnet *, const char *); int if_delgroup(struct ifnet *, const char *); int if_addmulti(struct ifnet *, struct sockaddr *, struct ifmultiaddr **); int if_allmulti(struct ifnet *, int); struct ifnet* if_alloc(u_char); void if_attach(struct ifnet *); void if_dead(struct ifnet *); int if_delmulti(struct ifnet *, struct sockaddr *); void if_delmulti_ifma(struct ifmultiaddr *); void if_delmulti_ifma_flags(struct ifmultiaddr *, int flags); void if_detach(struct ifnet *); void if_purgeaddrs(struct ifnet *); void if_delallmulti(struct ifnet *); void if_down(struct ifnet *); struct ifmultiaddr * if_findmulti(struct ifnet *, const struct sockaddr *); void if_freemulti(struct ifmultiaddr *ifma); void if_free(struct ifnet *); void if_initname(struct ifnet *, const char *, int); void if_link_state_change(struct ifnet *, int); int if_printf(struct ifnet *, const char *, ...) __printflike(2, 3); void if_ref(struct ifnet *); void if_rele(struct ifnet *); int if_setlladdr(struct ifnet *, const u_char *, int); int if_tunnel_check_nesting(struct ifnet *, struct mbuf *, uint32_t, int); void if_up(struct ifnet *); int ifioctl(struct socket *, u_long, caddr_t, struct thread *); int ifpromisc(struct ifnet *, int); struct ifnet *ifunit(const char *); struct ifnet *ifunit_ref(const char *); int ifa_add_loopback_route(struct ifaddr *, struct sockaddr *); int ifa_del_loopback_route(struct ifaddr *, struct sockaddr *); int ifa_switch_loopback_route(struct ifaddr *, struct sockaddr *); struct ifaddr *ifa_ifwithaddr(const struct sockaddr *); int ifa_ifwithaddr_check(const struct sockaddr *); struct ifaddr *ifa_ifwithbroadaddr(const struct sockaddr *, int); struct ifaddr *ifa_ifwithdstaddr(const struct sockaddr *, int); struct ifaddr *ifa_ifwithnet(const struct sockaddr *, int, int); struct ifaddr *ifa_ifwithroute(int, const struct sockaddr *, struct sockaddr *, u_int); struct ifaddr *ifaof_ifpforaddr(const struct sockaddr *, struct ifnet *); int ifa_preferred(struct ifaddr *, struct ifaddr *); int if_simloop(struct ifnet *ifp, struct mbuf *m, int af, int hlen); typedef void *if_com_alloc_t(u_char type, struct ifnet *ifp); typedef void if_com_free_t(void *com, u_char type); void if_register_com_alloc(u_char type, if_com_alloc_t *a, if_com_free_t *f); void if_deregister_com_alloc(u_char type); void if_data_copy(struct ifnet *, struct if_data *); uint64_t if_get_counter_default(struct ifnet *, ift_counter); void if_inc_counter(struct ifnet *, ift_counter, int64_t); #define IF_LLADDR(ifp) \ LLADDR((struct sockaddr_dl *)((ifp)->if_addr->ifa_addr)) uint64_t if_setbaudrate(if_t ifp, uint64_t baudrate); uint64_t if_getbaudrate(if_t ifp); int if_setcapabilities(if_t 
ifp, int capabilities); int if_setcapabilitiesbit(if_t ifp, int setbit, int clearbit); int if_getcapabilities(if_t ifp); int if_togglecapenable(if_t ifp, int togglecap); int if_setcapenable(if_t ifp, int capenable); int if_setcapenablebit(if_t ifp, int setcap, int clearcap); int if_getcapenable(if_t ifp); const char *if_getdname(if_t ifp); int if_setdev(if_t ifp, void *dev); int if_setdrvflagbits(if_t ifp, int if_setflags, int clear_flags); int if_getdrvflags(if_t ifp); int if_setdrvflags(if_t ifp, int flags); int if_clearhwassist(if_t ifp); int if_sethwassistbits(if_t ifp, int toset, int toclear); int if_sethwassist(if_t ifp, int hwassist_bit); int if_gethwassist(if_t ifp); int if_setsoftc(if_t ifp, void *softc); void *if_getsoftc(if_t ifp); int if_setflags(if_t ifp, int flags); int if_gethwaddr(if_t ifp, struct ifreq *); int if_setmtu(if_t ifp, int mtu); int if_getmtu(if_t ifp); int if_getmtu_family(if_t ifp, int family); int if_setflagbits(if_t ifp, int set, int clear); int if_getflags(if_t ifp); int if_sendq_empty(if_t ifp); int if_setsendqready(if_t ifp); int if_setsendqlen(if_t ifp, int tx_desc_count); int if_sethwtsomax(if_t ifp, u_int if_hw_tsomax); int if_sethwtsomaxsegcount(if_t ifp, u_int if_hw_tsomaxsegcount); int if_sethwtsomaxsegsize(if_t ifp, u_int if_hw_tsomaxsegsize); u_int if_gethwtsomax(if_t ifp); u_int if_gethwtsomaxsegcount(if_t ifp); u_int if_gethwtsomaxsegsize(if_t ifp); int if_input(if_t ifp, struct mbuf* sendmp); int if_sendq_prepend(if_t ifp, struct mbuf *m); struct mbuf *if_dequeue(if_t ifp); int if_setifheaderlen(if_t ifp, int len); void if_setrcvif(struct mbuf *m, if_t ifp); void if_setvtag(struct mbuf *m, u_int16_t tag); u_int16_t if_getvtag(struct mbuf *m); int if_vlantrunkinuse(if_t ifp); caddr_t if_getlladdr(if_t ifp); void *if_gethandle(u_char); void if_bpfmtap(if_t ifp, struct mbuf *m); void if_etherbpfmtap(if_t ifp, struct mbuf *m); void if_vlancap(if_t ifp); int if_setupmultiaddr(if_t ifp, void *mta, int *cnt, int max); int if_multiaddr_array(if_t ifp, void *mta, int *cnt, int max); int if_multiaddr_count(if_t ifp, int max); int if_multi_apply(struct ifnet *ifp, int (*filter)(void *, struct ifmultiaddr *, int), void *arg); int if_getamcount(if_t ifp); struct ifaddr * if_getifaddr(if_t ifp); /* Functions */ void if_setinitfn(if_t ifp, void (*)(void *)); void if_setioctlfn(if_t ifp, int (*)(if_t, u_long, caddr_t)); void if_setstartfn(if_t ifp, void (*)(if_t)); void if_settransmitfn(if_t ifp, if_transmit_fn_t); void if_setqflushfn(if_t ifp, if_qflush_fn_t); void if_setgetcounterfn(if_t ifp, if_get_counter_t); /* Revisit the below. 
These are inline functions originally */ int drbr_inuse_drv(if_t ifp, struct buf_ring *br); struct mbuf* drbr_dequeue_drv(if_t ifp, struct buf_ring *br); int drbr_needs_enqueue_drv(if_t ifp, struct buf_ring *br); int drbr_enqueue_drv(if_t ifp, struct buf_ring *br, struct mbuf *m); /* TSO */ void if_hw_tsomax_common(if_t ifp, struct ifnet_hw_tsomax *); int if_hw_tsomax_update(if_t ifp, struct ifnet_hw_tsomax *); /* accessors for struct ifreq */ void *ifr_data_get_ptr(void *ifrp); int ifhwioctl(u_long, struct ifnet *, caddr_t, struct thread *); #ifdef DEVICE_POLLING enum poll_cmd { POLL_ONLY, POLL_AND_CHECK_STATUS }; typedef int poll_handler_t(if_t ifp, enum poll_cmd cmd, int count); int ether_poll_register(poll_handler_t *h, if_t ifp); int ether_poll_deregister(if_t ifp); #endif /* DEVICE_POLLING */ #endif /* _KERNEL */ #include /* XXXAO: temporary unconditional include */ #endif /* !_NET_IF_VAR_H_ */ Index: projects/clang700-import/sys/powerpc/conf/GENERIC64 =================================================================== --- projects/clang700-import/sys/powerpc/conf/GENERIC64 (revision 340426) +++ projects/clang700-import/sys/powerpc/conf/GENERIC64 (revision 340427) @@ -1,238 +1,248 @@ # # GENERIC -- Generic kernel configuration file for FreeBSD/powerpc # # For more information on this file, please read the handbook section on # Kernel Configuration Files: # # https://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html # # The handbook is also available locally in /usr/share/doc/handbook # if you've installed the doc distribution, otherwise always see the # FreeBSD World Wide Web server (https://www.FreeBSD.org/) for the # latest information. # # An exhaustive list of options and more detailed explanations of the # device lines is also present in the ../../conf/NOTES and NOTES files. # If you are in doubt as to the purpose or necessity of a line, check first # in NOTES. # # $FreeBSD$ cpu AIM ident GENERIC machine powerpc powerpc64 makeoptions DEBUG=-g #Build kernel with gdb(1) debug symbols makeoptions WITH_CTF=1 # Platform support options POWERMAC #NewWorld Apple PowerMacs options PS3 #Sony Playstation 3 options MAMBO #IBM Mambo Full System Simulator options PSERIES #PAPR-compliant systems (e.g. IBM p) options POWERNV #Non-virtualized OpenPOWER systems options FDT #Flattened Device Tree options SCHED_ULE #ULE scheduler options PREEMPTION #Enable kernel thread preemption options VIMAGE # Subsystem virtualization, e.g. 
VNET options INET #InterNETworking options INET6 #IPv6 communications protocols +options IPSEC # IP (v4/v6) security +options IPSEC_SUPPORT # Allow kldload of ipsec and tcpmd5 +options TCP_OFFLOAD # TCP offload +options TCP_BLACKBOX # Enhanced TCP event logging options TCP_HHOOK # hhook(9) framework for TCP options TCP_RFC7413 # TCP Fast Open options SCTP #Stream Control Transmission Protocol options FFS #Berkeley Fast Filesystem options SOFTUPDATES #Enable FFS soft updates support options UFS_ACL #Support for access control lists options UFS_DIRHASH #Improve performance on big directories options UFS_GJOURNAL #Enable gjournal-based UFS journaling options QUOTA #Enable disk quotas for UFS options MD_ROOT #MD is a potential root device options NFSCL #Network Filesystem Client options NFSD #Network Filesystem Server options NFSLOCKD #Network Lock Manager options NFS_ROOT #NFS usable as root device options MSDOSFS #MSDOS Filesystem options CD9660 #ISO 9660 Filesystem options PROCFS #Process filesystem (requires PSEUDOFS) options PSEUDOFS #Pseudo-filesystem framework options GEOM_PART_APM #Apple Partition Maps. options GEOM_PART_GPT #GUID Partition Tables. options GEOM_LABEL #Provides labelization options COMPAT_FREEBSD32 #Compatible with FreeBSD/powerpc binaries options COMPAT_FREEBSD5 #Compatible with FreeBSD5 options COMPAT_FREEBSD6 #Compatible with FreeBSD6 options COMPAT_FREEBSD7 #Compatible with FreeBSD7 options COMPAT_FREEBSD9 # Compatible with FreeBSD9 options COMPAT_FREEBSD10 # Compatible with FreeBSD10 options COMPAT_FREEBSD11 # Compatible with FreeBSD11 options SCSI_DELAY=5000 #Delay (in ms) before probing SCSI options KTRACE #ktrace(1) syscall trace support options STACK #stack(9) support options SYSVSHM #SYSV-style shared memory options SYSVMSG #SYSV-style message queues options SYSVSEM #SYSV-style semaphores options _KPOSIX_PRIORITY_SCHEDULING #Posix P1003_1B real-time extensions options PRINTF_BUFR_SIZE=128 # Prevent printf output being interspersed. options HWPMC_HOOKS # Necessary kernel hooks for hwpmc(4) options AUDIT # Security event auditing options CAPABILITY_MODE # Capsicum capability mode options CAPABILITIES # Capsicum capabilities options MAC # TrustedBSD MAC Framework options KDTRACE_HOOKS # Kernel DTrace hooks options DDB_CTF # Kernel ELF linker loads CTF data options INCLUDE_CONFIG_FILE # Include this file in kernel +options RACCT # Resource accounting framework +options RACCT_DEFAULT_TO_DISABLED # Set kern.racct.enable=0 by default +options RCTL # Resource limits # Debugging support. Always need this: options KDB # Enable kernel debugger support. options KDB_TRACE # Print a stack trace for a panic. # For full debugger support use (turn off in stable branch): options DDB #Support DDB #options DEADLKRES #Enable the deadlock resolver options INVARIANTS #Enable calls of extra sanity checking options INVARIANT_SUPPORT #Extra sanity checks of internal structures, required by INVARIANTS options WITNESS #Enable checks to detect deadlocks and cycles options WITNESS_SKIPSPIN #Don't run witness on spinlocks for speed options MALLOC_DEBUG_MAXZONES=8 # Separate malloc(9) zones options VERBOSE_SYSINIT=0 # Support debug.verbose_sysinit, off by default # Kernel dump features. 
options EKCD # Support for encrypted kernel dumps options GZIO # gzip-compressed kernel and user dumps options ZSTDIO # zstd-compressed kernel and user dumps options NETDUMP # netdump(4) client support # Make an SMP-capable kernel by default options SMP # Symmetric MultiProcessor Kernel # CPU frequency control device cpufreq # Standard busses device pci options PCI_HP # PCI-Express native HotPlug device agp # ATA controllers device ahci # AHCI-compatible SATA controllers device ata # Legacy ATA/SATA controllers device mvs # Marvell 88SX50XX/88SX60XX/88SX70XX/SoC SATA device siis # SiliconImage SiI3124/SiI3132/SiI3531 SATA # NVM Express (NVMe) support device nvme # base NVMe driver options NVME_USE_NVD=0 # prefer the cam(4) based nda(4) driver device nvd # expose NVMe namespaces as disks, depends on nvme # SCSI Controllers device ahc # AHA2940 and onboard AIC7xxx devices options AHC_ALLOW_MEMIO # Attempt to use memory mapped I/O device isp # Qlogic family device ispfw # Firmware module for Qlogic host adapters device mpt # LSI-Logic MPT-Fusion device mps # LSI-Logic MPT-Fusion 2 device sym # NCR/Symbios/LSI Logic 53C8XX/53C1010/53C1510D # ATA/SCSI peripherals device scbus # SCSI bus (required for ATA/SCSI) +device ch # SCSI media changers device da # Direct Access (disks) device sa # Sequential Access (tape etc) device cd # CD device pass # Passthrough device (direct ATA/SCSI access) +device ses # Enclosure Service (SES and SAF-TE) # vt is the default console driver, resembling an SCO console device vt # Core console driver device kbdmux # Serial (COM) ports device scc device uart device uart_z8530 # Ethernet hardware device em # Intel PRO/1000 Gigabit Ethernet Family device ix # Intel PRO/10GbE PCIE PF Ethernet Family device ixv # Intel PRO/10GbE PCIE VF Ethernet Family device glc # Sony Playstation 3 Ethernet device llan # IBM pSeries Virtual Ethernet device cxgbe # Chelsio 10/25G NIC # PCI Ethernet NICs that use the common MII bus controller code. device miibus # MII bus support device bge # Broadcom BCM570xx Gigabit Ethernet device gem # Sun GEM/Sun ERI/Apple GMAC device dc # DEC/Intel 21143 and various workalikes device fxp # Intel EtherExpress PRO/100B (82557, 82558) device re # RealTek 8139C+/8169/8169S/8110S device rl # RealTek 8129/8139 # Pseudo devices. +device crypto # core crypto support device loop # Network loopback device random # Entropy device device ether # Ethernet support device vlan # 802.1Q VLAN support device tun # Packet tunnel. device md # Memory "disks" device ofwd # Open Firmware disks device gif # IPv6 and IPv4 tunneling device firmware # firmware assist module # The `bpf' device enables the Berkeley Packet Filter. # Be aware of the administrative consequences of enabling this! # Note that 'bpf' is required for DHCP. 
device bpf #Berkeley packet filter # USB support options USB_DEBUG # enable debug msgs device uhci # UHCI PCI->USB interface device ohci # OHCI PCI->USB interface device ehci # EHCI PCI->USB interface device xhci # XHCI PCI->USB interface device usb # USB Bus (required) device uhid # "Human Interface Devices" device ukbd # Keyboard options KBD_INSTALL_CDEV # install a CDEV entry in /dev -device ulpt # Printer device umass # Disks/Mass storage - Requires scbus and da0 device ums # Mouse -device urio # Diamond Rio 500 MP3 player # USB Ethernet device aue # ADMtek USB Ethernet device axe # ASIX Electronics USB Ethernet device cdce # Generic USB over Ethernet device cue # CATC USB Ethernet device kue # Kawasaki LSI USB Ethernet # Wireless NIC cards options IEEE80211_SUPPORT_MESH options AH_SUPPORT_AR5416 # FireWire support device firewire # FireWire bus code device sbp # SCSI over FireWire (Requires scbus and da) device fwe # Ethernet over FireWire (non-standard!) # Misc device iicbus # I2C bus code device iic device kiic # Keywest I2C device ad7417 # PowerMac7,2 temperature sensor device ds1631 # PowerMac11,2 temperature sensor device ds1775 # PowerMac7,2 temperature sensor device fcu # Apple Fan Control Unit device max6690 # PowerMac7,2 temperature sensor device powermac_nvram # Open Firmware configuration NVRAM device smu # Apple System Management Unit device atibl # ATI-based backlight driver for PowerBooks/iBooks device nvbl # nVidia-based backlight driver for PowerBooks/iBooks # ADB support device adb device pmu # Sound support device sound # Generic sound driver (required) device snd_ai2s # Apple I2S audio device snd_uaudio # USB Audio +# Netmap provides direct access to TX/RX rings on supported NICs +device netmap # netmap(4) support Index: projects/clang700-import/sys/riscv/include/cpu.h =================================================================== --- projects/clang700-import/sys/riscv/include/cpu.h (revision 340426) +++ projects/clang700-import/sys/riscv/include/cpu.h (revision 340427) @@ -1,95 +1,95 @@ /*- * Copyright (c) 2015-2018 Ruslan Bukin * All rights reserved. * * Portions of this software were developed by SRI International and the * University of Cambridge Computer Laboratory under DARPA/AFRL contract * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by the University of Cambridge * Computer Laboratory as part of the CTSRD Project, with support from the * UK Higher Education Innovation Fund (HEIF). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_CPU_H_ #define _MACHINE_CPU_H_ #include +#include #include #define TRAPF_PC(tfp) ((tfp)->tf_ra) #define TRAPF_USERMODE(tfp) (((tfp)->tf_sstatus & SSTATUS_SPP) == 0) #define cpu_getstack(td) ((td)->td_frame->tf_sp) #define cpu_setstack(td, sp) ((td)->td_frame->tf_sp = (sp)) #define cpu_spinwait() /* nothing */ #define cpu_lock_delay() DELAY(1) #ifdef _KERNEL /* * 0x0000 CPU ID unimplemented * 0x0001 UC Berkeley Rocket repo * 0x0002­0x7FFE Reserved for open-source repos * 0x7FFF Reserved for extension * 0x8000 Reserved for anonymous source * 0x8001­0xFFFE Reserved for proprietary implementations * 0xFFFF Reserved for extension */ #define CPU_IMPL_SHIFT 0 #define CPU_IMPL_MASK (0xffff << CPU_IMPL_SHIFT) #define CPU_IMPL(mimpid) ((mimpid & CPU_IMPL_MASK) >> CPU_IMPL_SHIFT) #define CPU_IMPL_UNIMPLEMEN 0x0 #define CPU_IMPL_UCB_ROCKET 0x1 #define CPU_PART_SHIFT 62 #define CPU_PART_MASK (0x3ul << CPU_PART_SHIFT) #define CPU_PART(misa) ((misa & CPU_PART_MASK) >> CPU_PART_SHIFT) #define CPU_PART_RV32 0x1 #define CPU_PART_RV64 0x2 #define CPU_PART_RV128 0x3 extern char btext[]; extern char etext[]; void cpu_halt(void) __dead2; void cpu_reset(void) __dead2; void fork_trampoline(void); void identify_cpu(void); void swi_vm(void *v); static __inline uint64_t get_cyclecount(void) { - /* TODO: This is bogus */ - return (1); + return (rdcycle()); } #endif #endif /* !_MACHINE_CPU_H_ */ Index: projects/clang700-import/sys/riscv/include/cpufunc.h =================================================================== --- projects/clang700-import/sys/riscv/include/cpufunc.h (revision 340426) +++ projects/clang700-import/sys/riscv/include/cpufunc.h (revision 340427) @@ -1,112 +1,117 @@ /*- * Copyright (c) 2015-2016 Ruslan Bukin * All rights reserved. * * Portions of this software were developed by SRI International and the * University of Cambridge Computer Laboratory under DARPA/AFRL contract * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by the University of Cambridge * Computer Laboratory as part of the CTSRD Project, with support from the * UK Higher Education Innovation Fund (HEIF). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
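[Reviewer note] The cpu.h hunk above replaces the placeholder get_cyclecount() (which returned a constant 1) with a real read of the RISC-V cycle CSR through the new rdcycle() macro. A minimal sketch of how a kernel consumer could use it; measured_work() is a hypothetical stand-in, not part of this change:

	static void
	time_some_work(void)
	{
		uint64_t start, elapsed;

		start = get_cyclecount();	/* now rdcycle(), no longer a constant 1 */
		measured_work();		/* hypothetical routine being timed */
		elapsed = get_cyclecount() - start;
		printf("work took %ju cycles\n", (uintmax_t)elapsed);
	}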
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_CPUFUNC_H_ #define _MACHINE_CPUFUNC_H_ static __inline void breakpoint(void) { __asm("ebreak"); } #ifdef _KERNEL #include static __inline register_t intr_disable(void) { uint64_t ret; __asm __volatile( "csrrci %0, sstatus, %1" : "=&r" (ret) : "i" (SSTATUS_SIE) ); return (ret & (SSTATUS_SIE)); } static __inline void intr_restore(register_t s) { __asm __volatile( "csrs sstatus, %0" :: "r" (s) ); } static __inline void intr_enable(void) { __asm __volatile( "csrsi sstatus, %0" :: "i" (SSTATUS_SIE) ); } /* NB: fence() is defined as a macro in . */ static __inline void fence_i(void) { __asm __volatile("fence.i" ::: "memory"); } static __inline void sfence_vma(void) { __asm __volatile("sfence.vma" ::: "memory"); } static __inline void sfence_vma_page(uintptr_t addr) { __asm __volatile("sfence.vma %0" :: "r" (addr) : "memory"); } +#define rdcycle() csr_read64(cycle) +#define rdtime() csr_read64(time) +#define rdinstret() csr_read64(instret) +#define rdhpmcounter(n) csr_read64(hpmcounter##n) + #define cpufunc_nullop() riscv_nullop() void riscv_nullop(void); #endif /* _KERNEL */ #endif /* _MACHINE_CPUFUNC_H_ */ Index: projects/clang700-import/sys/riscv/include/riscvreg.h =================================================================== --- projects/clang700-import/sys/riscv/include/riscvreg.h (revision 340426) +++ projects/clang700-import/sys/riscv/include/riscvreg.h (revision 340427) @@ -1,226 +1,245 @@ /*- * Copyright (c) 2015-2017 Ruslan Bukin * All rights reserved. * * Portions of this software were developed by SRI International and the * University of Cambridge Computer Laboratory under DARPA/AFRL contract * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by the University of Cambridge * Computer Laboratory as part of the CTSRD Project, with support from the * UK Higher Education Innovation Fund (HEIF). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
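[Reviewer note] The counter accessors added to cpufunc.h above are thin wrappers around csr_read64(); rdhpmcounter(n) pastes its argument onto the CSR name. A short illustrative fragment, assuming it runs in kernel context on RISC-V:

	uint64_t cycles, wall, instrs, hpm3;

	cycles = rdcycle();		/* CSR "cycle" via csr_read64() */
	wall   = rdtime();		/* CSR "time" */
	instrs = rdinstret();		/* CSR "instret" */
	hpm3   = rdhpmcounter(3);	/* expands to csr_read64(hpmcounter3) */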
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_RISCVREG_H_ #define _MACHINE_RISCVREG_H_ #define EXCP_SHIFT 0 #define EXCP_MASK (0xf << EXCP_SHIFT) #define EXCP_MISALIGNED_FETCH 0 #define EXCP_FAULT_FETCH 1 #define EXCP_ILLEGAL_INSTRUCTION 2 #define EXCP_BREAKPOINT 3 #define EXCP_MISALIGNED_LOAD 4 #define EXCP_FAULT_LOAD 5 #define EXCP_MISALIGNED_STORE 6 #define EXCP_FAULT_STORE 7 #define EXCP_USER_ECALL 8 #define EXCP_SUPERVISOR_ECALL 9 #define EXCP_HYPERVISOR_ECALL 10 #define EXCP_MACHINE_ECALL 11 #define EXCP_INST_PAGE_FAULT 12 #define EXCP_LOAD_PAGE_FAULT 13 #define EXCP_STORE_PAGE_FAULT 15 #define EXCP_INTR (1ul << 63) #define SSTATUS_UIE (1 << 0) #define SSTATUS_SIE (1 << 1) #define SSTATUS_UPIE (1 << 4) #define SSTATUS_SPIE (1 << 5) #define SSTATUS_SPIE_SHIFT 5 #define SSTATUS_SPP (1 << 8) #define SSTATUS_SPP_SHIFT 8 #define SSTATUS_FS_SHIFT 13 #define SSTATUS_FS_OFF (0x0 << SSTATUS_FS_SHIFT) #define SSTATUS_FS_INITIAL (0x1 << SSTATUS_FS_SHIFT) #define SSTATUS_FS_CLEAN (0x2 << SSTATUS_FS_SHIFT) #define SSTATUS_FS_DIRTY (0x3 << SSTATUS_FS_SHIFT) #define SSTATUS_FS_MASK (0x3 << SSTATUS_FS_SHIFT) #define SSTATUS_XS_SHIFT 15 #define SSTATUS_XS_MASK (0x3 << SSTATUS_XS_SHIFT) #define SSTATUS_SUM (1 << 18) #define SSTATUS32_SD (1 << 63) #define SSTATUS64_SD (1 << 31) #define MSTATUS_UIE (1 << 0) #define MSTATUS_SIE (1 << 1) #define MSTATUS_HIE (1 << 2) #define MSTATUS_MIE (1 << 3) #define MSTATUS_UPIE (1 << 4) #define MSTATUS_SPIE (1 << 5) #define MSTATUS_SPIE_SHIFT 5 #define MSTATUS_HPIE (1 << 6) #define MSTATUS_MPIE (1 << 7) #define MSTATUS_MPIE_SHIFT 7 #define MSTATUS_SPP (1 << 8) #define MSTATUS_SPP_SHIFT 8 #define MSTATUS_HPP_MASK 0x3 #define MSTATUS_HPP_SHIFT 9 #define MSTATUS_MPP_MASK 0x3 #define MSTATUS_MPP_SHIFT 11 #define MSTATUS_FS_MASK 0x3 #define MSTATUS_FS_SHIFT 13 #define MSTATUS_XS_MASK 0x3 #define MSTATUS_XS_SHIFT 15 #define MSTATUS_MPRV (1 << 17) #define MSTATUS_PUM (1 << 18) #define MSTATUS_VM_MASK 0x1f #define MSTATUS_VM_SHIFT 24 #define MSTATUS_VM_MBARE 0 #define MSTATUS_VM_MBB 1 #define MSTATUS_VM_MBBID 2 #define MSTATUS_VM_SV32 8 #define MSTATUS_VM_SV39 9 #define MSTATUS_VM_SV48 10 #define MSTATUS_VM_SV57 11 #define MSTATUS_VM_SV64 12 #define MSTATUS32_SD (1 << 63) #define MSTATUS64_SD (1 << 31) #define MSTATUS_PRV_U 0 /* user */ #define MSTATUS_PRV_S 1 /* supervisor */ #define MSTATUS_PRV_H 2 /* hypervisor */ #define MSTATUS_PRV_M 3 /* machine */ #define MIE_USIE (1 << 0) #define MIE_SSIE (1 << 1) #define MIE_HSIE (1 << 2) #define MIE_MSIE (1 << 3) #define MIE_UTIE (1 << 4) #define MIE_STIE (1 << 5) #define MIE_HTIE (1 << 6) #define MIE_MTIE (1 << 7) #define MIP_USIP (1 << 0) #define MIP_SSIP (1 << 1) #define MIP_HSIP (1 << 2) #define MIP_MSIP (1 << 3) #define MIP_UTIP (1 << 4) #define MIP_STIP (1 << 5) #define MIP_HTIP (1 << 6) #define MIP_MTIP (1 << 7) #define SIE_USIE (1 << 0) #define SIE_SSIE (1 << 1) #define SIE_UTIE (1 << 4) #define SIE_STIE (1 << 5) #define SIE_UEIE (1 << 8) #define SIE_SEIE (1 << 9) #define MIP_SEIP (1 << 9) /* Note: sip register has no SIP_STIP 
bit in Spike simulator */ #define SIP_SSIP (1 << 1) #define SIP_STIP (1 << 5) #define SATP_PPN_S 0 #define SATP_PPN_M (0xfffffffffff << SATP_PPN_S) #define SATP_ASID_S 44 #define SATP_ASID_M (0xffff << SATP_ASID_S) #define SATP_MODE_S 60 #define SATP_MODE_M (0xf << SATP_MODE_S) #define SATP_MODE_SV39 (8ULL << SATP_MODE_S) #define SATP_MODE_SV48 (9ULL << SATP_MODE_S) #define XLEN 8 #define INSN_SIZE 4 #define INSN_C_SIZE 2 #define X_RA 1 #define X_SP 2 #define X_GP 3 #define X_TP 4 #define X_T0 5 #define X_T1 6 #define X_T2 7 #define X_T3 28 #define RD_SHIFT 7 #define RD_MASK (0x1f << RD_SHIFT) #define RS1_SHIFT 15 #define RS1_MASK (0x1f << RS1_SHIFT) #define RS1_SP (X_SP << RS1_SHIFT) #define RS2_SHIFT 20 #define RS2_MASK (0x1f << RS2_SHIFT) #define RS2_RA (X_RA << RS2_SHIFT) #define IMM_SHIFT 20 #define IMM_MASK (0xfff << IMM_SHIFT) #define RS2_C_SHIFT 2 #define RS2_C_MASK (0x1f << RS2_C_SHIFT) #define RS2_C_RA (X_RA << RS2_C_SHIFT) #define CSR_ZIMM(val) \ (__builtin_constant_p(val) && ((u_long)(val) < 32)) #define csr_swap(csr, val) \ ({ if (CSR_ZIMM(val)) \ __asm __volatile("csrrwi %0, " #csr ", %1" \ : "=r" (val) : "i" (val)); \ else \ __asm __volatile("csrrw %0, " #csr ", %1" \ : "=r" (val) : "r" (val)); \ val; \ }) #define csr_write(csr, val) \ ({ if (CSR_ZIMM(val)) \ __asm __volatile("csrwi " #csr ", %0" :: "i" (val)); \ else \ __asm __volatile("csrw " #csr ", %0" :: "r" (val)); \ }) #define csr_set(csr, val) \ ({ if (CSR_ZIMM(val)) \ __asm __volatile("csrsi " #csr ", %0" :: "i" (val)); \ else \ __asm __volatile("csrs " #csr ", %0" :: "r" (val)); \ }) #define csr_clear(csr, val) \ ({ if (CSR_ZIMM(val)) \ __asm __volatile("csrci " #csr ", %0" :: "i" (val)); \ else \ __asm __volatile("csrc " #csr ", %0" :: "r" (val)); \ }) #define csr_read(csr) \ ({ u_long val; \ __asm __volatile("csrr %0, " #csr : "=r" (val)); \ val; \ }) +#if __riscv_xlen == 32 +#define csr_read64(csr) \ +({ uint64_t val; \ + uint32_t high, low; \ + __asm __volatile("1: " \ + "csrr t0, " #csr "h\n" \ + "csrr %0, " #csr "\n" \ + "csrr %1, " #csr "h\n" \ + "bne t0, %1, 1b" \ + : "=r" (low), "=r" (high) \ + : \ + : "t0"); \ + val = (low | ((uint64_t)high << 32)); \ + val; \ +}) +#else +#define csr_read64(csr) ((uint64_t)csr_read(csr)) +#endif + #endif /* !_MACHINE_RISCVREG_H_ */ Index: projects/clang700-import/sys/sys/epoch_private.h =================================================================== --- projects/clang700-import/sys/sys/epoch_private.h (revision 340426) +++ projects/clang700-import/sys/sys/epoch_private.h (nonexistent) @@ -1,211 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 2018, Matthew Macy - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
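[Reviewer note] For reference, the RV32 variant of csr_read64() added above re-reads the high half until it is stable, so a low-half rollover between the two reads cannot produce a torn 64-bit value. Equivalent C sketch; read_high()/read_low() are placeholders for the csrr instructions, not real functions:

	uint32_t high, low;
	uint64_t val;

	do {
		high = read_high();	/* e.g. csrr %0, cycleh */
		low  = read_low();	/* e.g. csrr %0, cycle  */
	} while (high != read_high());	/* retry if the high half moved */
	val = ((uint64_t)high << 32) | low;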
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $FreeBSD$ - */ - -#ifndef _SYS_EPOCH_PRIVATE_H_ -#define _SYS_EPOCH_PRIVATE_H_ -#ifndef _KERNEL -#error "no user serviceable parts" -#else -#include -#include - -#include - -extern void epoch_adjust_prio(struct thread *td, u_char prio); -#ifndef _SYS_SYSTM_H_ -extern void critical_exit_preempt(void); -#endif - -#ifdef __amd64__ -#define EPOCH_ALIGN CACHE_LINE_SIZE*2 -#else -#define EPOCH_ALIGN CACHE_LINE_SIZE -#endif - -/* - * Standalone (_sa) routines for thread state manipulation - */ -static __inline void -critical_enter_sa(void *tdarg) -{ - struct thread_lite *td; - - td = tdarg; - td->td_critnest++; - __compiler_membar(); -} - -static __inline void -critical_exit_sa(void *tdarg) -{ - struct thread_lite *td; - - td = tdarg; - MPASS(td->td_critnest > 0); - __compiler_membar(); - td->td_critnest--; - __compiler_membar(); - if (__predict_false(td->td_owepreempt != 0)) - critical_exit_preempt(); -} - -typedef struct epoch_thread { -#ifdef EPOCH_TRACKER_DEBUG - uint64_t et_magic_pre; -#endif - TAILQ_ENTRY(epoch_thread) et_link; /* Epoch queue. */ - struct thread *et_td; /* pointer to thread in section */ - ck_epoch_section_t et_section; /* epoch section object */ -#ifdef EPOCH_TRACKER_DEBUG - uint64_t et_magic_post; -#endif -} *epoch_thread_t; -TAILQ_HEAD (epoch_tdlist, epoch_thread); - -typedef struct epoch_record { - ck_epoch_record_t er_read_record; - ck_epoch_record_t er_write_record; - volatile struct epoch_tdlist er_tdlist; - volatile uint32_t er_gen; - uint32_t er_cpuid; -} __aligned(EPOCH_ALIGN) *epoch_record_t; - -struct epoch { - struct ck_epoch e_epoch __aligned(EPOCH_ALIGN); - epoch_record_t e_pcpu_record; - int e_idx; - int e_flags; -}; - -static epoch_record_t -epoch_currecord(epoch_t epoch) -{ - return zpcpu_get_cpu(epoch->e_pcpu_record, curcpu); -} - -#define INIT_CHECK(epoch) \ - do { \ - if (__predict_false((epoch) == NULL)) \ - return; \ - } while (0) - -static __inline void -epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et) -{ - struct epoch_record *er; - struct epoch_thread *etd; - struct thread_lite *td; - - MPASS(cold || epoch != NULL); - INIT_CHECK(epoch); - etd = (void *)et; - MPASS(epoch->e_flags & EPOCH_PREEMPT); -#ifdef EPOCH_TRACKER_DEBUG - etd->et_magic_pre = EPOCH_MAGIC0; - etd->et_magic_post = EPOCH_MAGIC1; -#endif - td = (struct thread_lite *)curthread; - etd->et_td = (void*)td; - td->td_epochnest++; - critical_enter_sa(td); - sched_pin_lite(td); - - td->td_pre_epoch_prio = td->td_priority; - er = epoch_currecord(epoch); - TAILQ_INSERT_TAIL(&er->er_tdlist, etd, et_link); - ck_epoch_begin(&er->er_read_record, (ck_epoch_section_t *)&etd->et_section); - critical_exit_sa(td); -} - -static __inline void -epoch_enter(epoch_t epoch) -{ - struct thread_lite *td; - epoch_record_t er; - - MPASS(cold || epoch != NULL); - INIT_CHECK(epoch); - td = (struct thread_lite *)curthread; - - td->td_epochnest++; - critical_enter_sa(td); - er = epoch_currecord(epoch); - ck_epoch_begin(&er->er_read_record, NULL); -} - -static __inline void 
-epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et) -{ - struct epoch_record *er; - struct epoch_thread *etd; - struct thread_lite *td; - - INIT_CHECK(epoch); - td = (struct thread_lite *)curthread; - critical_enter_sa(td); - sched_unpin_lite(td); - MPASS(td->td_epochnest); - td->td_epochnest--; - er = epoch_currecord(epoch); - MPASS(epoch->e_flags & EPOCH_PREEMPT); - etd = (void *)et; - MPASS(etd != NULL); - MPASS(etd->et_td == (struct thread *)td); -#ifdef EPOCH_TRACKER_DEBUG - MPASS(etd->et_magic_pre == EPOCH_MAGIC0); - MPASS(etd->et_magic_post == EPOCH_MAGIC1); - etd->et_magic_pre = 0; - etd->et_magic_post = 0; -#endif - etd->et_td = (void*)0xDEADBEEF; - ck_epoch_end(&er->er_read_record, - (ck_epoch_section_t *)&etd->et_section); - TAILQ_REMOVE(&er->er_tdlist, etd, et_link); - er->er_gen++; - if (__predict_false(td->td_pre_epoch_prio != td->td_priority)) - epoch_adjust_prio((struct thread *)td, td->td_pre_epoch_prio); - critical_exit_sa(td); -} - -static __inline void -epoch_exit(epoch_t epoch) -{ - struct thread_lite *td; - epoch_record_t er; - - INIT_CHECK(epoch); - td = (struct thread_lite *)curthread; - MPASS(td->td_epochnest); - td->td_epochnest--; - er = epoch_currecord(epoch); - ck_epoch_end(&er->er_read_record, NULL); - critical_exit_sa(td); -} -#endif /* _KERNEL */ -#endif /* _SYS_EPOCH_PRIVATE_H_ */ Property changes on: projects/clang700-import/sys/sys/epoch_private.h ___________________________________________________________________ Deleted: svn:eol-style ## -1 +0,0 ## -native \ No newline at end of property Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Deleted: svn:mime-type ## -1 +0,0 ## -text/plain \ No newline at end of property Index: projects/clang700-import/sys/sys/blist.h =================================================================== --- projects/clang700-import/sys/sys/blist.h (revision 340426) +++ projects/clang700-import/sys/sys/blist.h (revision 340427) @@ -1,108 +1,106 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1998 Matthew Dillon. All Rights Reserved. * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Implements bitmap resource lists. 
* * Usage: * blist = blist_create(blocks, flags) * (void) blist_destroy(blist) * blkno = blist_alloc(blist, count) * (void) blist_free(blist, blkno, count) * nblks = blist_fill(blist, blkno, count) * (void) blist_resize(&blist, count, freeextra, flags) * * * Notes: * on creation, the entire list is marked reserved. You should * first blist_free() the sections you want to make available * for allocation before doing general blist_alloc()/free() * ops. * * SWAPBLK_NONE is returned on failure. This module is typically * capable of managing up to (2^63) blocks per blist, though * the memory utilization would be insane if you actually did * that. Managing something like 512MB worth of 4K blocks * eats around 32 KBytes of memory. * * $FreeBSD$ */ #ifndef _SYS_BLIST_H_ #define _SYS_BLIST_H_ typedef uint64_t u_daddr_t; /* unsigned disk address */ /* * note: currently use SWAPBLK_NONE as an absolute value rather then * a flag bit. */ #define SWAPBLK_MASK ((daddr_t)((u_daddr_t)-1 >> 1)) /* mask */ #define SWAPBLK_NONE ((daddr_t)((u_daddr_t)SWAPBLK_MASK + 1))/* flag */ /* * Both blmeta and bmu_bitmap MUST be a power of 2 in size. */ typedef struct blmeta { - union { - daddr_t bmu_avail; /* space available under us */ - u_daddr_t bmu_bitmap; /* bitmap if we are a leaf */ - } u; + u_daddr_t bm_bitmap; /* bitmap if we are a leaf */ daddr_t bm_bighint; /* biggest contiguous block hint*/ } blmeta_t; typedef struct blist { daddr_t bl_blocks; /* area of coverage */ + daddr_t bl_avail; /* # available blocks */ u_daddr_t bl_radix; /* coverage radix */ daddr_t bl_cursor; /* next-fit search starts at */ blmeta_t bl_root[1]; /* root of radix tree */ } *blist_t; -#define BLIST_META_RADIX 16 #define BLIST_BMAP_RADIX (sizeof(u_daddr_t)*8) +#define BLIST_META_RADIX BLIST_BMAP_RADIX #define BLIST_MAX_ALLOC BLIST_BMAP_RADIX struct sbuf; daddr_t blist_alloc(blist_t blist, daddr_t count); daddr_t blist_avail(blist_t blist); blist_t blist_create(daddr_t blocks, int flags); void blist_destroy(blist_t blist); daddr_t blist_fill(blist_t bl, daddr_t blkno, daddr_t count); void blist_free(blist_t blist, daddr_t blkno, daddr_t count); void blist_print(blist_t blist); void blist_resize(blist_t *pblist, daddr_t count, int freenew, int flags); void blist_stats(blist_t blist, struct sbuf *s); #endif /* _SYS_BLIST_H_ */ Index: projects/clang700-import/sys/sys/epoch.h =================================================================== --- projects/clang700-import/sys/sys/epoch.h (revision 340426) +++ projects/clang700-import/sys/sys/epoch.h (revision 340427) @@ -1,93 +1,89 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2018, Matthew Macy * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
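[Reviewer note] The blist(9) rework above drops the bmu_avail/bmu_bitmap union in favour of a per-leaf bm_bitmap plus a single bl_avail count in the blist head; the consumer-facing KPI is unchanged. A minimal usage sketch against the prototypes above, with illustrative sizes:

	blist_t bl;
	daddr_t blk;

	bl = blist_create(1024 * 1024, M_WAITOK);	/* created fully reserved */
	blist_free(bl, 0, 1024 * 1024);			/* make blocks available */
	blk = blist_alloc(bl, 16);			/* SWAPBLK_NONE on failure */
	if (blk != SWAPBLK_NONE)
		blist_free(bl, blk, 16);
	blist_destroy(bl);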
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_EPOCH_H_ #define _SYS_EPOCH_H_ + +struct epoch_context { + void *data[2]; +} __aligned(sizeof(void *)); + +typedef struct epoch_context *epoch_context_t; + #ifdef _KERNEL #include #include -#endif +#include struct epoch; typedef struct epoch *epoch_t; #define EPOCH_PREEMPT 0x1 #define EPOCH_LOCKED 0x2 extern epoch_t global_epoch; extern epoch_t global_epoch_preempt; -struct epoch_context { - void *data[2]; -} __aligned(sizeof(void *)); - -typedef struct epoch_context *epoch_context_t; - - struct epoch_tracker { - void *datap[3]; -#ifdef EPOCH_TRACKER_DEBUG - int datai[5]; -#else - int datai[1]; +#ifdef EPOCH_TRACKER_DEBUG +#define EPOCH_MAGIC0 0xFADECAFEF00DD00D +#define EPOCH_MAGIC1 0xBADDBABEDEEDFEED + uint64_t et_magic_pre; #endif + TAILQ_ENTRY(epoch_tracker) et_link; + struct thread *et_td; + ck_epoch_section_t et_section; +#ifdef EPOCH_TRACKER_DEBUG + uint64_t et_magic_post; +#endif } __aligned(sizeof(void *)); - typedef struct epoch_tracker *epoch_tracker_t; epoch_t epoch_alloc(int flags); void epoch_free(epoch_t epoch); void epoch_wait(epoch_t epoch); void epoch_wait_preempt(epoch_t epoch); void epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t)); int in_epoch(epoch_t epoch); int in_epoch_verbose(epoch_t epoch, int dump_onfail); -#ifdef _KERNEL DPCPU_DECLARE(int, epoch_cb_count); DPCPU_DECLARE(struct grouptask, epoch_cb_task); #define EPOCH_MAGIC0 0xFADECAFEF00DD00D #define EPOCH_MAGIC1 0xBADDBABEDEEDFEED -void epoch_enter_preempt_KBI(epoch_t epoch, epoch_tracker_t et); -void epoch_exit_preempt_KBI(epoch_t epoch, epoch_tracker_t et); -void epoch_enter_KBI(epoch_t epoch); -void epoch_exit_KBI(epoch_t epoch); +void epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et); +void epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et); +void epoch_enter(epoch_t epoch); +void epoch_exit(epoch_t epoch); +void epoch_thread_init(struct thread *); +void epoch_thread_fini(struct thread *); -#if defined(KLD_MODULE) && !defined(KLD_TIED) -#define epoch_enter_preempt(e, t) epoch_enter_preempt_KBI((e), (t)) -#define epoch_exit_preempt(e, t) epoch_exit_preempt_KBI((e), (t)) -#define epoch_enter(e) epoch_enter_KBI((e)) -#define epoch_exit(e) epoch_exit_KBI((e)) -#else -#include -#endif /* KLD_MODULE */ - -#endif /* _KERNEL */ -#endif +#endif /* _KERNEL */ +#endif /* _SYS_EPOCH_H_ */ Index: projects/clang700-import/sys/sys/proc.h =================================================================== --- projects/clang700-import/sys/sys/proc.h (revision 340426) +++ projects/clang700-import/sys/sys/proc.h (revision 340427) @@ -1,1163 +1,1165 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1986, 1989, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. 
and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)proc.h 8.15 (Berkeley) 5/19/95 * $FreeBSD$ */ #ifndef _SYS_PROC_H_ #define _SYS_PROC_H_ #include /* For struct callout. */ #include /* For struct klist. */ #include #ifndef _KERNEL #include #endif #include #include #include #include #include #include #include /* XXX. */ #include #include #include #include #include #ifndef _KERNEL #include /* For structs itimerval, timeval. */ #else #include #include #endif #include #include #include #include #include /* Machine-dependent proc substruct. */ #ifdef _KERNEL #include #endif /* * One structure allocated per session. * * List of locks * (m) locked by s_mtx mtx * (e) locked by proctree_lock sx * (c) const until freeing */ struct session { u_int s_count; /* Ref cnt; pgrps in session - atomic. */ struct proc *s_leader; /* (m + e) Session leader. */ struct vnode *s_ttyvp; /* (m) Vnode of controlling tty. */ struct cdev_priv *s_ttydp; /* (m) Device of controlling tty. */ struct tty *s_ttyp; /* (e) Controlling tty. */ pid_t s_sid; /* (c) Session ID. */ /* (m) Setlogin() name: */ char s_login[roundup(MAXLOGNAME, sizeof(long))]; struct mtx s_mtx; /* Mutex to protect members. */ }; /* * One structure allocated per process group. * * List of locks * (m) locked by pg_mtx mtx * (e) locked by proctree_lock sx * (c) const until freeing */ struct pgrp { LIST_ENTRY(pgrp) pg_hash; /* (e) Hash chain. */ LIST_HEAD(, proc) pg_members; /* (m + e) Pointer to pgrp members. */ struct session *pg_session; /* (c) Pointer to session. */ struct sigiolst pg_sigiolst; /* (m) List of sigio sources. */ pid_t pg_id; /* (c) Process group id. */ int pg_jobc; /* (m) Job control process count. */ struct mtx pg_mtx; /* Mutex to protect members */ }; /* * pargs, used to hold a copy of the command line, if it had a sane length. */ struct pargs { u_int ar_ref; /* Reference count. */ u_int ar_length; /* Length. */ u_char ar_args[1]; /* Arguments. */ }; /*- * Description of a process. 
* * This structure contains the information needed to manage a thread of * control, known in UN*X as a process; it has references to substructures * containing descriptions of things that the process uses, but may share * with related processes. The process structure and the substructures * are always addressable except for those marked "(CPU)" below, * which might be addressable only on a processor on which the process * is running. * * Below is a key of locks used to protect each member of struct proc. The * lock is indicated by a reference to a specific character in parens in the * associated comment. * * - not yet protected * a - only touched by curproc or parent during fork/wait * b - created at fork, never changes * (exception aiods switch vmspaces, but they are also * marked 'P_SYSTEM' so hopefully it will be left alone) * c - locked by proc mtx * d - locked by allproc_lock lock * e - locked by proctree_lock lock * f - session mtx * g - process group mtx * h - callout_lock mtx * i - by curproc or the master session mtx * j - locked by proc slock * k - only accessed by curthread * k*- only accessed by curthread and from an interrupt * kx- only accessed by curthread and by debugger * l - the attaching proc or attaching proc parent * m - Giant * n - not locked, lazy * o - ktrace lock * q - td_contested lock * r - p_peers lock * s - see sleepq_switch(), sleeping_on_old_rtc(), and sleep(9) * t - thread lock * u - process stat lock * w - process timer lock * x - created at fork, only changes during single threading in exec * y - created at first aio, doesn't change until exit or exec at which * point we are single-threaded and only curthread changes it * z - zombie threads lock * * If the locking key specifies two identifiers (for example, p_pptr) then * either lock is sufficient for read access, but both locks must be held * for write access. */ struct cpuset; struct filecaps; struct filemon; struct kaioinfo; struct kaudit_record; struct kdtrace_proc; struct kdtrace_thread; struct mqueue_notifier; struct nlminfo; struct p_sched; struct proc; struct procdesc; struct racct; struct sbuf; struct sleepqueue; struct socket; struct syscall_args; struct td_sched; struct thread; struct trapframe; struct turnstile; struct vm_map; struct vm_map_entry; +struct epoch_tracker; /* * XXX: Does this belong in resource.h or resourcevar.h instead? * Resource usage extension. The times in rusage structs in the kernel are * never up to date. The actual times are kept as runtimes and tick counts * (with control info in the "previous" times), and are converted when * userland asks for rusage info. Backwards compatibility prevents putting * this directly in the user-visible rusage struct. * * Locking for p_rux: (cu) means (u) for p_rux and (c) for p_crux. * Locking for td_rux: (t) for all fields. */ struct rusage_ext { uint64_t rux_runtime; /* (cu) Real time. */ uint64_t rux_uticks; /* (cu) Statclock hits in user mode. */ uint64_t rux_sticks; /* (cu) Statclock hits in sys mode. */ uint64_t rux_iticks; /* (cu) Statclock hits in intr mode. */ uint64_t rux_uu; /* (c) Previous user time in usec. */ uint64_t rux_su; /* (c) Previous sys time in usec. */ uint64_t rux_tu; /* (c) Previous total time in usec. */ }; /* * Kernel runnable context (thread). * This is what is put to sleep and reactivated. * Thread context. Processes may have multiple threads. */ struct thread { struct mtx *volatile td_lock; /* replaces sched lock */ struct proc *td_proc; /* (*) Associated process. 
*/ TAILQ_ENTRY(thread) td_plist; /* (*) All threads in this proc. */ TAILQ_ENTRY(thread) td_runq; /* (t) Run queue. */ TAILQ_ENTRY(thread) td_slpq; /* (t) Sleep queue. */ TAILQ_ENTRY(thread) td_lockq; /* (t) Lock queue. */ LIST_ENTRY(thread) td_hash; /* (d) Hash chain. */ struct cpuset *td_cpuset; /* (t) CPU affinity mask. */ struct domainset_ref td_domain; /* (a) NUMA policy */ struct seltd *td_sel; /* Select queue/channel. */ struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */ struct turnstile *td_turnstile; /* (k) Associated turnstile. */ struct rl_q_entry *td_rlqe; /* (k) Associated range lock entry. */ struct umtx_q *td_umtxq; /* (c?) Link for when we're blocked. */ lwpid_t td_tid; /* (b) Thread ID. */ sigqueue_t td_sigqueue; /* (c) Sigs arrived, not delivered. */ #define td_siglist td_sigqueue.sq_signals u_char td_lend_user_pri; /* (t) Lend user pri. */ /* Cleared during fork1() */ #define td_startzero td_epochnest u_char td_epochnest; /* (k) Epoch nest counter. */ int td_flags; /* (t) TDF_* flags. */ int td_inhibitors; /* (t) Why can not run. */ int td_pflags; /* (k) Private thread (TDP_*) flags. */ int td_dupfd; /* (k) Ret value from fdopen. XXX */ int td_sqqueue; /* (t) Sleepqueue queue blocked on. */ void *td_wchan; /* (t) Sleep address. */ const char *td_wmesg; /* (t) Reason for sleep. */ volatile u_char td_owepreempt; /* (k*) Preempt on last critical_exit */ u_char td_tsqueue; /* (t) Turnstile queue blocked on. */ short td_locks; /* (k) Debug: count of non-spin locks */ short td_rw_rlocks; /* (k) Count of rwlock read locks. */ short td_sx_slocks; /* (k) Count of sx shared locks. */ short td_lk_slocks; /* (k) Count of lockmgr shared locks. */ short td_stopsched; /* (k) Scheduler stopped. */ struct turnstile *td_blocked; /* (t) Lock thread is blocked on. */ const char *td_lockname; /* (t) Name of lock blocked on. */ LIST_HEAD(, turnstile) td_contested; /* (q) Contested locks. */ struct lock_list_entry *td_sleeplocks; /* (k) Held sleep locks. */ int td_intr_nesting_level; /* (k) Interrupt recursion. */ int td_pinned; /* (k) Temporary cpu pin count. */ struct ucred *td_ucred; /* (k) Reference to credentials. */ struct plimit *td_limit; /* (k) Resource limits. */ int td_slptick; /* (t) Time at sleep. */ int td_blktick; /* (t) Time spent blocked. */ int td_swvoltick; /* (t) Time at last SW_VOL switch. */ int td_swinvoltick; /* (t) Time at last SW_INVOL switch. */ u_int td_cow; /* (*) Number of copy-on-write faults */ struct rusage td_ru; /* (t) rusage information. */ struct rusage_ext td_rux; /* (t) Internal rusage information. */ uint64_t td_incruntime; /* (t) Cpu ticks to transfer to proc. */ uint64_t td_runtime; /* (t) How many cpu ticks we've run. */ u_int td_pticks; /* (t) Statclock hits for profiling */ u_int td_sticks; /* (t) Statclock hits in system mode. */ u_int td_iticks; /* (t) Statclock hits in intr mode. */ u_int td_uticks; /* (t) Statclock hits in user mode. */ int td_intrval; /* (t) Return value for sleepq. */ sigset_t td_oldsigmask; /* (k) Saved mask from pre sigpause. */ volatile u_int td_generation; /* (k) For detection of preemption */ stack_t td_sigstk; /* (k) Stack ptr and on-stack flag. */ int td_xsig; /* (c) Signal for ptrace */ u_long td_profil_addr; /* (k) Temporary addr until AST. */ u_int td_profil_ticks; /* (k) Temporary ticks until AST. */ char td_name[MAXCOMLEN + 1]; /* (*) Thread name. 
*/ struct file *td_fpop; /* (k) file referencing cdev under op */ int td_dbgflags; /* (c) Userland debugger flags */ siginfo_t td_si; /* (c) For debugger or core file */ int td_ng_outbound; /* (k) Thread entered ng from above. */ struct osd td_osd; /* (k) Object specific data. */ struct vm_map_entry *td_map_def_user; /* (k) Deferred entries. */ pid_t td_dbg_forked; /* (c) Child pid for debugger. */ u_int td_vp_reserv; /* (k) Count of reserved vnodes. */ int td_no_sleeping; /* (k) Sleeping disabled count. */ void *td_su; /* (k) FFS SU private */ sbintime_t td_sleeptimo; /* (t) Sleep timeout. */ int td_rtcgen; /* (s) rtc_generation of abs. sleep */ size_t td_vslock_sz; /* (k) amount of vslock-ed space */ #define td_endzero td_sigmask /* Copied during fork1() or create_thread(). */ #define td_startcopy td_endzero sigset_t td_sigmask; /* (c) Current signal mask. */ u_char td_rqindex; /* (t) Run queue index. */ u_char td_base_pri; /* (t) Thread base kernel priority. */ u_char td_priority; /* (t) Thread active priority. */ u_char td_pri_class; /* (t) Scheduling class. */ u_char td_user_pri; /* (t) User pri from estcpu and nice. */ u_char td_base_user_pri; /* (t) Base user pri */ u_char td_pre_epoch_prio; /* (k) User pri on entry to epoch */ uintptr_t td_rb_list; /* (k) Robust list head. */ uintptr_t td_rbp_list; /* (k) Robust priv list head. */ uintptr_t td_rb_inact; /* (k) Current in-action mutex loc. */ struct syscall_args td_sa; /* (kx) Syscall parameters. Copied on fork for child tracing. */ #define td_endcopy td_pcb /* * Fields that must be manually set in fork1() or create_thread() * or already have been set in the allocator, constructor, etc. */ struct pcb *td_pcb; /* (k) Kernel VA of pcb and kstack. */ enum { TDS_INACTIVE = 0x0, TDS_INHIBITED, TDS_CAN_RUN, TDS_RUNQ, TDS_RUNNING } td_state; /* (t) thread state */ union { register_t tdu_retval[2]; off_t tdu_off; } td_uretoff; /* (k) Syscall aux returns. */ #define td_retval td_uretoff.tdu_retval u_int td_cowgen; /* (k) Generation of COW pointers. */ /* LP64 hole */ struct callout td_slpcallout; /* (h) Callout for sleep. */ struct trapframe *td_frame; /* (k) */ struct vm_object *td_kstack_obj;/* (a) Kstack object. */ vm_offset_t td_kstack; /* (a) Kernel VA of kstack. */ int td_kstack_pages; /* (a) Size of the kstack. */ volatile u_int td_critnest; /* (k*) Critical section nest level. */ struct mdthread td_md; /* (k) Any machine-dependent fields. */ struct kaudit_record *td_ar; /* (k) Active audit record, if any. */ struct lpohead td_lprof[2]; /* (a) lock profiling objects. */ struct kdtrace_thread *td_dtrace; /* (*) DTrace-specific data. */ int td_errno; /* Error returned by last syscall. */ /* LP64 hole */ struct vnet *td_vnet; /* (k) Effective vnet. */ const char *td_vnet_lpush; /* (k) Debugging vnet push / pop. */ struct trapframe *td_intr_frame;/* (k) Frame of the current irq */ struct proc *td_rfppwait_p; /* (k) The vforked child */ struct vm_page **td_ma; /* (k) uio pages held */ int td_ma_cnt; /* (k) size of *td_ma */ /* LP64 hole */ void *td_emuldata; /* Emulator state data */ int td_lastcpu; /* (t) Last cpu we were on. */ int td_oncpu; /* (t) Which cpu we are on. 
*/ void *td_lkpi_task; /* LinuxKPI task struct pointer */ + struct epoch_tracker *td_et; /* (k) compat KPI spare tracker */ int td_pmcpend; }; struct thread0_storage { struct thread t0st_thread; uint64_t t0st_sched[10]; }; struct mtx *thread_lock_block(struct thread *); void thread_lock_unblock(struct thread *, struct mtx *); void thread_lock_set(struct thread *, struct mtx *); #define THREAD_LOCK_ASSERT(td, type) \ do { \ struct mtx *__m = (td)->td_lock; \ if (__m != &blocked_lock) \ mtx_assert(__m, (type)); \ } while (0) #ifdef INVARIANTS #define THREAD_LOCKPTR_ASSERT(td, lock) \ do { \ struct mtx *__m = (td)->td_lock; \ KASSERT((__m == &blocked_lock || __m == (lock)), \ ("Thread %p lock %p does not match %p", td, __m, (lock))); \ } while (0) #define TD_LOCKS_INC(td) ((td)->td_locks++) #define TD_LOCKS_DEC(td) do { \ KASSERT(SCHEDULER_STOPPED_TD(td) || (td)->td_locks > 0, \ ("thread %p owns no locks", (td))); \ (td)->td_locks--; \ } while (0) #else #define THREAD_LOCKPTR_ASSERT(td, lock) #define TD_LOCKS_INC(td) #define TD_LOCKS_DEC(td) #endif /* * Flags kept in td_flags: * To change these you MUST have the scheduler lock. */ #define TDF_BORROWING 0x00000001 /* Thread is borrowing pri from another. */ #define TDF_INPANIC 0x00000002 /* Caused a panic, let it drive crashdump. */ #define TDF_INMEM 0x00000004 /* Thread's stack is in memory. */ #define TDF_SINTR 0x00000008 /* Sleep is interruptible. */ #define TDF_TIMEOUT 0x00000010 /* Timing out during sleep. */ #define TDF_IDLETD 0x00000020 /* This is a per-CPU idle thread. */ #define TDF_CANSWAP 0x00000040 /* Thread can be swapped. */ #define TDF_SLEEPABORT 0x00000080 /* sleepq_abort was called. */ #define TDF_KTH_SUSP 0x00000100 /* kthread is suspended */ #define TDF_ALLPROCSUSP 0x00000200 /* suspended by SINGLE_ALLPROC */ #define TDF_BOUNDARY 0x00000400 /* Thread suspended at user boundary */ #define TDF_ASTPENDING 0x00000800 /* Thread has some asynchronous events. */ #define TDF_UNUSED12 0x00001000 /* --available-- */ #define TDF_SBDRY 0x00002000 /* Stop only on usermode boundary. */ #define TDF_UPIBLOCKED 0x00004000 /* Thread blocked on user PI mutex. */ #define TDF_NEEDSUSPCHK 0x00008000 /* Thread may need to suspend. */ #define TDF_NEEDRESCHED 0x00010000 /* Thread needs to yield. */ #define TDF_NEEDSIGCHK 0x00020000 /* Thread may need signal delivery. */ #define TDF_NOLOAD 0x00040000 /* Ignore during load avg calculations. */ #define TDF_SERESTART 0x00080000 /* ERESTART on stop attempts. */ #define TDF_THRWAKEUP 0x00100000 /* Libthr thread must not suspend itself. */ #define TDF_SEINTR 0x00200000 /* EINTR on stop attempts. */ #define TDF_SWAPINREQ 0x00400000 /* Swapin request due to wakeup. */ #define TDF_UNUSED23 0x00800000 /* --available-- */ #define TDF_SCHED0 0x01000000 /* Reserved for scheduler private use */ #define TDF_SCHED1 0x02000000 /* Reserved for scheduler private use */ #define TDF_SCHED2 0x04000000 /* Reserved for scheduler private use */ #define TDF_SCHED3 0x08000000 /* Reserved for scheduler private use */ #define TDF_ALRMPEND 0x10000000 /* Pending SIGVTALRM needs to be posted. */ #define TDF_PROFPEND 0x20000000 /* Pending SIGPROF needs to be posted. */ #define TDF_MACPEND 0x40000000 /* AST-based MAC event pending. 
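[Reviewer note] With epoch_private.h removed, epoch_enter()/epoch_exit() and their _preempt variants are ordinary exported functions and struct epoch_tracker is the public on-stack tracker; td_et above is only a spare for KBI-compatible consumers. The read-side pattern itself is unchanged; a sketch in which foo_epoch and the protected traversal are placeholders:

	struct epoch_tracker et;

	epoch_enter_preempt(foo_epoch, &et);
	/* lockless read-side work, e.g. walking an epoch-protected list */
	epoch_exit_preempt(foo_epoch, &et);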
*/ /* Userland debug flags */ #define TDB_SUSPEND 0x00000001 /* Thread is suspended by debugger */ #define TDB_XSIG 0x00000002 /* Thread is exchanging signal under trace */ #define TDB_USERWR 0x00000004 /* Debugger modified memory or registers */ #define TDB_SCE 0x00000008 /* Thread performs syscall enter */ #define TDB_SCX 0x00000010 /* Thread performs syscall exit */ #define TDB_EXEC 0x00000020 /* TDB_SCX from exec(2) family */ #define TDB_FORK 0x00000040 /* TDB_SCX from fork(2) that created new process */ #define TDB_STOPATFORK 0x00000080 /* Stop at the return from fork (child only) */ #define TDB_CHILD 0x00000100 /* New child indicator for ptrace() */ #define TDB_BORN 0x00000200 /* New LWP indicator for ptrace() */ #define TDB_EXIT 0x00000400 /* Exiting LWP indicator for ptrace() */ #define TDB_VFORK 0x00000800 /* vfork indicator for ptrace() */ #define TDB_FSTP 0x00001000 /* The thread is PT_ATTACH leader */ #define TDB_STEP 0x00002000 /* (x86) PSL_T set for PT_STEP */ /* * "Private" flags kept in td_pflags: * These are only written by curthread and thus need no locking. */ #define TDP_OLDMASK 0x00000001 /* Need to restore mask after suspend. */ #define TDP_INKTR 0x00000002 /* Thread is currently in KTR code. */ #define TDP_INKTRACE 0x00000004 /* Thread is currently in KTRACE code. */ #define TDP_BUFNEED 0x00000008 /* Do not recurse into the buf flush */ #define TDP_COWINPROGRESS 0x00000010 /* Snapshot copy-on-write in progress. */ #define TDP_ALTSTACK 0x00000020 /* Have alternate signal stack. */ #define TDP_DEADLKTREAT 0x00000040 /* Lock acquisition - deadlock treatment. */ #define TDP_NOFAULTING 0x00000080 /* Do not handle page faults. */ #define TDP_UNUSED9 0x00000100 /* --available-- */ #define TDP_OWEUPC 0x00000200 /* Call addupc() at next AST. */ #define TDP_ITHREAD 0x00000400 /* Thread is an interrupt thread. */ #define TDP_SYNCIO 0x00000800 /* Local override, disable async i/o. */ #define TDP_SCHED1 0x00001000 /* Reserved for scheduler private use */ #define TDP_SCHED2 0x00002000 /* Reserved for scheduler private use */ #define TDP_SCHED3 0x00004000 /* Reserved for scheduler private use */ #define TDP_SCHED4 0x00008000 /* Reserved for scheduler private use */ #define TDP_GEOM 0x00010000 /* Settle GEOM before finishing syscall */ #define TDP_SOFTDEP 0x00020000 /* Stuck processing softdep worklist */ #define TDP_NORUNNINGBUF 0x00040000 /* Ignore runningbufspace check */ #define TDP_WAKEUP 0x00080000 /* Don't sleep in umtx cond_wait */ #define TDP_INBDFLUSH 0x00100000 /* Already in BO_BDFLUSH, do not recurse */ #define TDP_KTHREAD 0x00200000 /* This is an official kernel thread */ #define TDP_CALLCHAIN 0x00400000 /* Capture thread's callchain */ #define TDP_IGNSUSP 0x00800000 /* Permission to ignore the MNTK_SUSPEND* */ #define TDP_AUDITREC 0x01000000 /* Audit record pending on thread */ #define TDP_RFPPWAIT 0x02000000 /* Handle RFPPWAIT on syscall exit */ #define TDP_RESETSPUR 0x04000000 /* Reset spurious page fault history. */ #define TDP_NERRNO 0x08000000 /* Last errno is already in td_errno */ #define TDP_UIOHELD 0x10000000 /* Current uio has pages held in td_ma */ #define TDP_FORKING 0x20000000 /* Thread is being created through fork() */ #define TDP_EXECVMSPC 0x40000000 /* Execve destroyed old vmspace */ /* * Reasons that the current thread can not be run yet. * More than one may apply. */ #define TDI_SUSPENDED 0x0001 /* On suspension queue. */ #define TDI_SLEEPING 0x0002 /* Actually asleep! (tricky). */ #define TDI_SWAPPED 0x0004 /* Stack not in mem. Bad juju if run. 
*/ #define TDI_LOCK 0x0008 /* Stopped on a lock. */ #define TDI_IWAIT 0x0010 /* Awaiting interrupt. */ #define TD_IS_SLEEPING(td) ((td)->td_inhibitors & TDI_SLEEPING) #define TD_ON_SLEEPQ(td) ((td)->td_wchan != NULL) #define TD_IS_SUSPENDED(td) ((td)->td_inhibitors & TDI_SUSPENDED) #define TD_IS_SWAPPED(td) ((td)->td_inhibitors & TDI_SWAPPED) #define TD_ON_LOCK(td) ((td)->td_inhibitors & TDI_LOCK) #define TD_AWAITING_INTR(td) ((td)->td_inhibitors & TDI_IWAIT) #define TD_IS_RUNNING(td) ((td)->td_state == TDS_RUNNING) #define TD_ON_RUNQ(td) ((td)->td_state == TDS_RUNQ) #define TD_CAN_RUN(td) ((td)->td_state == TDS_CAN_RUN) #define TD_IS_INHIBITED(td) ((td)->td_state == TDS_INHIBITED) #define TD_ON_UPILOCK(td) ((td)->td_flags & TDF_UPIBLOCKED) #define TD_IS_IDLETHREAD(td) ((td)->td_flags & TDF_IDLETD) #define KTDSTATE(td) \ (((td)->td_inhibitors & TDI_SLEEPING) != 0 ? "sleep" : \ ((td)->td_inhibitors & TDI_SUSPENDED) != 0 ? "suspended" : \ ((td)->td_inhibitors & TDI_SWAPPED) != 0 ? "swapped" : \ ((td)->td_inhibitors & TDI_LOCK) != 0 ? "blocked" : \ ((td)->td_inhibitors & TDI_IWAIT) != 0 ? "iwait" : "yielding") #define TD_SET_INHIB(td, inhib) do { \ (td)->td_state = TDS_INHIBITED; \ (td)->td_inhibitors |= (inhib); \ } while (0) #define TD_CLR_INHIB(td, inhib) do { \ if (((td)->td_inhibitors & (inhib)) && \ (((td)->td_inhibitors &= ~(inhib)) == 0)) \ (td)->td_state = TDS_CAN_RUN; \ } while (0) #define TD_SET_SLEEPING(td) TD_SET_INHIB((td), TDI_SLEEPING) #define TD_SET_SWAPPED(td) TD_SET_INHIB((td), TDI_SWAPPED) #define TD_SET_LOCK(td) TD_SET_INHIB((td), TDI_LOCK) #define TD_SET_SUSPENDED(td) TD_SET_INHIB((td), TDI_SUSPENDED) #define TD_SET_IWAIT(td) TD_SET_INHIB((td), TDI_IWAIT) #define TD_SET_EXITING(td) TD_SET_INHIB((td), TDI_EXITING) #define TD_CLR_SLEEPING(td) TD_CLR_INHIB((td), TDI_SLEEPING) #define TD_CLR_SWAPPED(td) TD_CLR_INHIB((td), TDI_SWAPPED) #define TD_CLR_LOCK(td) TD_CLR_INHIB((td), TDI_LOCK) #define TD_CLR_SUSPENDED(td) TD_CLR_INHIB((td), TDI_SUSPENDED) #define TD_CLR_IWAIT(td) TD_CLR_INHIB((td), TDI_IWAIT) #define TD_SET_RUNNING(td) (td)->td_state = TDS_RUNNING #define TD_SET_RUNQ(td) (td)->td_state = TDS_RUNQ #define TD_SET_CAN_RUN(td) (td)->td_state = TDS_CAN_RUN #define TD_SBDRY_INTR(td) \ (((td)->td_flags & (TDF_SEINTR | TDF_SERESTART)) != 0) #define TD_SBDRY_ERRNO(td) \ (((td)->td_flags & TDF_SEINTR) != 0 ? EINTR : ERESTART) /* * Process structure. */ struct proc { LIST_ENTRY(proc) p_list; /* (d) List of all processes. */ TAILQ_HEAD(, thread) p_threads; /* (c) all threads. */ struct mtx p_slock; /* process spin lock */ struct ucred *p_ucred; /* (c) Process owner's identity. */ struct filedesc *p_fd; /* (b) Open files. */ struct filedesc_to_leader *p_fdtol; /* (b) Tracking node */ struct pstats *p_stats; /* (b) Accounting/statistics (CPU). */ struct plimit *p_limit; /* (c) Resource limits. */ struct callout p_limco; /* (c) Limit callout handle */ struct sigacts *p_sigacts; /* (x) Signal actions, state (CPU). */ int p_flag; /* (c) P_* flags. */ int p_flag2; /* (c) P2_* flags. */ enum { PRS_NEW = 0, /* In creation */ PRS_NORMAL, /* threads can be run. */ PRS_ZOMBIE } p_state; /* (j/c) Process status. */ pid_t p_pid; /* (b) Process identifier. */ LIST_ENTRY(proc) p_hash; /* (d) Hash chain. */ LIST_ENTRY(proc) p_pglist; /* (g + e) List of processes in pgrp. */ struct proc *p_pptr; /* (c + e) Pointer to parent process. */ LIST_ENTRY(proc) p_sibling; /* (e) List of sibling processes. */ LIST_HEAD(, proc) p_children; /* (e) Pointer to list of children. 
*/ struct proc *p_reaper; /* (e) My reaper. */ LIST_HEAD(, proc) p_reaplist; /* (e) List of my descendants (if I am reaper). */ LIST_ENTRY(proc) p_reapsibling; /* (e) List of siblings - descendants of the same reaper. */ struct mtx p_mtx; /* (n) Lock for this struct. */ struct mtx p_statmtx; /* Lock for the stats */ struct mtx p_itimmtx; /* Lock for the virt/prof timers */ struct mtx p_profmtx; /* Lock for the profiling */ struct ksiginfo *p_ksi; /* Locked by parent proc lock */ sigqueue_t p_sigqueue; /* (c) Sigs not delivered to a td. */ #define p_siglist p_sigqueue.sq_signals /* The following fields are all zeroed upon creation in fork. */ #define p_startzero p_oppid pid_t p_oppid; /* (c + e) Save ppid in ptrace. XXX */ struct vmspace *p_vmspace; /* (b) Address space. */ u_int p_swtick; /* (c) Tick when swapped in or out. */ u_int p_cowgen; /* (c) Generation of COW pointers. */ struct itimerval p_realtimer; /* (c) Alarm timer. */ struct rusage p_ru; /* (a) Exit information. */ struct rusage_ext p_rux; /* (cu) Internal resource usage. */ struct rusage_ext p_crux; /* (c) Internal child resource usage. */ int p_profthreads; /* (c) Num threads in addupc_task. */ volatile int p_exitthreads; /* (j) Number of threads exiting */ int p_traceflag; /* (o) Kernel trace points. */ struct vnode *p_tracevp; /* (c + o) Trace to vnode. */ struct ucred *p_tracecred; /* (o) Credentials to trace with. */ struct vnode *p_textvp; /* (b) Vnode of executable. */ u_int p_lock; /* (c) Proclock (prevent swap) count. */ struct sigiolst p_sigiolst; /* (c) List of sigio sources. */ int p_sigparent; /* (c) Signal to parent on exit. */ int p_sig; /* (n) For core dump/debugger XXX. */ u_long p_code; /* (n) For core dump/debugger XXX. */ u_int p_stops; /* (c) Stop event bitmask. */ u_int p_stype; /* (c) Stop event type. */ char p_step; /* (c) Process is stopped. */ u_char p_pfsflags; /* (c) Procfs flags. */ u_int p_ptevents; /* (c + e) ptrace() event mask. */ struct nlminfo *p_nlminfo; /* (?) Only used by/for lockd. */ struct kaioinfo *p_aioinfo; /* (y) ASYNC I/O info. */ struct thread *p_singlethread;/* (c + j) If single threading this is it */ int p_suspcount; /* (j) Num threads in suspended mode. */ struct thread *p_xthread; /* (c) Trap thread */ int p_boundary_count;/* (j) Num threads at user boundary */ int p_pendingcnt; /* how many signals are pending */ struct itimers *p_itimers; /* (c) POSIX interval timers. */ struct procdesc *p_procdesc; /* (e) Process descriptor, if any. */ u_int p_treeflag; /* (e) P_TREE flags */ int p_pendingexits; /* (c) Count of pending thread exits. */ struct filemon *p_filemon; /* (c) filemon-specific data. */ int p_pdeathsig; /* (c) Signal from parent on exit. */ /* End area that is zeroed on creation. */ #define p_endzero p_magic /* The following fields are all copied upon creation in fork. */ #define p_startcopy p_endzero u_int p_magic; /* (b) Magic number. */ int p_osrel; /* (x) osreldate for the binary (from ELF note, if any) */ char p_comm[MAXCOMLEN + 1]; /* (x) Process name. */ struct sysentvec *p_sysent; /* (b) Syscall dispatch info. */ struct pargs *p_args; /* (c) Process arguments. */ rlim_t p_cpulimit; /* (c) Current CPU limit in seconds. */ signed char p_nice; /* (c) Process "nice" value. */ int p_fibnum; /* in this routing domain XXX MRT */ pid_t p_reapsubtree; /* (e) Pid of the direct child of the reaper which spawned our subtree. */ uint16_t p_elf_machine; /* (x) ELF machine type */ uint64_t p_elf_flags; /* (x) ELF flags */ /* End area that is copied on creation. 
*/ #define p_endcopy p_xexit u_int p_xexit; /* (c) Exit code. */ u_int p_xsig; /* (c) Stop/kill sig. */ struct pgrp *p_pgrp; /* (c + e) Pointer to process group. */ struct knlist *p_klist; /* (c) Knotes attached to this proc. */ int p_numthreads; /* (c) Number of threads. */ struct mdproc p_md; /* Any machine-dependent fields. */ struct callout p_itcallout; /* (h + c) Interval timer callout. */ u_short p_acflag; /* (c) Accounting flags. */ struct proc *p_peers; /* (r) */ struct proc *p_leader; /* (b) */ void *p_emuldata; /* (c) Emulator state data. */ struct label *p_label; /* (*) Proc (not subject) MAC label. */ STAILQ_HEAD(, ktr_request) p_ktr; /* (o) KTR event queue. */ LIST_HEAD(, mqueue_notifier) p_mqnotifier; /* (c) mqueue notifiers.*/ struct kdtrace_proc *p_dtrace; /* (*) DTrace-specific data. */ struct cv p_pwait; /* (*) wait cv for exit/exec. */ uint64_t p_prev_runtime; /* (c) Resource usage accounting. */ struct racct *p_racct; /* (b) Resource accounting. */ int p_throttled; /* (c) Flag for racct pcpu throttling */ /* * An orphan is the child that has been re-parented to the * debugger as a result of attaching to it. Need to keep * track of them for parent to be able to collect the exit * status of what used to be children. */ LIST_ENTRY(proc) p_orphan; /* (e) List of orphan processes. */ LIST_HEAD(, proc) p_orphans; /* (e) Pointer to list of orphans. */ }; #define p_session p_pgrp->pg_session #define p_pgid p_pgrp->pg_id #define NOCPU (-1) /* For when we aren't on a CPU. */ #define NOCPU_OLD (255) #define MAXCPU_OLD (254) #define PROC_SLOCK(p) mtx_lock_spin(&(p)->p_slock) #define PROC_SUNLOCK(p) mtx_unlock_spin(&(p)->p_slock) #define PROC_SLOCK_ASSERT(p, type) mtx_assert(&(p)->p_slock, (type)) #define PROC_STATLOCK(p) mtx_lock_spin(&(p)->p_statmtx) #define PROC_STATUNLOCK(p) mtx_unlock_spin(&(p)->p_statmtx) #define PROC_STATLOCK_ASSERT(p, type) mtx_assert(&(p)->p_statmtx, (type)) #define PROC_ITIMLOCK(p) mtx_lock_spin(&(p)->p_itimmtx) #define PROC_ITIMUNLOCK(p) mtx_unlock_spin(&(p)->p_itimmtx) #define PROC_ITIMLOCK_ASSERT(p, type) mtx_assert(&(p)->p_itimmtx, (type)) #define PROC_PROFLOCK(p) mtx_lock_spin(&(p)->p_profmtx) #define PROC_PROFUNLOCK(p) mtx_unlock_spin(&(p)->p_profmtx) #define PROC_PROFLOCK_ASSERT(p, type) mtx_assert(&(p)->p_profmtx, (type)) /* These flags are kept in p_flag. */ #define P_ADVLOCK 0x00001 /* Process may hold a POSIX advisory lock. */ #define P_CONTROLT 0x00002 /* Has a controlling terminal. */ #define P_KPROC 0x00004 /* Kernel process. */ #define P_UNUSED3 0x00008 /* --available-- */ #define P_PPWAIT 0x00010 /* Parent is waiting for child to exec/exit. */ #define P_PROFIL 0x00020 /* Has started profiling. */ #define P_STOPPROF 0x00040 /* Has thread requesting to stop profiling. */ #define P_HADTHREADS 0x00080 /* Has had threads (no cleanup shortcuts) */ #define P_SUGID 0x00100 /* Had set id privileges since last exec. */ #define P_SYSTEM 0x00200 /* System proc: no sigs, stats or swapping. */ #define P_SINGLE_EXIT 0x00400 /* Threads suspending should exit, not wait. */ #define P_TRACED 0x00800 /* Debugged process being traced. */ #define P_WAITED 0x01000 /* Someone is waiting for us. */ #define P_WEXIT 0x02000 /* Working on exiting. */ #define P_EXEC 0x04000 /* Process called exec. */ #define P_WKILLED 0x08000 /* Killed, go to kernel/user boundary ASAP. */ #define P_CONTINUED 0x10000 /* Proc has continued from a stopped state. */ #define P_STOPPED_SIG 0x20000 /* Stopped due to SIGSTOP/SIGTSTP. 
*/ #define P_STOPPED_TRACE 0x40000 /* Stopped because of tracing. */ #define P_STOPPED_SINGLE 0x80000 /* Only 1 thread can continue (not to user). */ #define P_PROTECTED 0x100000 /* Do not kill on memory overcommit. */ #define P_SIGEVENT 0x200000 /* Process pending signals changed. */ #define P_SINGLE_BOUNDARY 0x400000 /* Threads should suspend at user boundary. */ #define P_HWPMC 0x800000 /* Process is using HWPMCs */ #define P_JAILED 0x1000000 /* Process is in jail. */ #define P_TOTAL_STOP 0x2000000 /* Stopped in stop_all_proc. */ #define P_INEXEC 0x4000000 /* Process is in execve(). */ #define P_STATCHILD 0x8000000 /* Child process stopped or exited. */ #define P_INMEM 0x10000000 /* Loaded into memory. */ #define P_SWAPPINGOUT 0x20000000 /* Process is being swapped out. */ #define P_SWAPPINGIN 0x40000000 /* Process is being swapped in. */ #define P_PPTRACE 0x80000000 /* PT_TRACEME by vforked child. */ #define P_STOPPED (P_STOPPED_SIG|P_STOPPED_SINGLE|P_STOPPED_TRACE) #define P_SHOULDSTOP(p) ((p)->p_flag & P_STOPPED) #define P_KILLED(p) ((p)->p_flag & P_WKILLED) /* These flags are kept in p_flag2. */ #define P2_INHERIT_PROTECTED 0x00000001 /* New children get P_PROTECTED. */ #define P2_NOTRACE 0x00000002 /* No ptrace(2) attach or coredumps. */ #define P2_NOTRACE_EXEC 0x00000004 /* Keep P2_NOPTRACE on exec(2). */ #define P2_AST_SU 0x00000008 /* Handles SU ast for kthreads. */ #define P2_PTRACE_FSTP 0x00000010 /* SIGSTOP from PT_ATTACH not yet handled. */ #define P2_TRAPCAP 0x00000020 /* SIGTRAP on ENOTCAPABLE */ /* Flags protected by proctree_lock, kept in p_treeflags. */ #define P_TREE_ORPHANED 0x00000001 /* Reparented, on orphan list */ #define P_TREE_FIRST_ORPHAN 0x00000002 /* First element of orphan list */ #define P_TREE_REAPER 0x00000004 /* Reaper of subtree */ /* * These were process status values (p_stat), now they are only used in * legacy conversion code. */ #define SIDL 1 /* Process being created by fork. */ #define SRUN 2 /* Currently runnable. */ #define SSLEEP 3 /* Sleeping on an address. */ #define SSTOP 4 /* Process debugging or suspension. */ #define SZOMB 5 /* Awaiting collection by parent. */ #define SWAIT 6 /* Waiting for interrupt. */ #define SLOCK 7 /* Blocked on a lock. */ #define P_MAGIC 0xbeefface #ifdef _KERNEL /* Types and flags for mi_switch(). */ #define SW_TYPE_MASK 0xff /* First 8 bits are switch type */ #define SWT_NONE 0 /* Unspecified switch. */ #define SWT_PREEMPT 1 /* Switching due to preemption. */ #define SWT_OWEPREEMPT 2 /* Switching due to owepreempt. */ #define SWT_TURNSTILE 3 /* Turnstile contention. */ #define SWT_SLEEPQ 4 /* Sleepq wait. */ #define SWT_SLEEPQTIMO 5 /* Sleepq timeout wait. */ #define SWT_RELINQUISH 6 /* yield call. */ #define SWT_NEEDRESCHED 7 /* NEEDRESCHED was set. */ #define SWT_IDLE 8 /* Switching from the idle thread. */ #define SWT_IWAIT 9 /* Waiting for interrupts. */ #define SWT_SUSPEND 10 /* Thread suspended. */ #define SWT_REMOTEPREEMPT 11 /* Remote processor preempted. */ #define SWT_REMOTEWAKEIDLE 12 /* Remote processor preempted idle. */ #define SWT_COUNT 13 /* Number of switch types. */ /* Flags */ #define SW_VOL 0x0100 /* Voluntary switch. */ #define SW_INVOL 0x0200 /* Involuntary switch. */ #define SW_PREEMPT 0x0400 /* The invol switch is a preemption */ /* How values for thread_single(). 
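The SWT_* switch types and SW_* flags above are combined into the single flags argument of mi_switch() (declared further down in this header). A sketch of a voluntary yield modeled loosely on kern_yield(); it assumes the two-argument mi_switch() of this era and that the thread lock is held across the call, and it omits the Giant and priority handling a real yield path performs:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>

/* Hypothetical sketch: voluntarily give up the CPU. */
static void
example_yield(void)
{
	struct thread *td;

	td = curthread;
	thread_lock(td);
	/* Voluntary switch, accounted as an explicit relinquish. */
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
	thread_unlock(td);
}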
*/ #define SINGLE_NO_EXIT 0 #define SINGLE_EXIT 1 #define SINGLE_BOUNDARY 2 #define SINGLE_ALLPROC 3 #ifdef MALLOC_DECLARE MALLOC_DECLARE(M_PARGS); MALLOC_DECLARE(M_PGRP); MALLOC_DECLARE(M_SESSION); MALLOC_DECLARE(M_SUBPROC); #endif #define FOREACH_PROC_IN_SYSTEM(p) \ LIST_FOREACH((p), &allproc, p_list) #define FOREACH_THREAD_IN_PROC(p, td) \ TAILQ_FOREACH((td), &(p)->p_threads, td_plist) #define FIRST_THREAD_IN_PROC(p) TAILQ_FIRST(&(p)->p_threads) /* * We use process IDs <= pid_max <= PID_MAX; PID_MAX + 1 must also fit * in a pid_t, as it is used to represent "no process group". */ #define PID_MAX 99999 #define NO_PID 100000 extern pid_t pid_max; #define SESS_LEADER(p) ((p)->p_session->s_leader == (p)) #define STOPEVENT(p, e, v) do { \ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, \ "checking stopevent %d", (e)); \ if ((p)->p_stops & (e)) { \ PROC_LOCK(p); \ stopevent((p), (e), (v)); \ PROC_UNLOCK(p); \ } \ } while (0) #define _STOPEVENT(p, e, v) do { \ PROC_LOCK_ASSERT(p, MA_OWNED); \ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &p->p_mtx.lock_object, \ "checking stopevent %d", (e)); \ if ((p)->p_stops & (e)) \ stopevent((p), (e), (v)); \ } while (0) /* Lock and unlock a process. */ #define PROC_LOCK(p) mtx_lock(&(p)->p_mtx) #define PROC_TRYLOCK(p) mtx_trylock(&(p)->p_mtx) #define PROC_UNLOCK(p) mtx_unlock(&(p)->p_mtx) #define PROC_LOCKED(p) mtx_owned(&(p)->p_mtx) #define PROC_LOCK_ASSERT(p, type) mtx_assert(&(p)->p_mtx, (type)) /* Lock and unlock a process group. */ #define PGRP_LOCK(pg) mtx_lock(&(pg)->pg_mtx) #define PGRP_UNLOCK(pg) mtx_unlock(&(pg)->pg_mtx) #define PGRP_LOCKED(pg) mtx_owned(&(pg)->pg_mtx) #define PGRP_LOCK_ASSERT(pg, type) mtx_assert(&(pg)->pg_mtx, (type)) #define PGRP_LOCK_PGSIGNAL(pg) do { \ if ((pg) != NULL) \ PGRP_LOCK(pg); \ } while (0) #define PGRP_UNLOCK_PGSIGNAL(pg) do { \ if ((pg) != NULL) \ PGRP_UNLOCK(pg); \ } while (0) /* Lock and unlock a session. */ #define SESS_LOCK(s) mtx_lock(&(s)->s_mtx) #define SESS_UNLOCK(s) mtx_unlock(&(s)->s_mtx) #define SESS_LOCKED(s) mtx_owned(&(s)->s_mtx) #define SESS_LOCK_ASSERT(s, type) mtx_assert(&(s)->s_mtx, (type)) /* * Non-zero p_lock ensures that: * - exit1() is not performed until p_lock reaches zero; * - the process' threads stack are not swapped out if they are currently * not (P_INMEM). * * PHOLD() asserts that the process (except the current process) is * not exiting, increments p_lock and swaps threads stacks into memory, * if needed. * _PHOLD() is same as PHOLD(), it takes the process locked. * _PHOLD_LITE() also takes the process locked, but comparing with * _PHOLD(), it only guarantees that exit1() is not executed, * faultin() is not called. 
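As a usage sketch for the iteration and locking macros above: the p_threads list is walked with the process lock held. The helper below is hypothetical (a real caller could simply read p_numthreads); it only illustrates the FOREACH_THREAD_IN_PROC / PROC_LOCK pairing:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>

/* Hypothetical sketch: count the threads of a process. */
static int
example_count_threads(struct proc *p)
{
	struct thread *td;
	int n;

	n = 0;
	PROC_LOCK(p);
	FOREACH_THREAD_IN_PROC(p, td)
		n++;
	PROC_UNLOCK(p);
	return (n);
}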
*/ #define PHOLD(p) do { \ PROC_LOCK(p); \ _PHOLD(p); \ PROC_UNLOCK(p); \ } while (0) #define _PHOLD(p) do { \ PROC_LOCK_ASSERT((p), MA_OWNED); \ KASSERT(!((p)->p_flag & P_WEXIT) || (p) == curproc, \ ("PHOLD of exiting process %p", p)); \ (p)->p_lock++; \ if (((p)->p_flag & P_INMEM) == 0) \ faultin((p)); \ } while (0) #define _PHOLD_LITE(p) do { \ PROC_LOCK_ASSERT((p), MA_OWNED); \ KASSERT(!((p)->p_flag & P_WEXIT) || (p) == curproc, \ ("PHOLD of exiting process %p", p)); \ (p)->p_lock++; \ } while (0) #define PROC_ASSERT_HELD(p) do { \ KASSERT((p)->p_lock > 0, ("process %p not held", p)); \ } while (0) #define PRELE(p) do { \ PROC_LOCK((p)); \ _PRELE((p)); \ PROC_UNLOCK((p)); \ } while (0) #define _PRELE(p) do { \ PROC_LOCK_ASSERT((p), MA_OWNED); \ PROC_ASSERT_HELD(p); \ (--(p)->p_lock); \ if (((p)->p_flag & P_WEXIT) && (p)->p_lock == 0) \ wakeup(&(p)->p_lock); \ } while (0) #define PROC_ASSERT_NOT_HELD(p) do { \ KASSERT((p)->p_lock == 0, ("process %p held", p)); \ } while (0) #define PROC_UPDATE_COW(p) do { \ PROC_LOCK_ASSERT((p), MA_OWNED); \ (p)->p_cowgen++; \ } while (0) /* Check whether a thread is safe to be swapped out. */ #define thread_safetoswapout(td) ((td)->td_flags & TDF_CANSWAP) /* Control whether or not it is safe for curthread to sleep. */ #define THREAD_NO_SLEEPING() ((curthread)->td_no_sleeping++) #define THREAD_SLEEPING_OK() ((curthread)->td_no_sleeping--) #define THREAD_CAN_SLEEP() ((curthread)->td_no_sleeping == 0) #define PIDHASH(pid) (&pidhashtbl[(pid) & pidhash]) extern LIST_HEAD(pidhashhead, proc) *pidhashtbl; extern u_long pidhash; #define TIDHASH(tid) (&tidhashtbl[(tid) & tidhash]) extern LIST_HEAD(tidhashhead, thread) *tidhashtbl; extern u_long tidhash; extern struct rwlock tidhash_lock; #define PGRPHASH(pgid) (&pgrphashtbl[(pgid) & pgrphash]) extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl; extern u_long pgrphash; extern struct sx allproc_lock; extern int allproc_gen; extern struct sx proctree_lock; extern struct mtx ppeers_lock; extern struct proc proc0; /* Process slot for swapper. */ extern struct thread0_storage thread0_st; /* Primary thread in proc0. */ #define thread0 (thread0_st.t0st_thread) extern struct vmspace vmspace0; /* VM space for proc0. */ extern int hogticks; /* Limit on kernel cpu hogs. */ extern int lastpid; extern int nprocs, maxproc; /* Current and max number of procs. */ extern int maxprocperuid; /* Max procs per uid. */ extern u_long ps_arg_cache_limit; LIST_HEAD(proclist, proc); TAILQ_HEAD(procqueue, proc); TAILQ_HEAD(threadqueue, thread); extern struct proclist allproc; /* List of all processes. */ extern struct proclist zombproc; /* List of zombie processes. */ extern struct proc *initproc, *pageproc; /* Process slots for init, pager. */ extern struct uma_zone *proc_zone; struct proc *pfind(pid_t); /* Find process by id. */ struct proc *pfind_any(pid_t); /* Find (zombie) process by id. */ struct proc *pfind_locked(pid_t pid); struct pgrp *pgfind(pid_t); /* Find process group by id. */ struct proc *zpfind(pid_t); /* Find zombie process by id. */ struct fork_req { int fr_flags; int fr_pages; int *fr_pidp; struct proc **fr_procp; int *fr_pd_fd; int fr_pd_flags; struct filecaps *fr_pd_fcaps; }; /* * pget() flags. */ #define PGET_HOLD 0x00001 /* Hold the process. */ #define PGET_CANSEE 0x00002 /* Check against p_cansee(). */ #define PGET_CANDEBUG 0x00004 /* Check against p_candebug(). */ #define PGET_ISCURRENT 0x00008 /* Check that the found process is current. */ #define PGET_NOTWEXIT 0x00010 /* Check that the process is not in P_WEXIT. 
*/ #define PGET_NOTINEXEC 0x00020 /* Check that the process is not in P_INEXEC. */ #define PGET_NOTID 0x00040 /* Do not assume tid if pid > PID_MAX. */ #define PGET_WANTREAD (PGET_HOLD | PGET_CANDEBUG | PGET_NOTWEXIT) int pget(pid_t pid, int flags, struct proc **pp); void ast(struct trapframe *framep); struct thread *choosethread(void); int cr_cansee(struct ucred *u1, struct ucred *u2); int cr_canseesocket(struct ucred *cred, struct socket *so); int cr_canseeothergids(struct ucred *u1, struct ucred *u2); int cr_canseeotheruids(struct ucred *u1, struct ucred *u2); int cr_canseejailproc(struct ucred *u1, struct ucred *u2); int cr_cansignal(struct ucred *cred, struct proc *proc, int signum); int enterpgrp(struct proc *p, pid_t pgid, struct pgrp *pgrp, struct session *sess); int enterthispgrp(struct proc *p, struct pgrp *pgrp); void faultin(struct proc *p); void fixjobc(struct proc *p, struct pgrp *pgrp, int entering); int fork1(struct thread *, struct fork_req *); void fork_exit(void (*)(void *, struct trapframe *), void *, struct trapframe *); void fork_return(struct thread *, struct trapframe *); int inferior(struct proc *p); void kern_proc_vmmap_resident(struct vm_map *map, struct vm_map_entry *entry, int *resident_count, bool *super); void kern_yield(int); void kick_proc0(void); void killjobc(void); int leavepgrp(struct proc *p); int maybe_preempt(struct thread *td); void maybe_yield(void); void mi_switch(int flags, struct thread *newtd); int p_candebug(struct thread *td, struct proc *p); int p_cansee(struct thread *td, struct proc *p); int p_cansched(struct thread *td, struct proc *p); int p_cansignal(struct thread *td, struct proc *p, int signum); int p_canwait(struct thread *td, struct proc *p); struct pargs *pargs_alloc(int len); void pargs_drop(struct pargs *pa); void pargs_hold(struct pargs *pa); int proc_getargv(struct thread *td, struct proc *p, struct sbuf *sb); int proc_getauxv(struct thread *td, struct proc *p, struct sbuf *sb); int proc_getenvv(struct thread *td, struct proc *p, struct sbuf *sb); void procinit(void); void proc_linkup0(struct proc *p, struct thread *td); void proc_linkup(struct proc *p, struct thread *td); struct proc *proc_realparent(struct proc *child); void proc_reap(struct thread *td, struct proc *p, int *status, int options); void proc_reparent(struct proc *child, struct proc *newparent); void proc_set_traced(struct proc *p, bool stop); void proc_wkilled(struct proc *p); struct pstats *pstats_alloc(void); void pstats_fork(struct pstats *src, struct pstats *dst); void pstats_free(struct pstats *ps); void reaper_abandon_children(struct proc *p, bool exiting); int securelevel_ge(struct ucred *cr, int level); int securelevel_gt(struct ucred *cr, int level); void sess_hold(struct session *); void sess_release(struct session *); int setrunnable(struct thread *); void setsugid(struct proc *p); int should_yield(void); int sigonstack(size_t sp); void stopevent(struct proc *, u_int, u_int); struct thread *tdfind(lwpid_t, pid_t); void threadinit(void); void tidhash_add(struct thread *); void tidhash_remove(struct thread *); void cpu_idle(int); int cpu_idle_wakeup(int); extern void (*cpu_idle_hook)(sbintime_t); /* Hook to machdep CPU idler. 
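A sketch of the pget() interface declared above. It assumes, per the PGET_HOLD description, that a successful PGET_WANTREAD lookup returns the process held (p_lock bumped) rather than locked, so the caller takes the proc lock itself and drops the hold with PRELE() when done; the helper name and body are illustrative only:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>

/* Hypothetical sketch: copy out a process name for debugger-style access. */
static int
example_read_comm(pid_t pid, char *buf, size_t len)
{
	struct proc *p;
	int error;

	error = pget(pid, PGET_WANTREAD, &p);
	if (error != 0)
		return (error);
	/* Held, not locked (assumption): exit1() is blocked until PRELE(). */
	PROC_LOCK(p);
	strlcpy(buf, p->p_comm, len);
	PROC_UNLOCK(p);
	PRELE(p);
	return (0);
}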
*/ void cpu_switch(struct thread *, struct thread *, struct mtx *); void cpu_throw(struct thread *, struct thread *) __dead2; void unsleep(struct thread *); void userret(struct thread *, struct trapframe *); void cpu_exit(struct thread *); void exit1(struct thread *, int, int) __dead2; void cpu_copy_thread(struct thread *td, struct thread *td0); int cpu_fetch_syscall_args(struct thread *td); void cpu_fork(struct thread *, struct proc *, struct thread *, int); void cpu_fork_kthread_handler(struct thread *, void (*)(void *), void *); void cpu_set_syscall_retval(struct thread *, int); void cpu_set_upcall(struct thread *, void (*)(void *), void *, stack_t *); int cpu_set_user_tls(struct thread *, void *tls_base); void cpu_thread_alloc(struct thread *); void cpu_thread_clean(struct thread *); void cpu_thread_exit(struct thread *); void cpu_thread_free(struct thread *); void cpu_thread_swapin(struct thread *); void cpu_thread_swapout(struct thread *); struct thread *thread_alloc(int pages); int thread_alloc_stack(struct thread *, int pages); void thread_cow_get_proc(struct thread *newtd, struct proc *p); void thread_cow_get(struct thread *newtd, struct thread *td); void thread_cow_free(struct thread *td); void thread_cow_update(struct thread *td); int thread_create(struct thread *td, struct rtprio *rtp, int (*initialize_thread)(struct thread *, void *), void *thunk); void thread_exit(void) __dead2; void thread_free(struct thread *td); void thread_link(struct thread *td, struct proc *p); void thread_reap(void); int thread_single(struct proc *p, int how); void thread_single_end(struct proc *p, int how); void thread_stash(struct thread *td); void thread_stopped(struct proc *p); void childproc_stopped(struct proc *child, int reason); void childproc_continued(struct proc *child); void childproc_exited(struct proc *child); int thread_suspend_check(int how); bool thread_suspend_check_needed(void); void thread_suspend_switch(struct thread *, struct proc *p); void thread_suspend_one(struct thread *td); void thread_unlink(struct thread *td); void thread_unsuspend(struct proc *p); void thread_wait(struct proc *p); struct thread *thread_find(struct proc *p, lwpid_t tid); void stop_all_proc(void); void resume_all_proc(void); static __inline int curthread_pflags_set(int flags) { struct thread *td; int save; td = curthread; save = ~flags | (td->td_pflags & flags); td->td_pflags |= flags; return (save); } static __inline void curthread_pflags_restore(int save) { curthread->td_pflags &= save; } static __inline __pure2 struct td_sched * td_get_sched(struct thread *td) { return ((struct td_sched *)&td[1]); } extern void (*softdep_ast_cleanup)(struct thread *); static __inline void td_softdep_cleanup(struct thread *td) { if (td->td_su != NULL && softdep_ast_cleanup != NULL) softdep_ast_cleanup(td); } #endif /* _KERNEL */ #endif /* !_SYS_PROC_H_ */ Index: projects/clang700-import/sys/ufs/ffs/ffs_extern.h =================================================================== --- projects/clang700-import/sys/ufs/ffs/ffs_extern.h (revision 340426) +++ projects/clang700-import/sys/ufs/ffs/ffs_extern.h (revision 340427) @@ -1,225 +1,225 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1991, 1993, 1994 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ffs_extern.h 8.6 (Berkeley) 3/30/95 * $FreeBSD$ */ #ifndef _UFS_FFS_EXTERN_H #define _UFS_FFS_EXTERN_H #ifndef _KERNEL #error "No user-serving parts inside" #else struct buf; struct cg; struct fid; struct fs; struct inode; struct malloc_type; struct mount; struct thread; struct sockaddr; struct statfs; struct ucred; struct vnode; struct vop_fsync_args; struct vop_reallocblks_args; struct workhead; int ffs_alloc(struct inode *, ufs2_daddr_t, ufs2_daddr_t, int, int, struct ucred *, ufs2_daddr_t *); int ffs_balloc_ufs1(struct vnode *a_vp, off_t a_startoffset, int a_size, struct ucred *a_cred, int a_flags, struct buf **a_bpp); int ffs_balloc_ufs2(struct vnode *a_vp, off_t a_startoffset, int a_size, struct ucred *a_cred, int a_flags, struct buf **a_bpp); int ffs_blkatoff(struct vnode *, off_t, char **, struct buf **); void ffs_blkfree(struct ufsmount *, struct fs *, struct vnode *, ufs2_daddr_t, long, ino_t, enum vtype, struct workhead *, u_long); ufs2_daddr_t ffs_blkpref_ufs1(struct inode *, ufs_lbn_t, int, ufs1_daddr_t *); ufs2_daddr_t ffs_blkpref_ufs2(struct inode *, ufs_lbn_t, int, ufs2_daddr_t *); void ffs_blkrelease_finish(struct ufsmount *, u_long); u_long ffs_blkrelease_start(struct ufsmount *, struct vnode *, ino_t); int ffs_checkfreefile(struct fs *, struct vnode *, ino_t); void ffs_clrblock(struct fs *, u_char *, ufs1_daddr_t); void ffs_clusteracct(struct fs *, struct cg *, ufs1_daddr_t, int); void ffs_bdflush(struct bufobj *, struct buf *); int ffs_copyonwrite(struct vnode *, struct buf *); int ffs_flushfiles(struct mount *, int, struct thread *); void ffs_fragacct(struct fs *, int, int32_t [], int); int ffs_freefile(struct ufsmount *, struct fs *, struct vnode *, ino_t, int, struct workhead *); void ffs_fserr(struct fs *, ino_t, char *); int ffs_getcg(struct fs *, struct vnode *, u_int, struct buf **, struct cg **); int ffs_isblock(struct fs *, u_char *, ufs1_daddr_t); int ffs_isfreeblock(struct fs *, u_char *, ufs1_daddr_t); -void ffs_load_inode(struct buf *, struct inode *, struct fs *, ino_t); +int ffs_load_inode(struct buf *, struct inode *, struct fs *, ino_t); void ffs_oldfscompat_write(struct fs *, struct ufsmount *); int ffs_own_mount(const struct mount *mp); int ffs_reallocblks(struct 
vop_reallocblks_args *); int ffs_realloccg(struct inode *, ufs2_daddr_t, ufs2_daddr_t, ufs2_daddr_t, int, int, int, struct ucred *, struct buf **); int ffs_reload(struct mount *, struct thread *, int); int ffs_sbget(void *, struct fs **, off_t, struct malloc_type *, int (*)(void *, off_t, void **, int)); int ffs_sbput(void *, struct fs *, off_t, int (*)(void *, off_t, void *, int)); int ffs_sbupdate(struct ufsmount *, int, int); void ffs_setblock(struct fs *, u_char *, ufs1_daddr_t); int ffs_snapblkfree(struct fs *, struct vnode *, ufs2_daddr_t, long, ino_t, enum vtype, struct workhead *); void ffs_snapremove(struct vnode *vp); int ffs_snapshot(struct mount *mp, char *snapfile); void ffs_snapshot_mount(struct mount *mp); void ffs_snapshot_unmount(struct mount *mp); void ffs_susp_initialize(void); void ffs_susp_uninitialize(void); void ffs_sync_snap(struct mount *, int); int ffs_syncvnode(struct vnode *vp, int waitfor, int flags); int ffs_truncate(struct vnode *, off_t, int, struct ucred *); int ffs_update(struct vnode *, int); int ffs_valloc(struct vnode *, int, struct ucred *, struct vnode **); int ffs_vfree(struct vnode *, ino_t, int); vfs_vget_t ffs_vget; int ffs_vgetf(struct mount *, ino_t, int, struct vnode **, int); void process_deferred_inactive(struct mount *mp); /* * Flags to ffs_vgetf */ #define FFSV_FORCEINSMQ 0x0001 /* * Flags to ffs_reload */ #define FFSR_FORCE 0x0001 #define FFSR_UNSUSPEND 0x0002 /* * Definitions for TRIM interface * * Special keys and recommended hash table size */ #define NOTRIM_KEY 1 /* never written, so don't call trim for it */ #define SINGLETON_KEY 2 /* only block being freed, so trim it now */ #define FIRST_VALID_KEY 3 /* first valid key describing a block range */ #define MAXTRIMIO 1024 /* maximum expected outstanding trim requests */ extern struct vop_vector ffs_vnodeops1; extern struct vop_vector ffs_fifoops1; extern struct vop_vector ffs_vnodeops2; extern struct vop_vector ffs_fifoops2; /* * Soft update function prototypes. 
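The TRIM keys above follow a small protocol: ffs_blkrelease_start() opens a batch, each ffs_blkfree() in the batch passes the same key, and ffs_blkrelease_finish() closes it, which is the pattern ffs_truncate() and ffs_indirtrunc() use later in this diff. A condensed, hypothetical sketch of that pattern (the helper, its arguments, and the approximate header list are illustrative, not part of this change):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

/* Hypothetical sketch: free a batch of blocks under one TRIM key. */
static void
example_free_blocks(struct ufsmount *ump, struct fs *fs, struct vnode *vp,
    struct inode *ip, ufs2_daddr_t *blks, int nblks, long bsize)
{
	u_long key;
	int i;

	key = ffs_blkrelease_start(ump, ump->um_devvp, ip->i_number);
	for (i = 0; i < nblks; i++) {
		if (blks[i] == 0)
			continue;
		ffs_blkfree(ump, fs, ump->um_devvp, blks[i], bsize,
		    ip->i_number, vp->v_type, NULL, key);
	}
	ffs_blkrelease_finish(ump, key);
}

Grouping the frees under one key allows the TRIM requests for adjacent freed blocks to be consolidated; SINGLETON_KEY covers the one-block case, and NOTRIM_KEY marks blocks that were never written and so need no TRIM.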
*/ int softdep_check_suspend(struct mount *, struct vnode *, int, int, int, int); void softdep_get_depcounts(struct mount *, int *, int *); void softdep_initialize(void); void softdep_uninitialize(void); int softdep_mount(struct vnode *, struct mount *, struct fs *, struct ucred *); void softdep_unmount(struct mount *); int softdep_move_dependencies(struct buf *, struct buf *); int softdep_flushworklist(struct mount *, int *, struct thread *); int softdep_flushfiles(struct mount *, int, struct thread *); void softdep_update_inodeblock(struct inode *, struct buf *, int); void softdep_load_inodeblock(struct inode *); void softdep_freefile(struct vnode *, ino_t, int); int softdep_request_cleanup(struct fs *, struct vnode *, struct ucred *, int); void softdep_setup_freeblocks(struct inode *, off_t, int); void softdep_setup_inomapdep(struct buf *, struct inode *, ino_t, int); void softdep_setup_blkmapdep(struct buf *, struct mount *, ufs2_daddr_t, int, int); void softdep_setup_allocdirect(struct inode *, ufs_lbn_t, ufs2_daddr_t, ufs2_daddr_t, long, long, struct buf *); void softdep_setup_allocext(struct inode *, ufs_lbn_t, ufs2_daddr_t, ufs2_daddr_t, long, long, struct buf *); void softdep_setup_allocindir_meta(struct buf *, struct inode *, struct buf *, int, ufs2_daddr_t); void softdep_setup_allocindir_page(struct inode *, ufs_lbn_t, struct buf *, int, ufs2_daddr_t, ufs2_daddr_t, struct buf *); void softdep_setup_blkfree(struct mount *, struct buf *, ufs2_daddr_t, int, struct workhead *); void softdep_setup_inofree(struct mount *, struct buf *, ino_t, struct workhead *); void softdep_setup_sbupdate(struct ufsmount *, struct fs *, struct buf *); void softdep_fsync_mountdev(struct vnode *); int softdep_sync_metadata(struct vnode *); int softdep_sync_buf(struct vnode *, struct buf *, int); int softdep_fsync(struct vnode *); int softdep_prealloc(struct vnode *, int); int softdep_journal_lookup(struct mount *, struct vnode **); void softdep_journal_freeblocks(struct inode *, struct ucred *, off_t, int); void softdep_journal_fsync(struct inode *); void softdep_buf_append(struct buf *, struct workhead *); void softdep_inode_append(struct inode *, struct ucred *, struct workhead *); void softdep_freework(struct workhead *); /* * Things to request flushing in softdep_request_cleanup() */ #define FLUSH_INODES 1 #define FLUSH_INODES_WAIT 2 #define FLUSH_BLOCKS 3 #define FLUSH_BLOCKS_WAIT 4 /* * Flag to ffs_syncvnode() to request flushing of data only, * but skip the ffs_update() on the inode itself. Used to avoid * deadlock when flushing snapshot inodes while holding snaplk. */ #define NO_INO_UPDT 0x00000001 /* * Request data sync only from ffs_syncvnode(), not touching even more * metadata than NO_INO_UPDT. */ #define DATA_ONLY 0x00000002 int ffs_rdonly(struct inode *); TAILQ_HEAD(snaphead, inode); struct snapdata { LIST_ENTRY(snapdata) sn_link; struct snaphead sn_head; daddr_t sn_listsize; daddr_t *sn_blklist; struct lock sn_lock; }; #endif /* _KERNEL */ #endif /* !_UFS_FFS_EXTERN_H */ Index: projects/clang700-import/sys/ufs/ffs/ffs_inode.c =================================================================== --- projects/clang700-import/sys/ufs/ffs/ffs_inode.c (revision 340426) +++ projects/clang700-import/sys/ufs/ffs/ffs_inode.c (revision 340427) @@ -1,778 +1,784 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ffs_inode.c 8.13 (Berkeley) 4/21/95 */ #include __FBSDID("$FreeBSD$"); #include "opt_quota.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int ffs_indirtrunc(struct inode *, ufs2_daddr_t, ufs2_daddr_t, ufs2_daddr_t, int, ufs2_daddr_t *); /* * Update the access, modified, and inode change times as specified by the * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively. Write the inode * to disk if the IN_MODIFIED flag is set (it may be set initially, or by * the timestamp update). The IN_LAZYMOD flag is set to force a write * later if not now. The IN_LAZYACCESS is set instead of IN_MODIFIED if the fs * is currently being suspended (or is suspended) and vnode has been accessed. * If we write now, then clear IN_MODIFIED, IN_LAZYACCESS and IN_LAZYMOD to * reflect the presumably successful write, and if waitfor is set, then wait * for the write to complete. */ int ffs_update(vp, waitfor) struct vnode *vp; int waitfor; { struct fs *fs; struct buf *bp; struct inode *ip; int flags, error; ASSERT_VOP_ELOCKED(vp, "ffs_update"); ufs_itimes(vp); ip = VTOI(vp); if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0) return (0); ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED); fs = ITOFS(ip); if (fs->fs_ronly && ITOUMP(ip)->um_fsckpid == 0) return (0); /* * If we are updating a snapshot and another process is currently * writing the buffer containing the inode for this snapshot then * a deadlock can occur when it tries to check the snapshot to see * if that block needs to be copied. Thus when updating a snapshot * we check to see if the buffer is already locked, and if it is * we drop the snapshot lock until the buffer has been written * and is available to us. We have to grab a reference to the * snapshot vnode to prevent it from being removed while we are * waiting for the buffer. 
*/ flags = 0; if (IS_SNAPSHOT(ip)) flags = GB_LOCK_NOWAIT; loop: error = bread_gb(ITODEVVP(ip), fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int) fs->fs_bsize, NOCRED, flags, &bp); if (error != 0) { if (error != EBUSY) return (error); KASSERT((IS_SNAPSHOT(ip)), ("EBUSY from non-snapshot")); /* * Wait for our inode block to become available. * * Hold a reference to the vnode to protect against * ffs_snapgone(). Since we hold a reference, it can only * get reclaimed (VI_DOOMED flag) in a forcible downgrade * or unmount. For an unmount, the entire filesystem will be * gone, so we cannot attempt to touch anything associated * with it while the vnode is unlocked; all we can do is * pause briefly and try again. If when we relock the vnode * we discover that it has been reclaimed, updating it is no * longer necessary and we can just return an error. */ vref(vp); VOP_UNLOCK(vp, 0); pause("ffsupd", 1); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); vrele(vp); if ((vp->v_iflag & VI_DOOMED) != 0) return (ENOENT); goto loop; } if (DOINGSOFTDEP(vp)) softdep_update_inodeblock(ip, bp, waitfor); else if (ip->i_effnlink != ip->i_nlink) panic("ffs_update: bad link cnt"); if (I_IS_UFS1(ip)) { *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1; - /* XXX: FIX? The entropy here is desirable, but the harvesting may be expensive */ + /* + * XXX: FIX? The entropy here is desirable, + * but the harvesting may be expensive + */ random_harvest_queue(&(ip->i_din1), sizeof(ip->i_din1), RANDOM_FS_ATIME); } else { *((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2; - /* XXX: FIX? The entropy here is desirable, but the harvesting may be expensive */ + /* + * XXX: FIX? The entropy here is desirable, + * but the harvesting may be expensive + */ random_harvest_queue(&(ip->i_din2), sizeof(ip->i_din2), RANDOM_FS_ATIME); } if (waitfor) error = bwrite(bp); else if (vm_page_count_severe() || buf_dirty_count_severe()) { bawrite(bp); error = 0; } else { if (bp->b_bufsize == fs->fs_bsize) bp->b_flags |= B_CLUSTEROK; bdwrite(bp); error = 0; } return (error); } #define SINGLE 0 /* index of single indirect block */ #define DOUBLE 1 /* index of double indirect block */ #define TRIPLE 2 /* index of triple indirect block */ /* * Truncate the inode ip to at most length size, freeing the * disk blocks. */ int ffs_truncate(vp, length, flags, cred) struct vnode *vp; off_t length; int flags; struct ucred *cred; { struct inode *ip; ufs2_daddr_t bn, lbn, lastblock, lastiblock[UFS_NIADDR]; ufs2_daddr_t indir_lbn[UFS_NIADDR], oldblks[UFS_NDADDR + UFS_NIADDR]; ufs2_daddr_t newblks[UFS_NDADDR + UFS_NIADDR]; ufs2_daddr_t count, blocksreleased = 0, datablocks, blkno; struct bufobj *bo; struct fs *fs; struct buf *bp; struct ufsmount *ump; int softdeptrunc, journaltrunc; int needextclean, extblocks; int offset, size, level, nblocks; int i, error, allerror, indiroff, waitforupdate; u_long key; off_t osize; ip = VTOI(vp); ump = VFSTOUFS(vp->v_mount); fs = ump->um_fs; bo = &vp->v_bufobj; ASSERT_VOP_LOCKED(vp, "ffs_truncate"); if (length < 0) return (EINVAL); if (length > fs->fs_maxfilesize) return (EFBIG); #ifdef QUOTA error = getinoquota(ip); if (error) return (error); #endif /* * Historically clients did not have to specify which data * they were truncating. So, if not specified, we assume * traditional behavior, e.g., just the normal data. 
*/ if ((flags & (IO_EXT | IO_NORMAL)) == 0) flags |= IO_NORMAL; if (!DOINGSOFTDEP(vp) && !DOINGASYNC(vp)) flags |= IO_SYNC; waitforupdate = (flags & IO_SYNC) != 0 || !DOINGASYNC(vp); /* * If we are truncating the extended-attributes, and cannot * do it with soft updates, then do it slowly here. If we are * truncating both the extended attributes and the file contents * (e.g., the file is being unlinked), then pick it off with * soft updates below. */ allerror = 0; needextclean = 0; softdeptrunc = 0; journaltrunc = DOINGSUJ(vp); if (journaltrunc == 0 && DOINGSOFTDEP(vp) && length == 0) softdeptrunc = !softdep_slowdown(vp); extblocks = 0; datablocks = DIP(ip, i_blocks); if (fs->fs_magic == FS_UFS2_MAGIC && ip->i_din2->di_extsize > 0) { extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize)); datablocks -= extblocks; } if ((flags & IO_EXT) && extblocks > 0) { if (length != 0) panic("ffs_truncate: partial trunc of extdata"); if (softdeptrunc || journaltrunc) { if ((flags & IO_NORMAL) == 0) goto extclean; needextclean = 1; } else { if ((error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0) return (error); #ifdef QUOTA (void) chkdq(ip, -extblocks, NOCRED, 0); #endif vinvalbuf(vp, V_ALT, 0, 0); vn_pages_remove(vp, OFF_TO_IDX(lblktosize(fs, -extblocks)), 0); osize = ip->i_din2->di_extsize; ip->i_din2->di_blocks -= extblocks; ip->i_din2->di_extsize = 0; for (i = 0; i < UFS_NXADDR; i++) { oldblks[i] = ip->i_din2->di_extb[i]; ip->i_din2->di_extb[i] = 0; } ip->i_flag |= IN_CHANGE; if ((error = ffs_update(vp, waitforupdate))) return (error); for (i = 0; i < UFS_NXADDR; i++) { if (oldblks[i] == 0) continue; ffs_blkfree(ump, fs, ITODEVVP(ip), oldblks[i], sblksize(fs, osize, i), ip->i_number, vp->v_type, NULL, SINGLETON_KEY); } } } if ((flags & IO_NORMAL) == 0) return (0); if (vp->v_type == VLNK && (ip->i_size < vp->v_mount->mnt_maxsymlinklen || datablocks == 0)) { #ifdef INVARIANTS if (length != 0) panic("ffs_truncate: partial truncate of symlink"); #endif bzero(SHORTLINK(ip), (u_int)ip->i_size); ip->i_size = 0; DIP_SET(ip, i_size, 0); ip->i_flag |= IN_CHANGE | IN_UPDATE; if (needextclean) goto extclean; return (ffs_update(vp, waitforupdate)); } if (ip->i_size == length) { ip->i_flag |= IN_CHANGE | IN_UPDATE; if (needextclean) goto extclean; return (ffs_update(vp, 0)); } if (fs->fs_ronly) panic("ffs_truncate: read-only filesystem"); if (IS_SNAPSHOT(ip)) ffs_snapremove(vp); vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; osize = ip->i_size; /* * Lengthen the size of the file. We must ensure that the * last byte of the file is allocated. Since the smallest * value of osize is 0, length will be at least 1. */ if (osize < length) { vnode_pager_setsize(vp, length); flags |= BA_CLRBUF; error = UFS_BALLOC(vp, length - 1, 1, cred, flags, &bp); if (error) { vnode_pager_setsize(vp, osize); return (error); } ip->i_size = length; DIP_SET(ip, i_size, length); if (bp->b_bufsize == fs->fs_bsize) bp->b_flags |= B_CLUSTEROK; if (flags & IO_SYNC) bwrite(bp); else if (DOINGASYNC(vp)) bdwrite(bp); else bawrite(bp); ip->i_flag |= IN_CHANGE | IN_UPDATE; return (ffs_update(vp, waitforupdate)); } /* * Lookup block number for a given offset. Zero length files * have no blocks, so return a blkno of -1. 
*/ lbn = lblkno(fs, length - 1); if (length == 0) { blkno = -1; } else if (lbn < UFS_NDADDR) { blkno = DIP(ip, i_db[lbn]); } else { error = UFS_BALLOC(vp, lblktosize(fs, (off_t)lbn), fs->fs_bsize, cred, BA_METAONLY, &bp); if (error) return (error); indiroff = (lbn - UFS_NDADDR) % NINDIR(fs); if (I_IS_UFS1(ip)) blkno = ((ufs1_daddr_t *)(bp->b_data))[indiroff]; else blkno = ((ufs2_daddr_t *)(bp->b_data))[indiroff]; /* * If the block number is non-zero, then the indirect block * must have been previously allocated and need not be written. * If the block number is zero, then we may have allocated * the indirect block and hence need to write it out. */ if (blkno != 0) brelse(bp); else if (flags & IO_SYNC) bwrite(bp); else bdwrite(bp); } /* * If the block number at the new end of the file is zero, * then we must allocate it to ensure that the last block of * the file is allocated. Soft updates does not handle this * case, so here we have to clean up the soft updates data * structures describing the allocation past the truncation * point. Finding and deallocating those structures is a lot of * work. Since partial truncation with a hole at the end occurs * rarely, we solve the problem by syncing the file so that it * will have no soft updates data structures left. */ if (blkno == 0 && (error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0) return (error); if (blkno != 0 && DOINGSOFTDEP(vp)) { if (softdeptrunc == 0 && journaltrunc == 0) { /* * If soft updates cannot handle this truncation, * clean up soft dependency data structures and * fall through to the synchronous truncation. */ if ((error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0) return (error); } else { flags = IO_NORMAL | (needextclean ? IO_EXT: 0); if (journaltrunc) softdep_journal_freeblocks(ip, cred, length, flags); else softdep_setup_freeblocks(ip, length, flags); ASSERT_VOP_LOCKED(vp, "ffs_truncate1"); if (journaltrunc == 0) { ip->i_flag |= IN_CHANGE | IN_UPDATE; error = ffs_update(vp, 0); } return (error); } } /* * Shorten the size of the file. If the last block of the * shortened file is unallocated, we must allocate it. * Additionally, if the file is not being truncated to a * block boundary, the contents of the partial block * following the end of the file must be zero'ed in * case it ever becomes accessible again because of * subsequent file growth. Directories however are not * zero'ed as they should grow back initialized to empty. */ offset = blkoff(fs, length); if (blkno != 0 && offset == 0) { ip->i_size = length; DIP_SET(ip, i_size, length); } else { lbn = lblkno(fs, length); flags |= BA_CLRBUF; error = UFS_BALLOC(vp, length - 1, 1, cred, flags, &bp); if (error) return (error); /* * When we are doing soft updates and the UFS_BALLOC * above fills in a direct block hole with a full sized * block that will be truncated down to a fragment below, * we must flush out the block dependency with an FSYNC * so that we do not get a soft updates inconsistency * when we create the fragment below. 
*/ if (DOINGSOFTDEP(vp) && lbn < UFS_NDADDR && fragroundup(fs, blkoff(fs, length)) < fs->fs_bsize && (error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0) return (error); ip->i_size = length; DIP_SET(ip, i_size, length); size = blksize(fs, ip, lbn); if (vp->v_type != VDIR && offset != 0) bzero((char *)bp->b_data + offset, (u_int)(size - offset)); /* Kirk's code has reallocbuf(bp, size, 1) here */ allocbuf(bp, size); if (bp->b_bufsize == fs->fs_bsize) bp->b_flags |= B_CLUSTEROK; if (flags & IO_SYNC) bwrite(bp); else if (DOINGASYNC(vp)) bdwrite(bp); else bawrite(bp); } /* * Calculate index into inode's block list of * last direct and indirect blocks (if any) * which we want to keep. Lastblock is -1 when * the file is truncated to 0. */ lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1; lastiblock[SINGLE] = lastblock - UFS_NDADDR; lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs); lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs); nblocks = btodb(fs->fs_bsize); /* * Update file and block pointers on disk before we start freeing * blocks. If we crash before free'ing blocks below, the blocks * will be returned to the free list. lastiblock values are also * normalized to -1 for calls to ffs_indirtrunc below. */ for (level = TRIPLE; level >= SINGLE; level--) { oldblks[UFS_NDADDR + level] = DIP(ip, i_ib[level]); if (lastiblock[level] < 0) { DIP_SET(ip, i_ib[level], 0); lastiblock[level] = -1; } } for (i = 0; i < UFS_NDADDR; i++) { oldblks[i] = DIP(ip, i_db[i]); if (i > lastblock) DIP_SET(ip, i_db[i], 0); } ip->i_flag |= IN_CHANGE | IN_UPDATE; allerror = ffs_update(vp, waitforupdate); /* * Having written the new inode to disk, save its new configuration * and put back the old block pointers long enough to process them. * Note that we save the new block configuration so we can check it * when we are done. */ for (i = 0; i < UFS_NDADDR; i++) { newblks[i] = DIP(ip, i_db[i]); DIP_SET(ip, i_db[i], oldblks[i]); } for (i = 0; i < UFS_NIADDR; i++) { newblks[UFS_NDADDR + i] = DIP(ip, i_ib[i]); DIP_SET(ip, i_ib[i], oldblks[UFS_NDADDR + i]); } ip->i_size = osize; DIP_SET(ip, i_size, osize); error = vtruncbuf(vp, cred, length, fs->fs_bsize); if (error && (allerror == 0)) allerror = error; /* * Indirect blocks first. */ indir_lbn[SINGLE] = -UFS_NDADDR; indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1; indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1; for (level = TRIPLE; level >= SINGLE; level--) { bn = DIP(ip, i_ib[level]); if (bn != 0) { error = ffs_indirtrunc(ip, indir_lbn[level], fsbtodb(fs, bn), lastiblock[level], level, &count); if (error) allerror = error; blocksreleased += count; if (lastiblock[level] < 0) { DIP_SET(ip, i_ib[level], 0); ffs_blkfree(ump, fs, ump->um_devvp, bn, fs->fs_bsize, ip->i_number, vp->v_type, NULL, SINGLETON_KEY); blocksreleased += nblocks; } } if (lastiblock[level] >= 0) goto done; } /* * All whole direct blocks or frags. */ key = ffs_blkrelease_start(ump, ump->um_devvp, ip->i_number); for (i = UFS_NDADDR - 1; i > lastblock; i--) { long bsize; bn = DIP(ip, i_db[i]); if (bn == 0) continue; DIP_SET(ip, i_db[i], 0); bsize = blksize(fs, ip, i); ffs_blkfree(ump, fs, ump->um_devvp, bn, bsize, ip->i_number, vp->v_type, NULL, key); blocksreleased += btodb(bsize); } ffs_blkrelease_finish(ump, key); if (lastblock < 0) goto done; /* * Finally, look for a change in size of the * last direct block; release any frags. 
*/ bn = DIP(ip, i_db[lastblock]); if (bn != 0) { long oldspace, newspace; /* * Calculate amount of space we're giving * back as old block size minus new block size. */ oldspace = blksize(fs, ip, lastblock); ip->i_size = length; DIP_SET(ip, i_size, length); newspace = blksize(fs, ip, lastblock); if (newspace == 0) panic("ffs_truncate: newspace"); if (oldspace - newspace > 0) { /* * Block number of space to be free'd is * the old block # plus the number of frags * required for the storage we're keeping. */ bn += numfrags(fs, newspace); ffs_blkfree(ump, fs, ump->um_devvp, bn, oldspace - newspace, ip->i_number, vp->v_type, NULL, SINGLETON_KEY); blocksreleased += btodb(oldspace - newspace); } } done: #ifdef INVARIANTS for (level = SINGLE; level <= TRIPLE; level++) if (newblks[UFS_NDADDR + level] != DIP(ip, i_ib[level])) panic("ffs_truncate1"); for (i = 0; i < UFS_NDADDR; i++) if (newblks[i] != DIP(ip, i_db[i])) panic("ffs_truncate2"); BO_LOCK(bo); if (length == 0 && (fs->fs_magic != FS_UFS2_MAGIC || ip->i_din2->di_extsize == 0) && (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0)) panic("ffs_truncate3"); BO_UNLOCK(bo); #endif /* INVARIANTS */ /* * Put back the real size. */ ip->i_size = length; DIP_SET(ip, i_size, length); if (DIP(ip, i_blocks) >= blocksreleased) DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - blocksreleased); else /* sanity */ DIP_SET(ip, i_blocks, 0); ip->i_flag |= IN_CHANGE; #ifdef QUOTA (void) chkdq(ip, -blocksreleased, NOCRED, 0); #endif return (allerror); extclean: if (journaltrunc) softdep_journal_freeblocks(ip, cred, length, IO_EXT); else softdep_setup_freeblocks(ip, length, IO_EXT); return (ffs_update(vp, waitforupdate)); } /* * Release blocks associated with the inode ip and stored in the indirect * block bn. Blocks are free'd in LIFO order up to (but not including) * lastbn. If level is greater than SINGLE, the block is an indirect block * and recursive calls to indirtrunc must be used to cleanse other indirect * blocks. */ static int ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp) struct inode *ip; ufs2_daddr_t lbn, lastbn; ufs2_daddr_t dbn; int level; ufs2_daddr_t *countp; { struct buf *bp; struct fs *fs; struct ufsmount *ump; struct vnode *vp; caddr_t copy = NULL; u_long key; int i, nblocks, error = 0, allerror = 0; ufs2_daddr_t nb, nlbn, last; ufs2_daddr_t blkcount, factor, blocksreleased = 0; ufs1_daddr_t *bap1 = NULL; ufs2_daddr_t *bap2 = NULL; #define BAP(ip, i) (I_IS_UFS1(ip) ? bap1[i] : bap2[i]) fs = ITOFS(ip); ump = ITOUMP(ip); /* * Calculate index in current block of last * block to be kept. -1 indicates the entire * block so we need not calculate the index. */ factor = lbn_offset(fs, level); last = lastbn; if (lastbn > 0) last /= factor; nblocks = btodb(fs->fs_bsize); /* * Get buffer of block pointers, zero those entries corresponding * to blocks to be free'd, and update on disk copy first. Since * double(triple) indirect before single(double) indirect, calls * to bmap on these blocks will fail. However, we already have * the on disk address, so we have to set the b_blkno field * explicitly instead of letting bread do everything for us. 
*/ vp = ITOV(ip); bp = getblk(vp, lbn, (int)fs->fs_bsize, 0, 0, 0); if ((bp->b_flags & B_CACHE) == 0) { #ifdef RACCT if (racct_enable) { PROC_LOCK(curproc); racct_add_buf(curproc, bp, 0); PROC_UNLOCK(curproc); } #endif /* RACCT */ curthread->td_ru.ru_inblock++; /* pay for read */ bp->b_iocmd = BIO_READ; bp->b_flags &= ~B_INVAL; bp->b_ioflags &= ~BIO_ERROR; if (bp->b_bcount > bp->b_bufsize) panic("ffs_indirtrunc: bad buffer size"); bp->b_blkno = dbn; vfs_busy_pages(bp, 0); bp->b_iooffset = dbtob(bp->b_blkno); bstrategy(bp); error = bufwait(bp); } if (error) { brelse(bp); *countp = 0; return (error); } if (I_IS_UFS1(ip)) bap1 = (ufs1_daddr_t *)bp->b_data; else bap2 = (ufs2_daddr_t *)bp->b_data; if (lastbn != -1) { copy = malloc(fs->fs_bsize, M_TEMP, M_WAITOK); bcopy((caddr_t)bp->b_data, copy, (u_int)fs->fs_bsize); for (i = last + 1; i < NINDIR(fs); i++) if (I_IS_UFS1(ip)) bap1[i] = 0; else bap2[i] = 0; if (DOINGASYNC(vp)) { bdwrite(bp); } else { error = bwrite(bp); if (error) allerror = error; } if (I_IS_UFS1(ip)) bap1 = (ufs1_daddr_t *)copy; else bap2 = (ufs2_daddr_t *)copy; } /* * Recursively free totally unused blocks. */ key = ffs_blkrelease_start(ump, ITODEVVP(ip), ip->i_number); for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last; i--, nlbn += factor) { nb = BAP(ip, i); if (nb == 0) continue; if (level > SINGLE) { if ((error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb), (ufs2_daddr_t)-1, level - 1, &blkcount)) != 0) allerror = error; blocksreleased += blkcount; } ffs_blkfree(ump, fs, ITODEVVP(ip), nb, fs->fs_bsize, ip->i_number, vp->v_type, NULL, key); blocksreleased += nblocks; } ffs_blkrelease_finish(ump, key); /* * Recursively free last partial block. */ if (level > SINGLE && lastbn >= 0) { last = lastbn % factor; nb = BAP(ip, i); if (nb != 0) { error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb), last, level - 1, &blkcount); if (error) allerror = error; blocksreleased += blkcount; } } if (copy != NULL) { free(copy, M_TEMP); } else { bp->b_flags |= B_INVAL | B_NOCACHE; brelse(bp); } *countp = blocksreleased; return (allerror); } int ffs_rdonly(struct inode *ip) { return (ITOFS(ip)->fs_ronly != 0); } Index: projects/clang700-import/sys/ufs/ffs/ffs_snapshot.c =================================================================== --- projects/clang700-import/sys/ufs/ffs/ffs_snapshot.c (revision 340426) +++ projects/clang700-import/sys/ufs/ffs/ffs_snapshot.c (revision 340427) @@ -1,2706 +1,2706 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright 2000 Marshall Kirk McKusick. All Rights Reserved. * * Further information about snapshots can be obtained from: * * Marshall Kirk McKusick http://www.mckusick.com/softdep/ * 1614 Oxford Street mckusick@mckusick.com * Berkeley, CA 94709-1608 +1-510-843-9542 * USA * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY MARSHALL KIRK MCKUSICK ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL MARSHALL KIRK MCKUSICK BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ffs_snapshot.c 8.11 (McKusick) 7/23/00 */ #include __FBSDID("$FreeBSD$"); #include "opt_quota.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define KERNCRED thread0.td_ucred #define DEBUG 1 #include "opt_ffs.h" #ifdef NO_FFS_SNAPSHOT int ffs_snapshot(mp, snapfile) struct mount *mp; char *snapfile; { return (EINVAL); } int ffs_snapblkfree(fs, devvp, bno, size, inum, vtype, wkhd) struct fs *fs; struct vnode *devvp; ufs2_daddr_t bno; long size; ino_t inum; enum vtype vtype; struct workhead *wkhd; { return (EINVAL); } void ffs_snapremove(vp) struct vnode *vp; { } void ffs_snapshot_mount(mp) struct mount *mp; { } void ffs_snapshot_unmount(mp) struct mount *mp; { } void ffs_snapgone(ip) struct inode *ip; { } int ffs_copyonwrite(devvp, bp) struct vnode *devvp; struct buf *bp; { return (EINVAL); } void ffs_sync_snap(mp, waitfor) struct mount *mp; int waitfor; { } #else FEATURE(ffs_snapshot, "FFS snapshot support"); LIST_HEAD(, snapdata) snapfree; static struct mtx snapfree_lock; MTX_SYSINIT(ffs_snapfree, &snapfree_lock, "snapdata free list", MTX_DEF); static int cgaccount(int, struct vnode *, struct buf *, int); static int expunge_ufs1(struct vnode *, struct inode *, struct fs *, int (*)(struct vnode *, ufs1_daddr_t *, ufs1_daddr_t *, struct fs *, ufs_lbn_t, int), int, int); static int indiracct_ufs1(struct vnode *, struct vnode *, int, ufs1_daddr_t, ufs_lbn_t, ufs_lbn_t, ufs_lbn_t, ufs_lbn_t, struct fs *, int (*)(struct vnode *, ufs1_daddr_t *, ufs1_daddr_t *, struct fs *, ufs_lbn_t, int), int); static int fullacct_ufs1(struct vnode *, ufs1_daddr_t *, ufs1_daddr_t *, struct fs *, ufs_lbn_t, int); static int snapacct_ufs1(struct vnode *, ufs1_daddr_t *, ufs1_daddr_t *, struct fs *, ufs_lbn_t, int); static int mapacct_ufs1(struct vnode *, ufs1_daddr_t *, ufs1_daddr_t *, struct fs *, ufs_lbn_t, int); static int expunge_ufs2(struct vnode *, struct inode *, struct fs *, int (*)(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *, struct fs *, ufs_lbn_t, int), int, int); static int indiracct_ufs2(struct vnode *, struct vnode *, int, ufs2_daddr_t, ufs_lbn_t, ufs_lbn_t, ufs_lbn_t, ufs_lbn_t, struct fs *, int (*)(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *, struct fs *, ufs_lbn_t, int), int); static int fullacct_ufs2(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *, struct fs *, ufs_lbn_t, int); static int snapacct_ufs2(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *, struct fs *, ufs_lbn_t, int); static int mapacct_ufs2(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *, struct fs *, ufs_lbn_t, int); static int readblock(struct vnode *vp, struct buf *, ufs2_daddr_t); static void try_free_snapdata(struct vnode *devvp); static struct snapdata *ffs_snapdata_acquire(struct vnode *devvp); static int ffs_bp_snapblk(struct vnode *, struct buf *); /* * To ensure the consistency of snapshots across crashes, we must * synchronously 
write out copied blocks before allowing the * originals to be modified. Because of the rather severe speed * penalty that this imposes, the code normally only ensures * persistence for the filesystem metadata contained within a * snapshot. Setting the following flag allows this crash * persistence to be enabled for file contents. */ int dopersistence = 0; #ifdef DEBUG #include SYSCTL_INT(_debug, OID_AUTO, dopersistence, CTLFLAG_RW, &dopersistence, 0, ""); static int snapdebug = 0; SYSCTL_INT(_debug, OID_AUTO, snapdebug, CTLFLAG_RW, &snapdebug, 0, ""); int collectsnapstats = 0; SYSCTL_INT(_debug, OID_AUTO, collectsnapstats, CTLFLAG_RW, &collectsnapstats, 0, ""); #endif /* DEBUG */ /* * Create a snapshot file and initialize it for the filesystem. */ int ffs_snapshot(mp, snapfile) struct mount *mp; char *snapfile; { ufs2_daddr_t numblks, blkno, *blkp, *snapblklist; int error, cg, snaploc; int i, size, len, loc; ufs2_daddr_t blockno; uint64_t flag; struct timespec starttime = {0, 0}, endtime; char saved_nice = 0; long redo = 0, snaplistsize = 0; int32_t *lp; void *space; struct fs *copy_fs = NULL, *fs; struct thread *td = curthread; struct inode *ip, *xp; struct buf *bp, *nbp, *ibp; struct nameidata nd; struct mount *wrtmp; struct vattr vat; struct vnode *vp, *xvp, *mvp, *devvp; struct uio auio; struct iovec aiov; struct snapdata *sn; struct ufsmount *ump; ump = VFSTOUFS(mp); fs = ump->um_fs; sn = NULL; /* * At the moment, journaled soft updates cannot support * taking snapshots. */ if (MOUNTEDSUJ(mp)) { vfs_mount_error(mp, "%s: Snapshots are not yet supported when " "running with journaled soft updates", fs->fs_fsmnt); return (EOPNOTSUPP); } MNT_ILOCK(mp); flag = mp->mnt_flag; MNT_IUNLOCK(mp); /* * Need to serialize access to snapshot code per filesystem. */ /* * Assign a snapshot slot in the superblock. */ UFS_LOCK(ump); for (snaploc = 0; snaploc < FSMAXSNAP; snaploc++) if (fs->fs_snapinum[snaploc] == 0) break; UFS_UNLOCK(ump); if (snaploc == FSMAXSNAP) return (ENOSPC); /* * Create the snapshot file. */ restart: NDINIT(&nd, CREATE, LOCKPARENT | LOCKLEAF | NOCACHE, UIO_SYSSPACE, snapfile, td); if ((error = namei(&nd)) != 0) return (error); if (nd.ni_vp != NULL) { vput(nd.ni_vp); error = EEXIST; } if (nd.ni_dvp->v_mount != mp) error = EXDEV; if (error) { NDFREE(&nd, NDF_ONLY_PNBUF); if (nd.ni_dvp == nd.ni_vp) vrele(nd.ni_dvp); else vput(nd.ni_dvp); return (error); } VATTR_NULL(&vat); vat.va_type = VREG; vat.va_mode = S_IRUSR; vat.va_vaflags |= VA_EXCLUSIVE; if (VOP_GETWRITEMOUNT(nd.ni_dvp, &wrtmp)) wrtmp = NULL; if (wrtmp != mp) panic("ffs_snapshot: mount mismatch"); vfs_rel(wrtmp); if (vn_start_write(NULL, &wrtmp, V_NOWAIT) != 0) { NDFREE(&nd, NDF_ONLY_PNBUF); vput(nd.ni_dvp); if ((error = vn_start_write(NULL, &wrtmp, V_XSLEEP | PCATCH)) != 0) return (error); goto restart; } error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vat); VOP_UNLOCK(nd.ni_dvp, 0); if (error) { NDFREE(&nd, NDF_ONLY_PNBUF); vn_finished_write(wrtmp); vrele(nd.ni_dvp); return (error); } vp = nd.ni_vp; vp->v_vflag |= VV_SYSTEM; ip = VTOI(vp); devvp = ITODEVVP(ip); /* * Allocate and copy the last block contents so as to be able * to set size to that of the filesystem. 
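 *
 * As an illustration (figures chosen only for the example, not taken from
 * this change): with the common UFS2 geometry of 32K blocks and 4K
 * fragments, fs_frag is 8, so a filesystem of fs_size == 80000 fragments
 * gives
 *
 *	numblks    = howmany(80000, 8) = 10000 blocks
 *	ip->i_size = lblktosize(fs, 10000) = 10000 * 32768 bytes (~312 MB)
 *
 * which lets the snapshot file address every block of the filesystem it
 * captures.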
*/ numblks = howmany(fs->fs_size, fs->fs_frag); error = UFS_BALLOC(vp, lblktosize(fs, (off_t)(numblks - 1)), fs->fs_bsize, KERNCRED, BA_CLRBUF, &bp); if (error) goto out; ip->i_size = lblktosize(fs, (off_t)numblks); DIP_SET(ip, i_size, ip->i_size); ip->i_flag |= IN_CHANGE | IN_UPDATE; error = readblock(vp, bp, numblks - 1); bawrite(bp); if (error != 0) goto out; /* * Preallocate critical data structures so that we can copy * them in without further allocation after we suspend all * operations on the filesystem. We would like to just release * the allocated buffers without writing them since they will * be filled in below once we are ready to go, but this upsets * the soft update code, so we go ahead and write the new buffers. * * Allocate all indirect blocks and mark all of them as not * needing to be copied. */ for (blkno = UFS_NDADDR; blkno < numblks; blkno += NINDIR(fs)) { error = UFS_BALLOC(vp, lblktosize(fs, (off_t)blkno), fs->fs_bsize, td->td_ucred, BA_METAONLY, &ibp); if (error) goto out; bawrite(ibp); } /* * Allocate copies for the superblock and its summary information. */ error = UFS_BALLOC(vp, fs->fs_sblockloc, fs->fs_sbsize, KERNCRED, 0, &nbp); if (error) goto out; bawrite(nbp); blkno = fragstoblks(fs, fs->fs_csaddr); len = howmany(fs->fs_cssize, fs->fs_bsize); for (loc = 0; loc < len; loc++) { error = UFS_BALLOC(vp, lblktosize(fs, (off_t)(blkno + loc)), fs->fs_bsize, KERNCRED, 0, &nbp); if (error) goto out; bawrite(nbp); } /* * Allocate all cylinder group blocks. */ for (cg = 0; cg < fs->fs_ncg; cg++) { error = UFS_BALLOC(vp, lfragtosize(fs, cgtod(fs, cg)), fs->fs_bsize, KERNCRED, 0, &nbp); if (error) goto out; bawrite(nbp); if (cg % 10 == 0) ffs_syncvnode(vp, MNT_WAIT, 0); } /* * Copy all the cylinder group maps. Although the * filesystem is still active, we hope that only a few * cylinder groups will change between now and when we * suspend operations. Thus, we will be able to quickly * touch up the few cylinder groups that changed during * the suspension period. */ len = howmany(fs->fs_ncg, NBBY); space = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO); UFS_LOCK(ump); fs->fs_active = space; UFS_UNLOCK(ump); for (cg = 0; cg < fs->fs_ncg; cg++) { error = UFS_BALLOC(vp, lfragtosize(fs, cgtod(fs, cg)), fs->fs_bsize, KERNCRED, 0, &nbp); if (error) goto out; error = cgaccount(cg, vp, nbp, 1); bawrite(nbp); if (cg % 10 == 0) ffs_syncvnode(vp, MNT_WAIT, 0); if (error) goto out; } /* * Change inode to snapshot type file. */ ip->i_flags |= SF_SNAPSHOT; DIP_SET(ip, i_flags, ip->i_flags); ip->i_flag |= IN_CHANGE | IN_UPDATE; /* * Ensure that the snapshot is completely on disk. * Since we have marked it as a snapshot it is safe to * unlock it as no process will be allowed to write to it. */ if ((error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0) goto out; VOP_UNLOCK(vp, 0); /* * All allocations are done, so we can now snapshot the system. * * Recind nice scheduling while running with the filesystem suspended. */ if (td->td_proc->p_nice > 0) { struct proc *p; p = td->td_proc; PROC_LOCK(p); saved_nice = p->p_nice; sched_nice(p, 0); PROC_UNLOCK(p); } /* * Suspend operation on filesystem. 
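 *
 * The loop below keeps retrying vfs_write_suspend() until the mount point
 * reports MNTK_SUSPENDED; on failure it re-acquires the write reference
 * and the snapshot vnode lock before bailing out through "out".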
*/ for (;;) { vn_finished_write(wrtmp); if ((error = vfs_write_suspend(vp->v_mount, 0)) != 0) { vn_start_write(NULL, &wrtmp, V_WAIT); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); goto out; } if (mp->mnt_kern_flag & MNTK_SUSPENDED) break; vn_start_write(NULL, &wrtmp, V_WAIT); } vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); if (ip->i_effnlink == 0) { error = ENOENT; /* Snapshot file unlinked */ goto out1; } if (collectsnapstats) nanotime(&starttime); /* The last block might have changed. Copy it again to be sure. */ error = UFS_BALLOC(vp, lblktosize(fs, (off_t)(numblks - 1)), fs->fs_bsize, KERNCRED, BA_CLRBUF, &bp); if (error != 0) goto out1; error = readblock(vp, bp, numblks - 1); bp->b_flags |= B_VALIDSUSPWRT; bawrite(bp); if (error != 0) goto out1; /* * First, copy all the cylinder group maps that have changed. */ for (cg = 0; cg < fs->fs_ncg; cg++) { if ((ACTIVECGNUM(fs, cg) & ACTIVECGOFF(cg)) != 0) continue; redo++; error = UFS_BALLOC(vp, lfragtosize(fs, cgtod(fs, cg)), fs->fs_bsize, KERNCRED, 0, &nbp); if (error) goto out1; error = cgaccount(cg, vp, nbp, 2); bawrite(nbp); if (error) goto out1; } /* * Grab a copy of the superblock and its summary information. * We delay writing it until the suspension is released below. */ copy_fs = malloc((u_long)fs->fs_bsize, M_UFSMNT, M_WAITOK); bcopy(fs, copy_fs, fs->fs_sbsize); if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0) copy_fs->fs_clean = 1; size = fs->fs_bsize < SBLOCKSIZE ? fs->fs_bsize : SBLOCKSIZE; if (fs->fs_sbsize < size) bzero(&((char *)copy_fs)[fs->fs_sbsize], size - fs->fs_sbsize); size = blkroundup(fs, fs->fs_cssize); if (fs->fs_contigsumsize > 0) size += fs->fs_ncg * sizeof(int32_t); space = malloc((u_long)size, M_UFSMNT, M_WAITOK); copy_fs->fs_csp = space; bcopy(fs->fs_csp, copy_fs->fs_csp, fs->fs_cssize); space = (char *)space + fs->fs_cssize; loc = howmany(fs->fs_cssize, fs->fs_fsize); i = fs->fs_frag - loc % fs->fs_frag; len = (i == fs->fs_frag) ? 0 : i * fs->fs_fsize; if (len > 0) { if ((error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + loc), len, KERNCRED, &bp)) != 0) { brelse(bp); free(copy_fs->fs_csp, M_UFSMNT); free(copy_fs, M_UFSMNT); copy_fs = NULL; goto out1; } bcopy(bp->b_data, space, (u_int)len); space = (char *)space + len; bp->b_flags |= B_INVAL | B_NOCACHE; brelse(bp); } if (fs->fs_contigsumsize > 0) { copy_fs->fs_maxcluster = lp = space; for (i = 0; i < fs->fs_ncg; i++) *lp++ = fs->fs_contigsumsize; } /* * We must check for active files that have been unlinked * (e.g., with a zero link count). We have to expunge all * trace of these files from the snapshot so that they are * not reclaimed prematurely by fsck or unnecessarily dumped. * We turn off the MNTK_SUSPENDED flag to avoid a panic from * spec_strategy about writing on a suspended filesystem. * Note that we skip unlinked snapshot files as they will * be handled separately below. * * We also calculate the needed size for the snapshot list. */ snaplistsize = fs->fs_ncg + howmany(fs->fs_cssize, fs->fs_bsize) + FSMAXSNAP + 1 /* superblock */ + 1 /* last block */ + 1 /* size */; MNT_ILOCK(mp); mp->mnt_kern_flag &= ~MNTK_SUSPENDED; MNT_IUNLOCK(mp); loop: MNT_VNODE_FOREACH_ALL(xvp, mp, mvp) { if ((xvp->v_usecount == 0 && (xvp->v_iflag & (VI_OWEINACT | VI_DOINGINACT)) == 0) || xvp->v_type == VNON || IS_SNAPSHOT(VTOI(xvp))) { VI_UNLOCK(xvp); continue; } /* * We can skip parent directory vnode because it must have * this snapshot file in it. 
*/ if (xvp == nd.ni_dvp) { VI_UNLOCK(xvp); continue; } vholdl(xvp); if (vn_lock(xvp, LK_EXCLUSIVE | LK_INTERLOCK) != 0) { MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); vdrop(xvp); goto loop; } VI_LOCK(xvp); if (xvp->v_usecount == 0 && (xvp->v_iflag & (VI_OWEINACT | VI_DOINGINACT)) == 0) { VI_UNLOCK(xvp); VOP_UNLOCK(xvp, 0); vdrop(xvp); continue; } VI_UNLOCK(xvp); if (snapdebug) vn_printf(xvp, "ffs_snapshot: busy vnode "); if (VOP_GETATTR(xvp, &vat, td->td_ucred) == 0 && vat.va_nlink > 0) { VOP_UNLOCK(xvp, 0); vdrop(xvp); continue; } xp = VTOI(xvp); if (ffs_checkfreefile(copy_fs, vp, xp->i_number)) { VOP_UNLOCK(xvp, 0); vdrop(xvp); continue; } /* * If there is a fragment, clear it here. */ blkno = 0; loc = howmany(xp->i_size, fs->fs_bsize) - 1; if (loc < UFS_NDADDR) { len = fragroundup(fs, blkoff(fs, xp->i_size)); if (len != 0 && len < fs->fs_bsize) { ffs_blkfree(ump, copy_fs, vp, DIP(xp, i_db[loc]), len, xp->i_number, xvp->v_type, NULL, SINGLETON_KEY); blkno = DIP(xp, i_db[loc]); DIP_SET(xp, i_db[loc], 0); } } snaplistsize += 1; if (I_IS_UFS1(xp)) error = expunge_ufs1(vp, xp, copy_fs, fullacct_ufs1, BLK_NOCOPY, 1); else error = expunge_ufs2(vp, xp, copy_fs, fullacct_ufs2, BLK_NOCOPY, 1); if (blkno) DIP_SET(xp, i_db[loc], blkno); if (!error) error = ffs_freefile(ump, copy_fs, vp, xp->i_number, xp->i_mode, NULL); VOP_UNLOCK(xvp, 0); vdrop(xvp); if (error) { free(copy_fs->fs_csp, M_UFSMNT); free(copy_fs, M_UFSMNT); copy_fs = NULL; MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); goto out1; } } /* * Erase the journal file from the snapshot. */ if (fs->fs_flags & FS_SUJ) { error = softdep_journal_lookup(mp, &xvp); if (error) { free(copy_fs->fs_csp, M_UFSMNT); free(copy_fs, M_UFSMNT); copy_fs = NULL; goto out1; } xp = VTOI(xvp); if (I_IS_UFS1(xp)) error = expunge_ufs1(vp, xp, copy_fs, fullacct_ufs1, BLK_NOCOPY, 0); else error = expunge_ufs2(vp, xp, copy_fs, fullacct_ufs2, BLK_NOCOPY, 0); vput(xvp); } /* * Acquire a lock on the snapdata structure, creating it if necessary. */ sn = ffs_snapdata_acquire(devvp); /* * Change vnode to use shared snapshot lock instead of the original * private lock. */ vp->v_vnlock = &sn->sn_lock; lockmgr(&vp->v_lock, LK_RELEASE, NULL); xp = TAILQ_FIRST(&sn->sn_head); /* * If this is the first snapshot on this filesystem, then we need * to allocate the space for the list of preallocated snapshot blocks. * This list will be refined below, but this preliminary one will * keep us out of deadlock until the full one is ready. */ if (xp == NULL) { snapblklist = malloc(snaplistsize * sizeof(daddr_t), M_UFSMNT, M_WAITOK); blkp = &snapblklist[1]; *blkp++ = lblkno(fs, fs->fs_sblockloc); blkno = fragstoblks(fs, fs->fs_csaddr); for (cg = 0; cg < fs->fs_ncg; cg++) { if (fragstoblks(fs, cgtod(fs, cg) > blkno)) break; *blkp++ = fragstoblks(fs, cgtod(fs, cg)); } len = howmany(fs->fs_cssize, fs->fs_bsize); for (loc = 0; loc < len; loc++) *blkp++ = blkno + loc; for (; cg < fs->fs_ncg; cg++) *blkp++ = fragstoblks(fs, cgtod(fs, cg)); snapblklist[0] = blkp - snapblklist; VI_LOCK(devvp); if (sn->sn_blklist != NULL) panic("ffs_snapshot: non-empty list"); sn->sn_blklist = snapblklist; sn->sn_listsize = blkp - snapblklist; VI_UNLOCK(devvp); } /* * Record snapshot inode. Since this is the newest snapshot, * it must be placed at the end of the list. 
*/ VI_LOCK(devvp); fs->fs_snapinum[snaploc] = ip->i_number; if (ip->i_nextsnap.tqe_prev != 0) panic("ffs_snapshot: %ju already on list", (uintmax_t)ip->i_number); TAILQ_INSERT_TAIL(&sn->sn_head, ip, i_nextsnap); devvp->v_vflag |= VV_COPYONWRITE; VI_UNLOCK(devvp); ASSERT_VOP_LOCKED(vp, "ffs_snapshot vp"); out1: KASSERT((sn != NULL && copy_fs != NULL && error == 0) || (sn == NULL && copy_fs == NULL && error != 0), ("email phk@ and mckusick@")); /* * Resume operation on filesystem. */ vfs_write_resume(vp->v_mount, VR_START_WRITE | VR_NO_SUSPCLR); if (collectsnapstats && starttime.tv_sec > 0) { nanotime(&endtime); timespecsub(&endtime, &starttime, &endtime); printf("%s: suspended %ld.%03ld sec, redo %ld of %d\n", vp->v_mount->mnt_stat.f_mntonname, (long)endtime.tv_sec, endtime.tv_nsec / 1000000, redo, fs->fs_ncg); } if (copy_fs == NULL) goto out; /* * Copy allocation information from all the snapshots in * this snapshot and then expunge them from its view. */ TAILQ_FOREACH(xp, &sn->sn_head, i_nextsnap) { if (xp == ip) break; if (I_IS_UFS1(xp)) error = expunge_ufs1(vp, xp, fs, snapacct_ufs1, BLK_SNAP, 0); else error = expunge_ufs2(vp, xp, fs, snapacct_ufs2, BLK_SNAP, 0); if (error == 0 && xp->i_effnlink == 0) { error = ffs_freefile(ump, copy_fs, vp, xp->i_number, xp->i_mode, NULL); } if (error) { fs->fs_snapinum[snaploc] = 0; goto done; } } /* * Allocate space for the full list of preallocated snapshot blocks. */ snapblklist = malloc(snaplistsize * sizeof(daddr_t), M_UFSMNT, M_WAITOK); ip->i_snapblklist = &snapblklist[1]; /* * Expunge the blocks used by the snapshots from the set of * blocks marked as used in the snapshot bitmaps. Also, collect * the list of allocated blocks in i_snapblklist. */ if (I_IS_UFS1(ip)) error = expunge_ufs1(vp, ip, copy_fs, mapacct_ufs1, BLK_SNAP, 0); else error = expunge_ufs2(vp, ip, copy_fs, mapacct_ufs2, BLK_SNAP, 0); if (error) { fs->fs_snapinum[snaploc] = 0; free(snapblklist, M_UFSMNT); goto done; } if (snaplistsize < ip->i_snapblklist - snapblklist) panic("ffs_snapshot: list too small"); snaplistsize = ip->i_snapblklist - snapblklist; snapblklist[0] = snaplistsize; ip->i_snapblklist = 0; /* * Write out the list of allocated blocks to the end of the snapshot. */ auio.uio_iov = &aiov; auio.uio_iovcnt = 1; aiov.iov_base = (void *)snapblklist; aiov.iov_len = snaplistsize * sizeof(daddr_t); auio.uio_resid = aiov.iov_len; auio.uio_offset = ip->i_size; auio.uio_segflg = UIO_SYSSPACE; auio.uio_rw = UIO_WRITE; auio.uio_td = td; if ((error = VOP_WRITE(vp, &auio, IO_UNIT, td->td_ucred)) != 0) { fs->fs_snapinum[snaploc] = 0; free(snapblklist, M_UFSMNT); goto done; } /* * Write the superblock and its summary information * to the snapshot. */ blkno = fragstoblks(fs, fs->fs_csaddr); len = howmany(fs->fs_cssize, fs->fs_bsize); space = copy_fs->fs_csp; for (loc = 0; loc < len; loc++) { error = bread(vp, blkno + loc, fs->fs_bsize, KERNCRED, &nbp); if (error) { brelse(nbp); fs->fs_snapinum[snaploc] = 0; free(snapblklist, M_UFSMNT); goto done; } bcopy(space, nbp->b_data, fs->fs_bsize); space = (char *)space + fs->fs_bsize; bawrite(nbp); } error = bread(vp, lblkno(fs, fs->fs_sblockloc), fs->fs_bsize, KERNCRED, &nbp); if (error) { brelse(nbp); } else { loc = blkoff(fs, fs->fs_sblockloc); bcopy((char *)copy_fs, &nbp->b_data[loc], (u_int)fs->fs_sbsize); bawrite(nbp); } /* * As this is the newest list, it is the most inclusive, so * should replace the previous list. 
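 *
 * The list pointer is switched while holding the device vnode interlock,
 * which is also what ffs_copyonwrite() and ffs_bp_snapblk() hold when they
 * consult sn_blklist, so readers never see a partially installed list; the
 * superseded list is freed only after the interlock has been dropped.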
*/ VI_LOCK(devvp); space = sn->sn_blklist; sn->sn_blklist = snapblklist; sn->sn_listsize = snaplistsize; VI_UNLOCK(devvp); if (space != NULL) free(space, M_UFSMNT); /* * Preallocate all the direct blocks in the snapshot inode so * that we never have to write the inode itself to commit an * update to the contents of the snapshot. Note that once * created, the size of the snapshot will never change, so * there will never be a need to write the inode except to * update the non-integrity-critical time fields and * allocated-block count. */ for (blockno = 0; blockno < UFS_NDADDR; blockno++) { if (DIP(ip, i_db[blockno]) != 0) continue; error = UFS_BALLOC(vp, lblktosize(fs, blockno), fs->fs_bsize, KERNCRED, BA_CLRBUF, &bp); if (error) break; error = readblock(vp, bp, blockno); bawrite(bp); if (error != 0) break; } done: free(copy_fs->fs_csp, M_UFSMNT); free(copy_fs, M_UFSMNT); copy_fs = NULL; out: NDFREE(&nd, NDF_ONLY_PNBUF); if (saved_nice > 0) { struct proc *p; p = td->td_proc; PROC_LOCK(p); sched_nice(td->td_proc, saved_nice); PROC_UNLOCK(td->td_proc); } UFS_LOCK(ump); if (fs->fs_active != 0) { free(fs->fs_active, M_DEVBUF); fs->fs_active = 0; } UFS_UNLOCK(ump); MNT_ILOCK(mp); mp->mnt_flag = (mp->mnt_flag & MNT_QUOTA) | (flag & ~MNT_QUOTA); MNT_IUNLOCK(mp); if (error) (void) ffs_truncate(vp, (off_t)0, 0, NOCRED); (void) ffs_syncvnode(vp, MNT_WAIT, 0); if (error) vput(vp); else VOP_UNLOCK(vp, 0); vrele(nd.ni_dvp); vn_finished_write(wrtmp); process_deferred_inactive(mp); return (error); } /* * Copy a cylinder group map. All the unallocated blocks are marked * BLK_NOCOPY so that the snapshot knows that it need not copy them * if they are later written. If passno is one, then this is a first * pass, so only setting needs to be done. If passno is 2, then this * is a revision to a previous pass which must be undone as the * replacement pass is done. */ static int cgaccount(cg, vp, nbp, passno) int cg; struct vnode *vp; struct buf *nbp; int passno; { struct buf *bp, *ibp; struct inode *ip; struct cg *cgp; struct fs *fs; ufs2_daddr_t base, numblks; int error, len, loc, indiroff; ip = VTOI(vp); fs = ITOFS(ip); if ((error = ffs_getcg(fs, ITODEVVP(ip), cg, &bp, &cgp)) != 0) return (error); UFS_LOCK(ITOUMP(ip)); ACTIVESET(fs, cg); /* * Recomputation of summary information might not have been performed * at mount time. Sync up summary information for current cylinder * group while data is in memory to ensure that result of background * fsck is slightly more consistent. 
*/ fs->fs_cs(fs, cg) = cgp->cg_cs; UFS_UNLOCK(ITOUMP(ip)); bcopy(bp->b_data, nbp->b_data, fs->fs_cgsize); if (fs->fs_cgsize < fs->fs_bsize) bzero(&nbp->b_data[fs->fs_cgsize], fs->fs_bsize - fs->fs_cgsize); cgp = (struct cg *)nbp->b_data; bqrelse(bp); if (passno == 2) nbp->b_flags |= B_VALIDSUSPWRT; numblks = howmany(fs->fs_size, fs->fs_frag); len = howmany(fs->fs_fpg, fs->fs_frag); base = cgbase(fs, cg) / fs->fs_frag; if (base + len >= numblks) len = numblks - base - 1; loc = 0; if (base < UFS_NDADDR) { for ( ; loc < UFS_NDADDR; loc++) { if (ffs_isblock(fs, cg_blksfree(cgp), loc)) DIP_SET(ip, i_db[loc], BLK_NOCOPY); else if (passno == 2 && DIP(ip, i_db[loc])== BLK_NOCOPY) DIP_SET(ip, i_db[loc], 0); else if (passno == 1 && DIP(ip, i_db[loc])== BLK_NOCOPY) panic("ffs_snapshot: lost direct block"); } } error = UFS_BALLOC(vp, lblktosize(fs, (off_t)(base + loc)), fs->fs_bsize, KERNCRED, BA_METAONLY, &ibp); if (error) { goto out; } indiroff = (base + loc - UFS_NDADDR) % NINDIR(fs); for ( ; loc < len; loc++, indiroff++) { if (indiroff >= NINDIR(fs)) { if (passno == 2) ibp->b_flags |= B_VALIDSUSPWRT; bawrite(ibp); error = UFS_BALLOC(vp, lblktosize(fs, (off_t)(base + loc)), fs->fs_bsize, KERNCRED, BA_METAONLY, &ibp); if (error) { goto out; } indiroff = 0; } if (I_IS_UFS1(ip)) { if (ffs_isblock(fs, cg_blksfree(cgp), loc)) ((ufs1_daddr_t *)(ibp->b_data))[indiroff] = BLK_NOCOPY; else if (passno == 2 && ((ufs1_daddr_t *)(ibp->b_data)) [indiroff] == BLK_NOCOPY) ((ufs1_daddr_t *)(ibp->b_data))[indiroff] = 0; else if (passno == 1 && ((ufs1_daddr_t *)(ibp->b_data)) [indiroff] == BLK_NOCOPY) panic("ffs_snapshot: lost indirect block"); continue; } if (ffs_isblock(fs, cg_blksfree(cgp), loc)) ((ufs2_daddr_t *)(ibp->b_data))[indiroff] = BLK_NOCOPY; else if (passno == 2 && ((ufs2_daddr_t *)(ibp->b_data)) [indiroff] == BLK_NOCOPY) ((ufs2_daddr_t *)(ibp->b_data))[indiroff] = 0; else if (passno == 1 && ((ufs2_daddr_t *)(ibp->b_data)) [indiroff] == BLK_NOCOPY) panic("ffs_snapshot: lost indirect block"); } if (passno == 2) ibp->b_flags |= B_VALIDSUSPWRT; bdwrite(ibp); out: /* * We have to calculate the crc32c here rather than just setting the * BX_CYLGRP b_xflags because the allocation of the block for the * the cylinder group map will always be a full size block (fs_bsize) * even though the cylinder group may be smaller (fs_cgsize). The * crc32c must be computed only over fs_cgsize whereas the BX_CYLGRP * flag causes it to be computed over the size of the buffer. */ if ((fs->fs_metackhash & CK_CYLGRP) != 0) { ((struct cg *)nbp->b_data)->cg_ckhash = 0; ((struct cg *)nbp->b_data)->cg_ckhash = calculate_crc32c(~0L, nbp->b_data, fs->fs_cgsize); } return (error); } /* * Before expunging a snapshot inode, note all the * blocks that it claims with BLK_SNAP so that fsck will * be able to account for those blocks properly and so * that this snapshot knows that it need not copy them * if the other snapshot holding them is freed. This code * is reproduced once each for UFS1 and UFS2. */ static int expunge_ufs1(snapvp, cancelip, fs, acctfunc, expungetype, clearmode) struct vnode *snapvp; struct inode *cancelip; struct fs *fs; int (*acctfunc)(struct vnode *, ufs1_daddr_t *, ufs1_daddr_t *, struct fs *, ufs_lbn_t, int); int expungetype; int clearmode; { int i, error, indiroff; ufs_lbn_t lbn, rlbn; ufs2_daddr_t len, blkno, numblks, blksperindir; struct ufs1_dinode *dip; struct thread *td = curthread; struct buf *bp; /* * Prepare to expunge the inode. If its inode block has not * yet been copied, then allocate and fill the copy. 
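 *
 * A worked example of the lookup below (geometry chosen only for the
 * example): with 8K blocks a UFS1 filesystem has NINDIR(fs) == 2048, so
 * the copy of the block holding inode "ino" lives at
 *
 *	lbn = fragstoblks(fs, ino_to_fsba(fs, ino));
 *
 * within the snapshot; if lbn >= UFS_NDADDR (beyond the 12 direct slots),
 * its address is found at slot (lbn - UFS_NDADDR) % NINDIR(fs) of the
 * indirect block returned by ffs_balloc_ufs1() with BA_METAONLY.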
*/ lbn = fragstoblks(fs, ino_to_fsba(fs, cancelip->i_number)); blkno = 0; if (lbn < UFS_NDADDR) { blkno = VTOI(snapvp)->i_din1->di_db[lbn]; } else { if (DOINGSOFTDEP(snapvp)) softdep_prealloc(snapvp, MNT_WAIT); td->td_pflags |= TDP_COWINPROGRESS; error = ffs_balloc_ufs1(snapvp, lblktosize(fs, (off_t)lbn), fs->fs_bsize, KERNCRED, BA_METAONLY, &bp); td->td_pflags &= ~TDP_COWINPROGRESS; if (error) return (error); indiroff = (lbn - UFS_NDADDR) % NINDIR(fs); blkno = ((ufs1_daddr_t *)(bp->b_data))[indiroff]; bqrelse(bp); } if (blkno != 0) { if ((error = bread(snapvp, lbn, fs->fs_bsize, KERNCRED, &bp))) return (error); } else { error = ffs_balloc_ufs1(snapvp, lblktosize(fs, (off_t)lbn), fs->fs_bsize, KERNCRED, 0, &bp); if (error) return (error); if ((error = readblock(snapvp, bp, lbn)) != 0) return (error); } /* * Set a snapshot inode to be a zero length file, regular files * or unlinked snapshots to be completely unallocated. */ dip = (struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, cancelip->i_number); if (clearmode || cancelip->i_effnlink == 0) dip->di_mode = 0; dip->di_size = 0; dip->di_blocks = 0; dip->di_flags &= ~SF_SNAPSHOT; bzero(&dip->di_db[0], (UFS_NDADDR + UFS_NIADDR) * sizeof(ufs1_daddr_t)); bdwrite(bp); /* * Now go through and expunge all the blocks in the file * using the function requested. */ numblks = howmany(cancelip->i_size, fs->fs_bsize); if ((error = (*acctfunc)(snapvp, &cancelip->i_din1->di_db[0], &cancelip->i_din1->di_db[UFS_NDADDR], fs, 0, expungetype))) return (error); if ((error = (*acctfunc)(snapvp, &cancelip->i_din1->di_ib[0], &cancelip->i_din1->di_ib[UFS_NIADDR], fs, -1, expungetype))) return (error); blksperindir = 1; lbn = -UFS_NDADDR; len = numblks - UFS_NDADDR; rlbn = UFS_NDADDR; for (i = 0; len > 0 && i < UFS_NIADDR; i++) { error = indiracct_ufs1(snapvp, ITOV(cancelip), i, cancelip->i_din1->di_ib[i], lbn, rlbn, len, blksperindir, fs, acctfunc, expungetype); if (error) return (error); blksperindir *= NINDIR(fs); lbn -= blksperindir + 1; len -= blksperindir; rlbn += blksperindir; } return (0); } /* * Descend an indirect block chain for vnode cancelvp accounting for all * its indirect blocks in snapvp. */ static int indiracct_ufs1(snapvp, cancelvp, level, blkno, lbn, rlbn, remblks, blksperindir, fs, acctfunc, expungetype) struct vnode *snapvp; struct vnode *cancelvp; int level; ufs1_daddr_t blkno; ufs_lbn_t lbn; ufs_lbn_t rlbn; ufs_lbn_t remblks; ufs_lbn_t blksperindir; struct fs *fs; int (*acctfunc)(struct vnode *, ufs1_daddr_t *, ufs1_daddr_t *, struct fs *, ufs_lbn_t, int); int expungetype; { int error, num, i; ufs_lbn_t subblksperindir; struct indir indirs[UFS_NIADDR + 2]; ufs1_daddr_t last, *bap; struct buf *bp; if (blkno == 0) { if (expungetype == BLK_NOCOPY) return (0); panic("indiracct_ufs1: missing indir"); } if ((error = ufs_getlbns(cancelvp, rlbn, indirs, &num)) != 0) return (error); if (lbn != indirs[num - 1 - level].in_lbn || num < 2) panic("indiracct_ufs1: botched params"); /* * We have to expand bread here since it will deadlock looking * up the block number for any blocks that are not in the cache. */ bp = getblk(cancelvp, lbn, fs->fs_bsize, 0, 0, 0); bp->b_blkno = fsbtodb(fs, blkno); if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0 && (error = readblock(cancelvp, bp, fragstoblks(fs, blkno)))) { brelse(bp); return (error); } /* * Account for the block pointers in this indirect block. 
*/ last = howmany(remblks, blksperindir); if (last > NINDIR(fs)) last = NINDIR(fs); bap = malloc(fs->fs_bsize, M_DEVBUF, M_WAITOK); bcopy(bp->b_data, (caddr_t)bap, fs->fs_bsize); bqrelse(bp); error = (*acctfunc)(snapvp, &bap[0], &bap[last], fs, level == 0 ? rlbn : -1, expungetype); if (error || level == 0) goto out; /* * Account for the block pointers in each of the indirect blocks * in the levels below us. */ subblksperindir = blksperindir / NINDIR(fs); for (lbn++, level--, i = 0; i < last; i++) { error = indiracct_ufs1(snapvp, cancelvp, level, bap[i], lbn, rlbn, remblks, subblksperindir, fs, acctfunc, expungetype); if (error) goto out; rlbn += blksperindir; lbn -= blksperindir; remblks -= blksperindir; } out: free(bap, M_DEVBUF); return (error); } /* * Do both snap accounting and map accounting. */ static int fullacct_ufs1(vp, oldblkp, lastblkp, fs, lblkno, exptype) struct vnode *vp; ufs1_daddr_t *oldblkp, *lastblkp; struct fs *fs; ufs_lbn_t lblkno; int exptype; /* BLK_SNAP or BLK_NOCOPY */ { int error; if ((error = snapacct_ufs1(vp, oldblkp, lastblkp, fs, lblkno, exptype))) return (error); return (mapacct_ufs1(vp, oldblkp, lastblkp, fs, lblkno, exptype)); } /* * Identify a set of blocks allocated in a snapshot inode. */ static int snapacct_ufs1(vp, oldblkp, lastblkp, fs, lblkno, expungetype) struct vnode *vp; ufs1_daddr_t *oldblkp, *lastblkp; struct fs *fs; ufs_lbn_t lblkno; int expungetype; /* BLK_SNAP or BLK_NOCOPY */ { struct inode *ip = VTOI(vp); ufs1_daddr_t blkno, *blkp; ufs_lbn_t lbn; struct buf *ibp; int error; for ( ; oldblkp < lastblkp; oldblkp++) { blkno = *oldblkp; if (blkno == 0 || blkno == BLK_NOCOPY || blkno == BLK_SNAP) continue; lbn = fragstoblks(fs, blkno); if (lbn < UFS_NDADDR) { blkp = &ip->i_din1->di_db[lbn]; ip->i_flag |= IN_CHANGE | IN_UPDATE; } else { error = ffs_balloc_ufs1(vp, lblktosize(fs, (off_t)lbn), fs->fs_bsize, KERNCRED, BA_METAONLY, &ibp); if (error) return (error); blkp = &((ufs1_daddr_t *)(ibp->b_data)) [(lbn - UFS_NDADDR) % NINDIR(fs)]; } /* * If we are expunging a snapshot vnode and we * find a block marked BLK_NOCOPY, then it is * one that has been allocated to this snapshot after * we took our current snapshot and can be ignored. */ if (expungetype == BLK_SNAP && *blkp == BLK_NOCOPY) { if (lbn >= UFS_NDADDR) brelse(ibp); } else { if (*blkp != 0) panic("snapacct_ufs1: bad block"); *blkp = expungetype; if (lbn >= UFS_NDADDR) bdwrite(ibp); } } return (0); } /* * Account for a set of blocks allocated in a snapshot inode. */ static int mapacct_ufs1(vp, oldblkp, lastblkp, fs, lblkno, expungetype) struct vnode *vp; ufs1_daddr_t *oldblkp, *lastblkp; struct fs *fs; ufs_lbn_t lblkno; int expungetype; { ufs1_daddr_t blkno; struct inode *ip; ino_t inum; int acctit; ip = VTOI(vp); inum = ip->i_number; if (lblkno == -1) acctit = 0; else acctit = 1; for ( ; oldblkp < lastblkp; oldblkp++, lblkno++) { blkno = *oldblkp; if (blkno == 0 || blkno == BLK_NOCOPY) continue; if (acctit && expungetype == BLK_SNAP && blkno != BLK_SNAP) *ip->i_snapblklist++ = lblkno; if (blkno == BLK_SNAP) blkno = blkstofrags(fs, lblkno); ffs_blkfree(ITOUMP(ip), fs, vp, blkno, fs->fs_bsize, inum, vp->v_type, NULL, SINGLETON_KEY); } return (0); } /* * Before expunging a snapshot inode, note all the * blocks that it claims with BLK_SNAP so that fsck will * be able to account for those blocks properly and so * that this snapshot knows that it need not copy them * if the other snapshot holding them is freed. This code * is reproduced once each for UFS1 and UFS2. 
*/ static int expunge_ufs2(snapvp, cancelip, fs, acctfunc, expungetype, clearmode) struct vnode *snapvp; struct inode *cancelip; struct fs *fs; int (*acctfunc)(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *, struct fs *, ufs_lbn_t, int); int expungetype; int clearmode; { int i, error, indiroff; ufs_lbn_t lbn, rlbn; ufs2_daddr_t len, blkno, numblks, blksperindir; struct ufs2_dinode *dip; struct thread *td = curthread; struct buf *bp; /* * Prepare to expunge the inode. If its inode block has not * yet been copied, then allocate and fill the copy. */ lbn = fragstoblks(fs, ino_to_fsba(fs, cancelip->i_number)); blkno = 0; if (lbn < UFS_NDADDR) { blkno = VTOI(snapvp)->i_din2->di_db[lbn]; } else { if (DOINGSOFTDEP(snapvp)) softdep_prealloc(snapvp, MNT_WAIT); td->td_pflags |= TDP_COWINPROGRESS; error = ffs_balloc_ufs2(snapvp, lblktosize(fs, (off_t)lbn), fs->fs_bsize, KERNCRED, BA_METAONLY, &bp); td->td_pflags &= ~TDP_COWINPROGRESS; if (error) return (error); indiroff = (lbn - UFS_NDADDR) % NINDIR(fs); blkno = ((ufs2_daddr_t *)(bp->b_data))[indiroff]; bqrelse(bp); } if (blkno != 0) { if ((error = bread(snapvp, lbn, fs->fs_bsize, KERNCRED, &bp))) return (error); } else { error = ffs_balloc_ufs2(snapvp, lblktosize(fs, (off_t)lbn), fs->fs_bsize, KERNCRED, 0, &bp); if (error) return (error); if ((error = readblock(snapvp, bp, lbn)) != 0) return (error); } /* * Set a snapshot inode to be a zero length file, regular files * to be completely unallocated. */ dip = (struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, cancelip->i_number); - if (clearmode || cancelip->i_effnlink == 0) - dip->di_mode = 0; dip->di_size = 0; dip->di_blocks = 0; dip->di_flags &= ~SF_SNAPSHOT; bzero(&dip->di_db[0], (UFS_NDADDR + UFS_NIADDR) * sizeof(ufs2_daddr_t)); + if (clearmode || cancelip->i_effnlink == 0) + dip->di_mode = 0; bdwrite(bp); /* * Now go through and expunge all the blocks in the file * using the function requested. */ numblks = howmany(cancelip->i_size, fs->fs_bsize); if ((error = (*acctfunc)(snapvp, &cancelip->i_din2->di_db[0], &cancelip->i_din2->di_db[UFS_NDADDR], fs, 0, expungetype))) return (error); if ((error = (*acctfunc)(snapvp, &cancelip->i_din2->di_ib[0], &cancelip->i_din2->di_ib[UFS_NIADDR], fs, -1, expungetype))) return (error); blksperindir = 1; lbn = -UFS_NDADDR; len = numblks - UFS_NDADDR; rlbn = UFS_NDADDR; for (i = 0; len > 0 && i < UFS_NIADDR; i++) { error = indiracct_ufs2(snapvp, ITOV(cancelip), i, cancelip->i_din2->di_ib[i], lbn, rlbn, len, blksperindir, fs, acctfunc, expungetype); if (error) return (error); blksperindir *= NINDIR(fs); lbn -= blksperindir + 1; len -= blksperindir; rlbn += blksperindir; } return (0); } /* * Descend an indirect block chain for vnode cancelvp accounting for all * its indirect blocks in snapvp. 
*/ static int indiracct_ufs2(snapvp, cancelvp, level, blkno, lbn, rlbn, remblks, blksperindir, fs, acctfunc, expungetype) struct vnode *snapvp; struct vnode *cancelvp; int level; ufs2_daddr_t blkno; ufs_lbn_t lbn; ufs_lbn_t rlbn; ufs_lbn_t remblks; ufs_lbn_t blksperindir; struct fs *fs; int (*acctfunc)(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *, struct fs *, ufs_lbn_t, int); int expungetype; { int error, num, i; ufs_lbn_t subblksperindir; struct indir indirs[UFS_NIADDR + 2]; ufs2_daddr_t last, *bap; struct buf *bp; if (blkno == 0) { if (expungetype == BLK_NOCOPY) return (0); panic("indiracct_ufs2: missing indir"); } if ((error = ufs_getlbns(cancelvp, rlbn, indirs, &num)) != 0) return (error); if (lbn != indirs[num - 1 - level].in_lbn || num < 2) panic("indiracct_ufs2: botched params"); /* * We have to expand bread here since it will deadlock looking * up the block number for any blocks that are not in the cache. */ bp = getblk(cancelvp, lbn, fs->fs_bsize, 0, 0, 0); bp->b_blkno = fsbtodb(fs, blkno); if ((bp->b_flags & B_CACHE) == 0 && (error = readblock(cancelvp, bp, fragstoblks(fs, blkno)))) { brelse(bp); return (error); } /* * Account for the block pointers in this indirect block. */ last = howmany(remblks, blksperindir); if (last > NINDIR(fs)) last = NINDIR(fs); bap = malloc(fs->fs_bsize, M_DEVBUF, M_WAITOK); bcopy(bp->b_data, (caddr_t)bap, fs->fs_bsize); bqrelse(bp); error = (*acctfunc)(snapvp, &bap[0], &bap[last], fs, level == 0 ? rlbn : -1, expungetype); if (error || level == 0) goto out; /* * Account for the block pointers in each of the indirect blocks * in the levels below us. */ subblksperindir = blksperindir / NINDIR(fs); for (lbn++, level--, i = 0; i < last; i++) { error = indiracct_ufs2(snapvp, cancelvp, level, bap[i], lbn, rlbn, remblks, subblksperindir, fs, acctfunc, expungetype); if (error) goto out; rlbn += blksperindir; lbn -= blksperindir; remblks -= blksperindir; } out: free(bap, M_DEVBUF); return (error); } /* * Do both snap accounting and map accounting. */ static int fullacct_ufs2(vp, oldblkp, lastblkp, fs, lblkno, exptype) struct vnode *vp; ufs2_daddr_t *oldblkp, *lastblkp; struct fs *fs; ufs_lbn_t lblkno; int exptype; /* BLK_SNAP or BLK_NOCOPY */ { int error; if ((error = snapacct_ufs2(vp, oldblkp, lastblkp, fs, lblkno, exptype))) return (error); return (mapacct_ufs2(vp, oldblkp, lastblkp, fs, lblkno, exptype)); } /* * Identify a set of blocks allocated in a snapshot inode. */ static int snapacct_ufs2(vp, oldblkp, lastblkp, fs, lblkno, expungetype) struct vnode *vp; ufs2_daddr_t *oldblkp, *lastblkp; struct fs *fs; ufs_lbn_t lblkno; int expungetype; /* BLK_SNAP or BLK_NOCOPY */ { struct inode *ip = VTOI(vp); ufs2_daddr_t blkno, *blkp; ufs_lbn_t lbn; struct buf *ibp; int error; for ( ; oldblkp < lastblkp; oldblkp++) { blkno = *oldblkp; if (blkno == 0 || blkno == BLK_NOCOPY || blkno == BLK_SNAP) continue; lbn = fragstoblks(fs, blkno); if (lbn < UFS_NDADDR) { blkp = &ip->i_din2->di_db[lbn]; ip->i_flag |= IN_CHANGE | IN_UPDATE; } else { error = ffs_balloc_ufs2(vp, lblktosize(fs, (off_t)lbn), fs->fs_bsize, KERNCRED, BA_METAONLY, &ibp); if (error) return (error); blkp = &((ufs2_daddr_t *)(ibp->b_data)) [(lbn - UFS_NDADDR) % NINDIR(fs)]; } /* * If we are expunging a snapshot vnode and we * find a block marked BLK_NOCOPY, then it is * one that has been allocated to this snapshot after * we took our current snapshot and can be ignored. 
*/ if (expungetype == BLK_SNAP && *blkp == BLK_NOCOPY) { if (lbn >= UFS_NDADDR) brelse(ibp); } else { if (*blkp != 0) panic("snapacct_ufs2: bad block"); *blkp = expungetype; if (lbn >= UFS_NDADDR) bdwrite(ibp); } } return (0); } /* * Account for a set of blocks allocated in a snapshot inode. */ static int mapacct_ufs2(vp, oldblkp, lastblkp, fs, lblkno, expungetype) struct vnode *vp; ufs2_daddr_t *oldblkp, *lastblkp; struct fs *fs; ufs_lbn_t lblkno; int expungetype; { ufs2_daddr_t blkno; struct inode *ip; ino_t inum; int acctit; ip = VTOI(vp); inum = ip->i_number; if (lblkno == -1) acctit = 0; else acctit = 1; for ( ; oldblkp < lastblkp; oldblkp++, lblkno++) { blkno = *oldblkp; if (blkno == 0 || blkno == BLK_NOCOPY) continue; if (acctit && expungetype == BLK_SNAP && blkno != BLK_SNAP) *ip->i_snapblklist++ = lblkno; if (blkno == BLK_SNAP) blkno = blkstofrags(fs, lblkno); ffs_blkfree(ITOUMP(ip), fs, vp, blkno, fs->fs_bsize, inum, vp->v_type, NULL, SINGLETON_KEY); } return (0); } /* * Decrement extra reference on snapshot when last name is removed. * It will not be freed until the last open reference goes away. */ void ffs_snapgone(ip) struct inode *ip; { struct inode *xp; struct fs *fs; int snaploc; struct snapdata *sn; struct ufsmount *ump; /* * Find snapshot in incore list. */ xp = NULL; sn = ITODEVVP(ip)->v_rdev->si_snapdata; if (sn != NULL) TAILQ_FOREACH(xp, &sn->sn_head, i_nextsnap) if (xp == ip) break; if (xp != NULL) vrele(ITOV(ip)); else if (snapdebug) printf("ffs_snapgone: lost snapshot vnode %ju\n", (uintmax_t)ip->i_number); /* * Delete snapshot inode from superblock. Keep list dense. */ ump = ITOUMP(ip); fs = ump->um_fs; UFS_LOCK(ump); for (snaploc = 0; snaploc < FSMAXSNAP; snaploc++) if (fs->fs_snapinum[snaploc] == ip->i_number) break; if (snaploc < FSMAXSNAP) { for (snaploc++; snaploc < FSMAXSNAP; snaploc++) { if (fs->fs_snapinum[snaploc] == 0) break; fs->fs_snapinum[snaploc - 1] = fs->fs_snapinum[snaploc]; } fs->fs_snapinum[snaploc - 1] = 0; } UFS_UNLOCK(ump); } /* * Prepare a snapshot file for being removed. */ void ffs_snapremove(vp) struct vnode *vp; { struct inode *ip; struct vnode *devvp; struct buf *ibp; struct fs *fs; ufs2_daddr_t numblks, blkno, dblk; int error, i, last, loc; struct snapdata *sn; ip = VTOI(vp); fs = ITOFS(ip); devvp = ITODEVVP(ip); /* * If active, delete from incore list (this snapshot may * already have been in the process of being deleted, so * would not have been active). * * Clear copy-on-write flag if last snapshot. */ VI_LOCK(devvp); if (ip->i_nextsnap.tqe_prev != 0) { sn = devvp->v_rdev->si_snapdata; TAILQ_REMOVE(&sn->sn_head, ip, i_nextsnap); ip->i_nextsnap.tqe_prev = 0; VI_UNLOCK(devvp); lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL); for (i = 0; i < sn->sn_lock.lk_recurse; i++) lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL); KASSERT(vp->v_vnlock == &sn->sn_lock, ("ffs_snapremove: lost lock mutation")); vp->v_vnlock = &vp->v_lock; VI_LOCK(devvp); while (sn->sn_lock.lk_recurse > 0) lockmgr(&sn->sn_lock, LK_RELEASE, NULL); lockmgr(&sn->sn_lock, LK_RELEASE, NULL); try_free_snapdata(devvp); } else VI_UNLOCK(devvp); /* * Clear all BLK_NOCOPY fields. Pass any block claims to other * snapshots that want them (see ffs_snapblkfree below). 
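 *
 * A block that this snapshot has claimed is recognizable below by the test
 * dblk == blkstofrags(fs, blkno), i.e. its physical address equals its own
 * logical position in the snapshot (see the ffs_snapblkfree comment
 * further down).  Such a block is offered to the remaining snapshots via
 * ffs_snapblkfree(); if one of them takes it over, it is dropped from this
 * inode's block pointer and block count.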
*/ for (blkno = 1; blkno < UFS_NDADDR; blkno++) { dblk = DIP(ip, i_db[blkno]); if (dblk == 0) continue; if (dblk == BLK_NOCOPY || dblk == BLK_SNAP) DIP_SET(ip, i_db[blkno], 0); else if ((dblk == blkstofrags(fs, blkno) && ffs_snapblkfree(fs, ITODEVVP(ip), dblk, fs->fs_bsize, ip->i_number, vp->v_type, NULL))) { DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - btodb(fs->fs_bsize)); DIP_SET(ip, i_db[blkno], 0); } } numblks = howmany(ip->i_size, fs->fs_bsize); for (blkno = UFS_NDADDR; blkno < numblks; blkno += NINDIR(fs)) { error = UFS_BALLOC(vp, lblktosize(fs, (off_t)blkno), fs->fs_bsize, KERNCRED, BA_METAONLY, &ibp); if (error) continue; if (fs->fs_size - blkno > NINDIR(fs)) last = NINDIR(fs); else last = fs->fs_size - blkno; for (loc = 0; loc < last; loc++) { if (I_IS_UFS1(ip)) { dblk = ((ufs1_daddr_t *)(ibp->b_data))[loc]; if (dblk == 0) continue; if (dblk == BLK_NOCOPY || dblk == BLK_SNAP) ((ufs1_daddr_t *)(ibp->b_data))[loc]= 0; else if ((dblk == blkstofrags(fs, blkno) && ffs_snapblkfree(fs, ITODEVVP(ip), dblk, fs->fs_bsize, ip->i_number, vp->v_type, NULL))) { ip->i_din1->di_blocks -= btodb(fs->fs_bsize); ((ufs1_daddr_t *)(ibp->b_data))[loc]= 0; } continue; } dblk = ((ufs2_daddr_t *)(ibp->b_data))[loc]; if (dblk == 0) continue; if (dblk == BLK_NOCOPY || dblk == BLK_SNAP) ((ufs2_daddr_t *)(ibp->b_data))[loc] = 0; else if ((dblk == blkstofrags(fs, blkno) && ffs_snapblkfree(fs, ITODEVVP(ip), dblk, fs->fs_bsize, ip->i_number, vp->v_type, NULL))) { ip->i_din2->di_blocks -= btodb(fs->fs_bsize); ((ufs2_daddr_t *)(ibp->b_data))[loc] = 0; } } bawrite(ibp); } /* * Clear snapshot flag and drop reference. */ ip->i_flags &= ~SF_SNAPSHOT; DIP_SET(ip, i_flags, ip->i_flags); ip->i_flag |= IN_CHANGE | IN_UPDATE; /* * The dirtied indirects must be written out before * softdep_setup_freeblocks() is called. Otherwise indir_trunc() * may find indirect pointers using the magic BLK_* values. */ if (DOINGSOFTDEP(vp)) ffs_syncvnode(vp, MNT_WAIT, 0); #ifdef QUOTA /* * Reenable disk quotas for ex-snapshot file. */ if (!getinoquota(ip)) (void) chkdq(ip, DIP(ip, i_blocks), KERNCRED, FORCE); #endif } /* * Notification that a block is being freed. Return zero if the free * should be allowed to proceed. Return non-zero if the snapshot file * wants to claim the block. The block will be claimed if it is an * uncopied part of one of the snapshots. It will be freed if it is * either a BLK_NOCOPY or has already been copied in all of the snapshots. * If a fragment is being freed, then all snapshots that care about * it must make a copy since a snapshot file can only claim full sized * blocks. Note that if more than one snapshot file maps the block, * we can pick one at random to claim it. Since none of the snapshots * can change, we are assurred that they will all see the same unmodified * image. When deleting a snapshot file (see ffs_snapremove above), we * must push any of these claimed blocks to one of the other snapshots * that maps it. These claimed blocks are easily identified as they will * have a block number equal to their logical block number within the * snapshot. A copied block can never have this property because they * must always have been allocated from a BLK_NOCOPY location. 
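 *
 * Summarized for the caller:
 *
 *	returns 0         no snapshot needs the block; the normal free may
 *	                  proceed
 *	returns non-zero  a snapshot has taken over the block (or copying it
 *	                  failed), so the caller must not free it
 *
 * Deliberately leaking a fragment on a copy failure is preferred over
 * risking an inconsistent snapshot, as noted at the end of the routine.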
*/ int ffs_snapblkfree(fs, devvp, bno, size, inum, vtype, wkhd) struct fs *fs; struct vnode *devvp; ufs2_daddr_t bno; long size; ino_t inum; enum vtype vtype; struct workhead *wkhd; { struct buf *ibp, *cbp, *savedcbp = NULL; struct thread *td = curthread; struct inode *ip; struct vnode *vp = NULL; ufs_lbn_t lbn; ufs2_daddr_t blkno; int indiroff = 0, error = 0, claimedblk = 0; struct snapdata *sn; lbn = fragstoblks(fs, bno); retry: VI_LOCK(devvp); sn = devvp->v_rdev->si_snapdata; if (sn == NULL) { VI_UNLOCK(devvp); return (0); } if (lockmgr(&sn->sn_lock, LK_INTERLOCK | LK_EXCLUSIVE | LK_SLEEPFAIL, VI_MTX(devvp)) != 0) goto retry; TAILQ_FOREACH(ip, &sn->sn_head, i_nextsnap) { vp = ITOV(ip); if (DOINGSOFTDEP(vp)) softdep_prealloc(vp, MNT_WAIT); /* * Lookup block being written. */ if (lbn < UFS_NDADDR) { blkno = DIP(ip, i_db[lbn]); } else { td->td_pflags |= TDP_COWINPROGRESS; error = UFS_BALLOC(vp, lblktosize(fs, (off_t)lbn), fs->fs_bsize, KERNCRED, BA_METAONLY, &ibp); td->td_pflags &= ~TDP_COWINPROGRESS; if (error) break; indiroff = (lbn - UFS_NDADDR) % NINDIR(fs); if (I_IS_UFS1(ip)) blkno=((ufs1_daddr_t *)(ibp->b_data))[indiroff]; else blkno=((ufs2_daddr_t *)(ibp->b_data))[indiroff]; } /* * Check to see if block needs to be copied. */ if (blkno == 0) { /* * A block that we map is being freed. If it has not * been claimed yet, we will claim or copy it (below). */ claimedblk = 1; } else if (blkno == BLK_SNAP) { /* * No previous snapshot claimed the block, * so it will be freed and become a BLK_NOCOPY * (don't care) for us. */ if (claimedblk) panic("snapblkfree: inconsistent block type"); if (lbn < UFS_NDADDR) { DIP_SET(ip, i_db[lbn], BLK_NOCOPY); ip->i_flag |= IN_CHANGE | IN_UPDATE; } else if (I_IS_UFS1(ip)) { ((ufs1_daddr_t *)(ibp->b_data))[indiroff] = BLK_NOCOPY; bdwrite(ibp); } else { ((ufs2_daddr_t *)(ibp->b_data))[indiroff] = BLK_NOCOPY; bdwrite(ibp); } continue; } else /* BLK_NOCOPY or default */ { /* * If the snapshot has already copied the block * (default), or does not care about the block, * it is not needed. */ if (lbn >= UFS_NDADDR) bqrelse(ibp); continue; } /* * If this is a full size block, we will just grab it * and assign it to the snapshot inode. Otherwise we * will proceed to copy it. See explanation for this * routine as to why only a single snapshot needs to * claim this block. */ if (size == fs->fs_bsize) { #ifdef DEBUG if (snapdebug) printf("%s %ju lbn %jd from inum %ju\n", "Grabonremove: snapino", (uintmax_t)ip->i_number, (intmax_t)lbn, (uintmax_t)inum); #endif /* * If journaling is tracking this write we must add * the work to the inode or indirect being written. */ if (wkhd != NULL) { if (lbn < UFS_NDADDR) softdep_inode_append(ip, curthread->td_ucred, wkhd); else softdep_buf_append(ibp, wkhd); } if (lbn < UFS_NDADDR) { DIP_SET(ip, i_db[lbn], bno); } else if (I_IS_UFS1(ip)) { ((ufs1_daddr_t *)(ibp->b_data))[indiroff] = bno; bdwrite(ibp); } else { ((ufs2_daddr_t *)(ibp->b_data))[indiroff] = bno; bdwrite(ibp); } DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + btodb(size)); ip->i_flag |= IN_CHANGE | IN_UPDATE; lockmgr(vp->v_vnlock, LK_RELEASE, NULL); return (1); } if (lbn >= UFS_NDADDR) bqrelse(ibp); /* * Allocate the block into which to do the copy. Note that this * allocation will never require any additional allocations for * the snapshot inode. 
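 *
 * This holds because every indirect block of the snapshot inode was
 * preallocated in ffs_snapshot() above, so the UFS_BALLOC() call that
 * follows only has to allocate the copy block itself and never needs to
 * allocate new snapshot metadata.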
*/ td->td_pflags |= TDP_COWINPROGRESS; error = UFS_BALLOC(vp, lblktosize(fs, (off_t)lbn), fs->fs_bsize, KERNCRED, 0, &cbp); td->td_pflags &= ~TDP_COWINPROGRESS; if (error) break; #ifdef DEBUG if (snapdebug) printf("%s%ju lbn %jd %s %ju size %ld to blkno %jd\n", "Copyonremove: snapino ", (uintmax_t)ip->i_number, (intmax_t)lbn, "for inum", (uintmax_t)inum, size, (intmax_t)cbp->b_blkno); #endif /* * If we have already read the old block contents, then * simply copy them to the new block. Note that we need * to synchronously write snapshots that have not been * unlinked, and hence will be visible after a crash, * to ensure their integrity. At a minimum we ensure the * integrity of the filesystem metadata, but use the * dopersistence sysctl-setable flag to decide on the * persistence needed for file content data. */ if (savedcbp != NULL) { bcopy(savedcbp->b_data, cbp->b_data, fs->fs_bsize); bawrite(cbp); if ((vtype == VDIR || dopersistence) && ip->i_effnlink > 0) (void) ffs_syncvnode(vp, MNT_WAIT, NO_INO_UPDT); continue; } /* * Otherwise, read the old block contents into the buffer. */ if ((error = readblock(vp, cbp, lbn)) != 0) { bzero(cbp->b_data, fs->fs_bsize); bawrite(cbp); if ((vtype == VDIR || dopersistence) && ip->i_effnlink > 0) (void) ffs_syncvnode(vp, MNT_WAIT, NO_INO_UPDT); break; } savedcbp = cbp; } /* * Note that we need to synchronously write snapshots that * have not been unlinked, and hence will be visible after * a crash, to ensure their integrity. At a minimum we * ensure the integrity of the filesystem metadata, but * use the dopersistence sysctl-setable flag to decide on * the persistence needed for file content data. */ if (savedcbp) { vp = savedcbp->b_vp; bawrite(savedcbp); if ((vtype == VDIR || dopersistence) && VTOI(vp)->i_effnlink > 0) (void) ffs_syncvnode(vp, MNT_WAIT, NO_INO_UPDT); } /* * If we have been unable to allocate a block in which to do * the copy, then return non-zero so that the fragment will * not be freed. Although space will be lost, the snapshot * will stay consistent. */ if (error != 0 && wkhd != NULL) softdep_freework(wkhd); lockmgr(&sn->sn_lock, LK_RELEASE, NULL); return (error); } /* * Associate snapshot files when mounting. */ void ffs_snapshot_mount(mp) struct mount *mp; { struct ufsmount *ump = VFSTOUFS(mp); struct vnode *devvp = ump->um_devvp; struct fs *fs = ump->um_fs; struct thread *td = curthread; struct snapdata *sn; struct vnode *vp; struct vnode *lastvp; struct inode *ip; struct uio auio; struct iovec aiov; void *snapblklist; char *reason; daddr_t snaplistsize; int error, snaploc, loc; /* * XXX The following needs to be set before ffs_truncate or * VOP_READ can be called. */ mp->mnt_stat.f_iosize = fs->fs_bsize; /* * Process each snapshot listed in the superblock. 
*/ vp = NULL; lastvp = NULL; sn = NULL; for (snaploc = 0; snaploc < FSMAXSNAP; snaploc++) { if (fs->fs_snapinum[snaploc] == 0) break; if ((error = ffs_vget(mp, fs->fs_snapinum[snaploc], LK_EXCLUSIVE, &vp)) != 0){ printf("ffs_snapshot_mount: vget failed %d\n", error); continue; } ip = VTOI(vp); if (!IS_SNAPSHOT(ip) || ip->i_size == lblktosize(fs, howmany(fs->fs_size, fs->fs_frag))) { if (!IS_SNAPSHOT(ip)) { reason = "non-snapshot"; } else { reason = "old format snapshot"; (void)ffs_truncate(vp, (off_t)0, 0, NOCRED); (void)ffs_syncvnode(vp, MNT_WAIT, 0); } printf("ffs_snapshot_mount: %s inode %d\n", reason, fs->fs_snapinum[snaploc]); vput(vp); vp = NULL; for (loc = snaploc + 1; loc < FSMAXSNAP; loc++) { if (fs->fs_snapinum[loc] == 0) break; fs->fs_snapinum[loc - 1] = fs->fs_snapinum[loc]; } fs->fs_snapinum[loc - 1] = 0; snaploc--; continue; } /* * Acquire a lock on the snapdata structure, creating it if * necessary. */ sn = ffs_snapdata_acquire(devvp); /* * Change vnode to use shared snapshot lock instead of the * original private lock. */ vp->v_vnlock = &sn->sn_lock; lockmgr(&vp->v_lock, LK_RELEASE, NULL); /* * Link it onto the active snapshot list. */ VI_LOCK(devvp); if (ip->i_nextsnap.tqe_prev != 0) panic("ffs_snapshot_mount: %ju already on list", (uintmax_t)ip->i_number); else TAILQ_INSERT_TAIL(&sn->sn_head, ip, i_nextsnap); vp->v_vflag |= VV_SYSTEM; VI_UNLOCK(devvp); VOP_UNLOCK(vp, 0); lastvp = vp; } vp = lastvp; /* * No usable snapshots found. */ if (sn == NULL || vp == NULL) return; /* * Allocate the space for the block hints list. We always want to * use the list from the newest snapshot. */ auio.uio_iov = &aiov; auio.uio_iovcnt = 1; aiov.iov_base = (void *)&snaplistsize; aiov.iov_len = sizeof(snaplistsize); auio.uio_resid = aiov.iov_len; auio.uio_offset = lblktosize(fs, howmany(fs->fs_size, fs->fs_frag)); auio.uio_segflg = UIO_SYSSPACE; auio.uio_rw = UIO_READ; auio.uio_td = td; vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); if ((error = VOP_READ(vp, &auio, IO_UNIT, td->td_ucred)) != 0) { printf("ffs_snapshot_mount: read_1 failed %d\n", error); VOP_UNLOCK(vp, 0); return; } snapblklist = malloc(snaplistsize * sizeof(daddr_t), M_UFSMNT, M_WAITOK); auio.uio_iovcnt = 1; aiov.iov_base = snapblklist; aiov.iov_len = snaplistsize * sizeof (daddr_t); auio.uio_resid = aiov.iov_len; auio.uio_offset -= sizeof(snaplistsize); if ((error = VOP_READ(vp, &auio, IO_UNIT, td->td_ucred)) != 0) { printf("ffs_snapshot_mount: read_2 failed %d\n", error); VOP_UNLOCK(vp, 0); free(snapblklist, M_UFSMNT); return; } VOP_UNLOCK(vp, 0); VI_LOCK(devvp); ASSERT_VOP_LOCKED(devvp, "ffs_snapshot_mount"); sn->sn_listsize = snaplistsize; sn->sn_blklist = (daddr_t *)snapblklist; devvp->v_vflag |= VV_COPYONWRITE; VI_UNLOCK(devvp); } /* * Disassociate snapshot files when unmounting. 
*/ void ffs_snapshot_unmount(mp) struct mount *mp; { struct vnode *devvp = VFSTOUFS(mp)->um_devvp; struct snapdata *sn; struct inode *xp; struct vnode *vp; VI_LOCK(devvp); sn = devvp->v_rdev->si_snapdata; while (sn != NULL && (xp = TAILQ_FIRST(&sn->sn_head)) != NULL) { vp = ITOV(xp); TAILQ_REMOVE(&sn->sn_head, xp, i_nextsnap); xp->i_nextsnap.tqe_prev = 0; lockmgr(&sn->sn_lock, LK_INTERLOCK | LK_EXCLUSIVE, VI_MTX(devvp)); lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL); KASSERT(vp->v_vnlock == &sn->sn_lock, ("ffs_snapshot_unmount: lost lock mutation")); vp->v_vnlock = &vp->v_lock; lockmgr(&vp->v_lock, LK_RELEASE, NULL); lockmgr(&sn->sn_lock, LK_RELEASE, NULL); if (xp->i_effnlink > 0) vrele(vp); VI_LOCK(devvp); sn = devvp->v_rdev->si_snapdata; } try_free_snapdata(devvp); ASSERT_VOP_LOCKED(devvp, "ffs_snapshot_unmount"); } /* * Check the buffer block to be belong to device buffer that shall be * locked after snaplk. devvp shall be locked on entry, and will be * leaved locked upon exit. */ static int ffs_bp_snapblk(devvp, bp) struct vnode *devvp; struct buf *bp; { struct snapdata *sn; struct fs *fs; ufs2_daddr_t lbn, *snapblklist; int lower, upper, mid; ASSERT_VI_LOCKED(devvp, "ffs_bp_snapblk"); KASSERT(devvp->v_type == VCHR, ("Not a device %p", devvp)); sn = devvp->v_rdev->si_snapdata; if (sn == NULL || TAILQ_FIRST(&sn->sn_head) == NULL) return (0); fs = ITOFS(TAILQ_FIRST(&sn->sn_head)); lbn = fragstoblks(fs, dbtofsb(fs, bp->b_blkno)); snapblklist = sn->sn_blklist; upper = sn->sn_listsize - 1; lower = 1; while (lower <= upper) { mid = (lower + upper) / 2; if (snapblklist[mid] == lbn) break; if (snapblklist[mid] < lbn) lower = mid + 1; else upper = mid - 1; } if (lower <= upper) return (1); return (0); } void ffs_bdflush(bo, bp) struct bufobj *bo; struct buf *bp; { struct thread *td; struct vnode *vp, *devvp; struct buf *nbp; int bp_bdskip; if (bo->bo_dirty.bv_cnt <= dirtybufthresh) return; td = curthread; vp = bp->b_vp; devvp = bo2vnode(bo); KASSERT(vp == devvp, ("devvp != vp %p %p", bo, bp)); VI_LOCK(devvp); bp_bdskip = ffs_bp_snapblk(devvp, bp); if (bp_bdskip) bdwriteskip++; VI_UNLOCK(devvp); if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10 && !bp_bdskip) { (void) VOP_FSYNC(vp, MNT_NOWAIT, td); altbufferflushes++; } else { BO_LOCK(bo); /* * Try to find a buffer to flush. */ TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) { if ((nbp->b_vflags & BV_BKGRDINPROG) || BUF_LOCK(nbp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) continue; if (bp == nbp) panic("bdwrite: found ourselves"); BO_UNLOCK(bo); /* * Don't countdeps with the bo lock * held. */ if (buf_countdeps(nbp, 0)) { BO_LOCK(bo); BUF_UNLOCK(nbp); continue; } if (bp_bdskip) { VI_LOCK(devvp); if (!ffs_bp_snapblk(vp, nbp)) { VI_UNLOCK(devvp); BO_LOCK(bo); BUF_UNLOCK(nbp); continue; } VI_UNLOCK(devvp); } if (nbp->b_flags & B_CLUSTEROK) { vfs_bio_awrite(nbp); } else { bremfree(nbp); bawrite(nbp); } dirtybufferflushes++; break; } if (nbp == NULL) BO_UNLOCK(bo); } } /* * Check for need to copy block that is about to be written, * copying the block if necessary. 
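 *
 * The fast path below first consults sn_blklist, the sorted per-device
 * list of blocks preallocated to the snapshots (taken from the newest
 * snapshot); entry 0 of that list stores its own length, so the binary
 * search runs over indices 1 .. sn_listsize - 1.  A hit means the block
 * is already part of a snapshot and needs no copy-on-write.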
*/ int ffs_copyonwrite(devvp, bp) struct vnode *devvp; struct buf *bp; { struct snapdata *sn; struct buf *ibp, *cbp, *savedcbp = NULL; struct thread *td = curthread; struct fs *fs; struct inode *ip; struct vnode *vp = NULL; ufs2_daddr_t lbn, blkno, *snapblklist; int lower, upper, mid, indiroff, error = 0; int launched_async_io, prev_norunningbuf; long saved_runningbufspace; if (devvp != bp->b_vp && IS_SNAPSHOT(VTOI(bp->b_vp))) return (0); /* Update on a snapshot file */ if (td->td_pflags & TDP_COWINPROGRESS) panic("ffs_copyonwrite: recursive call"); /* * First check to see if it is in the preallocated list. * By doing this check we avoid several potential deadlocks. */ VI_LOCK(devvp); sn = devvp->v_rdev->si_snapdata; if (sn == NULL || TAILQ_EMPTY(&sn->sn_head)) { VI_UNLOCK(devvp); return (0); /* No snapshot */ } ip = TAILQ_FIRST(&sn->sn_head); fs = ITOFS(ip); lbn = fragstoblks(fs, dbtofsb(fs, bp->b_blkno)); snapblklist = sn->sn_blklist; upper = sn->sn_listsize - 1; lower = 1; while (lower <= upper) { mid = (lower + upper) / 2; if (snapblklist[mid] == lbn) break; if (snapblklist[mid] < lbn) lower = mid + 1; else upper = mid - 1; } if (lower <= upper) { VI_UNLOCK(devvp); return (0); } launched_async_io = 0; prev_norunningbuf = td->td_pflags & TDP_NORUNNINGBUF; /* * Since I/O on bp isn't yet in progress and it may be blocked * for a long time waiting on snaplk, back it out of * runningbufspace, possibly waking other threads waiting for space. */ saved_runningbufspace = bp->b_runningbufspace; if (saved_runningbufspace != 0) runningbufwakeup(bp); /* * Not in the precomputed list, so check the snapshots. */ while (lockmgr(&sn->sn_lock, LK_INTERLOCK | LK_EXCLUSIVE | LK_SLEEPFAIL, VI_MTX(devvp)) != 0) { VI_LOCK(devvp); sn = devvp->v_rdev->si_snapdata; if (sn == NULL || TAILQ_EMPTY(&sn->sn_head)) { VI_UNLOCK(devvp); if (saved_runningbufspace != 0) { bp->b_runningbufspace = saved_runningbufspace; atomic_add_long(&runningbufspace, bp->b_runningbufspace); } return (0); /* Snapshot gone */ } } TAILQ_FOREACH(ip, &sn->sn_head, i_nextsnap) { vp = ITOV(ip); if (DOINGSOFTDEP(vp)) softdep_prealloc(vp, MNT_WAIT); /* * We ensure that everything of our own that needs to be * copied will be done at the time that ffs_snapshot is * called. Thus we can skip the check here which can * deadlock in doing the lookup in UFS_BALLOC. */ if (bp->b_vp == vp) continue; /* * Check to see if block needs to be copied. We do not have * to hold the snapshot lock while doing this lookup as it * will never require any additional allocations for the * snapshot inode. */ if (lbn < UFS_NDADDR) { blkno = DIP(ip, i_db[lbn]); } else { td->td_pflags |= TDP_COWINPROGRESS | TDP_NORUNNINGBUF; error = UFS_BALLOC(vp, lblktosize(fs, (off_t)lbn), fs->fs_bsize, KERNCRED, BA_METAONLY, &ibp); td->td_pflags &= ~TDP_COWINPROGRESS; if (error) break; indiroff = (lbn - UFS_NDADDR) % NINDIR(fs); if (I_IS_UFS1(ip)) blkno=((ufs1_daddr_t *)(ibp->b_data))[indiroff]; else blkno=((ufs2_daddr_t *)(ibp->b_data))[indiroff]; bqrelse(ibp); } #ifdef INVARIANTS if (blkno == BLK_SNAP && bp->b_lblkno >= 0) panic("ffs_copyonwrite: bad copy block"); #endif if (blkno != 0) continue; /* * Allocate the block into which to do the copy. Since * multiple processes may all try to copy the same block, * we have to recheck our need to do a copy if we sleep * waiting for the lock. * * Because all snapshots on a filesystem share a single * lock, we ensure that we will never be in competition * with another process to allocate a block. 
*/ td->td_pflags |= TDP_COWINPROGRESS | TDP_NORUNNINGBUF; error = UFS_BALLOC(vp, lblktosize(fs, (off_t)lbn), fs->fs_bsize, KERNCRED, 0, &cbp); td->td_pflags &= ~TDP_COWINPROGRESS; if (error) break; #ifdef DEBUG if (snapdebug) { printf("Copyonwrite: snapino %ju lbn %jd for ", (uintmax_t)ip->i_number, (intmax_t)lbn); if (bp->b_vp == devvp) printf("fs metadata"); else printf("inum %ju", (uintmax_t)VTOI(bp->b_vp)->i_number); printf(" lblkno %jd to blkno %jd\n", (intmax_t)bp->b_lblkno, (intmax_t)cbp->b_blkno); } #endif /* * If we have already read the old block contents, then * simply copy them to the new block. Note that we need * to synchronously write snapshots that have not been * unlinked, and hence will be visible after a crash, * to ensure their integrity. At a minimum we ensure the * integrity of the filesystem metadata, but use the * dopersistence sysctl-setable flag to decide on the * persistence needed for file content data. */ if (savedcbp != NULL) { bcopy(savedcbp->b_data, cbp->b_data, fs->fs_bsize); bawrite(cbp); if ((devvp == bp->b_vp || bp->b_vp->v_type == VDIR || dopersistence) && ip->i_effnlink > 0) (void) ffs_syncvnode(vp, MNT_WAIT, NO_INO_UPDT); else launched_async_io = 1; continue; } /* * Otherwise, read the old block contents into the buffer. */ if ((error = readblock(vp, cbp, lbn)) != 0) { bzero(cbp->b_data, fs->fs_bsize); bawrite(cbp); if ((devvp == bp->b_vp || bp->b_vp->v_type == VDIR || dopersistence) && ip->i_effnlink > 0) (void) ffs_syncvnode(vp, MNT_WAIT, NO_INO_UPDT); else launched_async_io = 1; break; } savedcbp = cbp; } /* * Note that we need to synchronously write snapshots that * have not been unlinked, and hence will be visible after * a crash, to ensure their integrity. At a minimum we * ensure the integrity of the filesystem metadata, but * use the dopersistence sysctl-setable flag to decide on * the persistence needed for file content data. */ if (savedcbp) { vp = savedcbp->b_vp; bawrite(savedcbp); if ((devvp == bp->b_vp || bp->b_vp->v_type == VDIR || dopersistence) && VTOI(vp)->i_effnlink > 0) (void) ffs_syncvnode(vp, MNT_WAIT, NO_INO_UPDT); else launched_async_io = 1; } lockmgr(vp->v_vnlock, LK_RELEASE, NULL); td->td_pflags = (td->td_pflags & ~TDP_NORUNNINGBUF) | prev_norunningbuf; if (launched_async_io && (td->td_pflags & TDP_NORUNNINGBUF) == 0) waitrunningbufspace(); /* * I/O on bp will now be started, so count it in runningbufspace. */ if (saved_runningbufspace != 0) { bp->b_runningbufspace = saved_runningbufspace; atomic_add_long(&runningbufspace, bp->b_runningbufspace); } return (error); } /* * sync snapshots to force freework records waiting on snapshots to claim * blocks to free. */ void ffs_sync_snap(mp, waitfor) struct mount *mp; int waitfor; { struct snapdata *sn; struct vnode *devvp; struct vnode *vp; struct inode *ip; devvp = VFSTOUFS(mp)->um_devvp; if ((devvp->v_vflag & VV_COPYONWRITE) == 0) return; for (;;) { VI_LOCK(devvp); sn = devvp->v_rdev->si_snapdata; if (sn == NULL) { VI_UNLOCK(devvp); return; } if (lockmgr(&sn->sn_lock, LK_INTERLOCK | LK_EXCLUSIVE | LK_SLEEPFAIL, VI_MTX(devvp)) == 0) break; } TAILQ_FOREACH(ip, &sn->sn_head, i_nextsnap) { vp = ITOV(ip); ffs_syncvnode(vp, waitfor, NO_INO_UPDT); } lockmgr(&sn->sn_lock, LK_RELEASE, NULL); } /* * Read the specified block into the given buffer. * Much of this boiler-plate comes from bwrite(). 
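/*
 * Illustrative standalone sketch (not taken from this file) of the savedcbp
 * optimization above: the old contents of a block are read from disk for the
 * first snapshot that needs them, and every later snapshot in the same pass
 * just copies that saved buffer.  fake_read() and the other names are
 * stand-ins for readblock() and the buffer-cache machinery.
 */
#include <stdio.h>
#include <string.h>

static int
fake_read(char *dst, size_t bsize)
{
	memset(dst, 'A', bsize);	/* pretend this came from the device */
	return (0);
}

static int
copy_to_snapshots(char **snapbufs, int nsnap, size_t bsize,
    int (*read_old_block)(char *, size_t))
{
	char *saved = NULL;
	int error = 0, i;

	for (i = 0; i < nsnap; i++) {
		if (saved != NULL) {
			/* Later snapshots reuse the copy already read. */
			memcpy(snapbufs[i], saved, bsize);
			continue;
		}
		/* First snapshot needing the block: read it once. */
		error = read_old_block(snapbufs[i], bsize);
		if (error != 0)
			break;
		saved = snapbufs[i];
	}
	return (error);
}

int
main(void)
{
	char b0[4], b1[4], b2[4];
	char *bufs[] = { b0, b1, b2 };

	copy_to_snapshots(bufs, 3, sizeof(b0), fake_read);
	printf("%c %c %c\n", b0[0], b1[0], b2[0]);	/* prints "A A A" */
	return (0);
}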
*/ static int readblock(vp, bp, lbn) struct vnode *vp; struct buf *bp; ufs2_daddr_t lbn; { struct inode *ip; struct bio *bip; struct fs *fs; ip = VTOI(vp); fs = ITOFS(ip); bip = g_alloc_bio(); bip->bio_cmd = BIO_READ; bip->bio_offset = dbtob(fsbtodb(fs, blkstofrags(fs, lbn))); bip->bio_data = bp->b_data; bip->bio_length = bp->b_bcount; bip->bio_done = NULL; g_io_request(bip, ITODEVVP(ip)->v_bufobj.bo_private); bp->b_error = biowait(bip, "snaprdb"); g_destroy_bio(bip); return (bp->b_error); } #endif /* * Process file deletes that were deferred by ufs_inactive() due to * the file system being suspended. Transfer IN_LAZYACCESS into * IN_MODIFIED for vnodes that were accessed during suspension. */ void process_deferred_inactive(struct mount *mp) { struct vnode *vp, *mvp; struct inode *ip; struct thread *td; int error; td = curthread; (void) vn_start_secondary_write(NULL, &mp, V_WAIT); loop: MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { /* * IN_LAZYACCESS is checked here without holding any * vnode lock, but this flag is set only while holding * vnode interlock. */ if (vp->v_type == VNON || ((VTOI(vp)->i_flag & IN_LAZYACCESS) == 0 && ((vp->v_iflag & VI_OWEINACT) == 0 || vp->v_usecount > 0))) { VI_UNLOCK(vp); continue; } vholdl(vp); error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); if (error != 0) { vdrop(vp); if (error == ENOENT) continue; /* vnode recycled */ MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); goto loop; } ip = VTOI(vp); if ((ip->i_flag & IN_LAZYACCESS) != 0) { ip->i_flag &= ~IN_LAZYACCESS; ip->i_flag |= IN_MODIFIED; } VI_LOCK(vp); if ((vp->v_iflag & VI_OWEINACT) == 0 || vp->v_usecount > 0) { VI_UNLOCK(vp); VOP_UNLOCK(vp, 0); vdrop(vp); continue; } vinactive(vp, td); VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, ("process_deferred_inactive: got VI_OWEINACT")); VI_UNLOCK(vp); VOP_UNLOCK(vp, 0); vdrop(vp); } vn_finished_secondary_write(mp); } #ifndef NO_FFS_SNAPSHOT static struct snapdata * ffs_snapdata_alloc(void) { struct snapdata *sn; /* * Fetch a snapdata from the free list if there is one available. */ mtx_lock(&snapfree_lock); sn = LIST_FIRST(&snapfree); if (sn != NULL) LIST_REMOVE(sn, sn_link); mtx_unlock(&snapfree_lock); if (sn != NULL) return (sn); /* * If there were no free snapdatas allocate one. */ sn = malloc(sizeof *sn, M_UFSMNT, M_WAITOK | M_ZERO); TAILQ_INIT(&sn->sn_head); lockinit(&sn->sn_lock, PVFS, "snaplk", VLKTIMEOUT, LK_CANRECURSE | LK_NOSHARE); return (sn); } /* * The snapdata is never freed because we can not be certain that * there are no threads sleeping on the snap lock. Persisting * them permanently avoids costly synchronization in ffs_lock(). 
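/*
 * Illustrative standalone sketch (not taken from this file) of the
 * ffs_snapdata_alloc()/ffs_snapdata_free() recycling scheme: a mutex-guarded
 * free list is consulted before malloc, and "freeing" only returns objects
 * to that list, so memory once handed out is never reclaimed while stale
 * sleepers might still reference it.  The pthread primitives and struct
 * layout are assumptions for the example.
 */
#include <pthread.h>
#include <stdlib.h>

struct obj {
	struct obj	*next;
	/* ... payload ... */
};

static pthread_mutex_t	freelock = PTHREAD_MUTEX_INITIALIZER;
static struct obj	*freelist;

static struct obj *
obj_alloc(void)
{
	struct obj *o;

	pthread_mutex_lock(&freelock);
	if ((o = freelist) != NULL)
		freelist = o->next;
	pthread_mutex_unlock(&freelock);
	if (o == NULL)
		o = calloc(1, sizeof(*o));	/* free list empty */
	return (o);
}

static void
obj_free(struct obj *o)
{
	pthread_mutex_lock(&freelock);
	o->next = freelist;			/* recycle, never free(3) */
	freelist = o;
	pthread_mutex_unlock(&freelock);
}

int
main(void)
{
	struct obj *o = obj_alloc();

	obj_free(o);
	return (obj_alloc() == o ? 0 : 1);	/* the recycled object returns */
}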
*/ static void ffs_snapdata_free(struct snapdata *sn) { mtx_lock(&snapfree_lock); LIST_INSERT_HEAD(&snapfree, sn, sn_link); mtx_unlock(&snapfree_lock); } /* Try to free snapdata associated with devvp */ static void try_free_snapdata(struct vnode *devvp) { struct snapdata *sn; ufs2_daddr_t *snapblklist; ASSERT_VI_LOCKED(devvp, "try_free_snapdata"); sn = devvp->v_rdev->si_snapdata; if (sn == NULL || TAILQ_FIRST(&sn->sn_head) != NULL || (devvp->v_vflag & VV_COPYONWRITE) == 0) { VI_UNLOCK(devvp); return; } devvp->v_rdev->si_snapdata = NULL; devvp->v_vflag &= ~VV_COPYONWRITE; lockmgr(&sn->sn_lock, LK_DRAIN|LK_INTERLOCK, VI_MTX(devvp)); snapblklist = sn->sn_blklist; sn->sn_blklist = NULL; sn->sn_listsize = 0; lockmgr(&sn->sn_lock, LK_RELEASE, NULL); if (snapblklist != NULL) free(snapblklist, M_UFSMNT); ffs_snapdata_free(sn); } static struct snapdata * ffs_snapdata_acquire(struct vnode *devvp) { struct snapdata *nsn, *sn; int error; /* * Allocate a free snapdata. This is done before acquiring the * devvp lock to avoid allocation while the devvp interlock is * held. */ nsn = ffs_snapdata_alloc(); for (;;) { VI_LOCK(devvp); sn = devvp->v_rdev->si_snapdata; if (sn == NULL) { /* * This is the first snapshot on this * filesystem and we use our pre-allocated * snapdata. Publish sn with the sn_lock * owned by us, to avoid the race. */ error = lockmgr(&nsn->sn_lock, LK_EXCLUSIVE | LK_NOWAIT, NULL); if (error != 0) panic("leaked sn, lockmgr error %d", error); sn = devvp->v_rdev->si_snapdata = nsn; VI_UNLOCK(devvp); nsn = NULL; break; } /* * There is a snapshots which already exists on this * filesystem, grab a reference to the common lock. */ error = lockmgr(&sn->sn_lock, LK_INTERLOCK | LK_EXCLUSIVE | LK_SLEEPFAIL, VI_MTX(devvp)); if (error == 0) break; } /* * Free any unused snapdata. */ if (nsn != NULL) ffs_snapdata_free(nsn); return (sn); } #endif Index: projects/clang700-import/sys/ufs/ffs/ffs_softdep.c =================================================================== --- projects/clang700-import/sys/ufs/ffs/ffs_softdep.c (revision 340426) +++ projects/clang700-import/sys/ufs/ffs/ffs_softdep.c (revision 340427) @@ -1,14526 +1,14530 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright 1998, 2000 Marshall Kirk McKusick. * Copyright 2009, 2010 Jeffrey W. Roberson * All rights reserved. * * The soft updates code is derived from the appendix of a University * of Michigan technical report (Gregory R. Ganger and Yale N. Patt, * "Soft Updates: A Solution to the Metadata Update Problem in File * Systems", CSE-TR-254-95, August 1995). * * Further information about soft updates can be obtained from: * * Marshall Kirk McKusick http://www.mckusick.com/softdep/ * 1614 Oxford Street mckusick@mckusick.com * Berkeley, CA 94709-1608 +1-510-843-9542 * USA * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * from: @(#)ffs_softdep.c 9.59 (McKusick) 6/21/00 */ #include __FBSDID("$FreeBSD$"); #include "opt_ffs.h" #include "opt_quota.h" #include "opt_ddb.h" /* * For now we want the safety net that the DEBUG flag provides. */ #ifndef DEBUG #define DEBUG #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define KTR_SUJ 0 /* Define to KTR_SPARE. */ #ifndef SOFTUPDATES int softdep_flushfiles(oldmnt, flags, td) struct mount *oldmnt; int flags; struct thread *td; { panic("softdep_flushfiles called"); } int softdep_mount(devvp, mp, fs, cred) struct vnode *devvp; struct mount *mp; struct fs *fs; struct ucred *cred; { return (0); } void softdep_initialize() { return; } void softdep_uninitialize() { return; } void softdep_unmount(mp) struct mount *mp; { panic("softdep_unmount called"); } void softdep_setup_sbupdate(ump, fs, bp) struct ufsmount *ump; struct fs *fs; struct buf *bp; { panic("softdep_setup_sbupdate called"); } void softdep_setup_inomapdep(bp, ip, newinum, mode) struct buf *bp; struct inode *ip; ino_t newinum; int mode; { panic("softdep_setup_inomapdep called"); } void softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags) struct buf *bp; struct mount *mp; ufs2_daddr_t newblkno; int frags; int oldfrags; { panic("softdep_setup_blkmapdep called"); } void softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp) struct inode *ip; ufs_lbn_t lbn; ufs2_daddr_t newblkno; ufs2_daddr_t oldblkno; long newsize; long oldsize; struct buf *bp; { panic("softdep_setup_allocdirect called"); } void softdep_setup_allocext(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp) struct inode *ip; ufs_lbn_t lbn; ufs2_daddr_t newblkno; ufs2_daddr_t oldblkno; long newsize; long oldsize; struct buf *bp; { panic("softdep_setup_allocext called"); } void softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp) struct inode *ip; ufs_lbn_t lbn; struct buf *bp; int ptrno; ufs2_daddr_t newblkno; ufs2_daddr_t oldblkno; struct buf *nbp; { panic("softdep_setup_allocindir_page called"); } void softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno) struct buf *nbp; struct inode *ip; struct buf *bp; int ptrno; ufs2_daddr_t newblkno; { panic("softdep_setup_allocindir_meta called"); } void softdep_journal_freeblocks(ip, cred, length, flags) struct inode *ip; struct ucred *cred; off_t length; int flags; { panic("softdep_journal_freeblocks called"); } void softdep_journal_fsync(ip) struct inode *ip; { panic("softdep_journal_fsync called"); } void softdep_setup_freeblocks(ip, length, flags) struct inode *ip; off_t length; int flags; { panic("softdep_setup_freeblocks called"); } void softdep_freefile(pvp, ino, mode) struct vnode *pvp; ino_t ino; int mode; { panic("softdep_freefile called"); } int 
softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk) struct buf *bp; struct inode *dp; off_t diroffset; ino_t newinum; struct buf *newdirbp; int isnewblk; { panic("softdep_setup_directory_add called"); } void softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize) struct buf *bp; struct inode *dp; caddr_t base; caddr_t oldloc; caddr_t newloc; int entrysize; { panic("softdep_change_directoryentry_offset called"); } void softdep_setup_remove(bp, dp, ip, isrmdir) struct buf *bp; struct inode *dp; struct inode *ip; int isrmdir; { panic("softdep_setup_remove called"); } void softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir) struct buf *bp; struct inode *dp; struct inode *ip; ino_t newinum; int isrmdir; { panic("softdep_setup_directory_change called"); } void softdep_setup_blkfree(mp, bp, blkno, frags, wkhd) struct mount *mp; struct buf *bp; ufs2_daddr_t blkno; int frags; struct workhead *wkhd; { panic("%s called", __FUNCTION__); } void softdep_setup_inofree(mp, bp, ino, wkhd) struct mount *mp; struct buf *bp; ino_t ino; struct workhead *wkhd; { panic("%s called", __FUNCTION__); } void softdep_setup_unlink(dp, ip) struct inode *dp; struct inode *ip; { panic("%s called", __FUNCTION__); } void softdep_setup_link(dp, ip) struct inode *dp; struct inode *ip; { panic("%s called", __FUNCTION__); } void softdep_revert_link(dp, ip) struct inode *dp; struct inode *ip; { panic("%s called", __FUNCTION__); } void softdep_setup_rmdir(dp, ip) struct inode *dp; struct inode *ip; { panic("%s called", __FUNCTION__); } void softdep_revert_rmdir(dp, ip) struct inode *dp; struct inode *ip; { panic("%s called", __FUNCTION__); } void softdep_setup_create(dp, ip) struct inode *dp; struct inode *ip; { panic("%s called", __FUNCTION__); } void softdep_revert_create(dp, ip) struct inode *dp; struct inode *ip; { panic("%s called", __FUNCTION__); } void softdep_setup_mkdir(dp, ip) struct inode *dp; struct inode *ip; { panic("%s called", __FUNCTION__); } void softdep_revert_mkdir(dp, ip) struct inode *dp; struct inode *ip; { panic("%s called", __FUNCTION__); } void softdep_setup_dotdot_link(dp, ip) struct inode *dp; struct inode *ip; { panic("%s called", __FUNCTION__); } int softdep_prealloc(vp, waitok) struct vnode *vp; int waitok; { panic("%s called", __FUNCTION__); } int softdep_journal_lookup(mp, vpp) struct mount *mp; struct vnode **vpp; { return (ENOENT); } void softdep_change_linkcnt(ip) struct inode *ip; { panic("softdep_change_linkcnt called"); } void softdep_load_inodeblock(ip) struct inode *ip; { panic("softdep_load_inodeblock called"); } void softdep_update_inodeblock(ip, bp, waitfor) struct inode *ip; struct buf *bp; int waitfor; { panic("softdep_update_inodeblock called"); } int softdep_fsync(vp) struct vnode *vp; /* the "in_core" copy of the inode */ { return (0); } void softdep_fsync_mountdev(vp) struct vnode *vp; { return; } int softdep_flushworklist(oldmnt, countp, td) struct mount *oldmnt; int *countp; struct thread *td; { *countp = 0; return (0); } int softdep_sync_metadata(struct vnode *vp) { panic("softdep_sync_metadata called"); } int softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor) { panic("softdep_sync_buf called"); } int softdep_slowdown(vp) struct vnode *vp; { panic("softdep_slowdown called"); } int softdep_request_cleanup(fs, vp, cred, resource) struct fs *fs; struct vnode *vp; struct ucred *cred; int resource; { return (0); } int softdep_check_suspend(struct mount *mp, struct vnode *devvp, int softdep_depcnt, int 
softdep_accdepcnt, int secondary_writes, int secondary_accwrites) { struct bufobj *bo; int error; (void) softdep_depcnt, (void) softdep_accdepcnt; bo = &devvp->v_bufobj; ASSERT_BO_WLOCKED(bo); MNT_ILOCK(mp); while (mp->mnt_secondary_writes != 0) { BO_UNLOCK(bo); msleep(&mp->mnt_secondary_writes, MNT_MTX(mp), (PUSER - 1) | PDROP, "secwr", 0); BO_LOCK(bo); MNT_ILOCK(mp); } /* * Reasons for needing more work before suspend: * - Dirty buffers on devvp. * - Secondary writes occurred after start of vnode sync loop */ error = 0; if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0 || secondary_writes != 0 || mp->mnt_secondary_writes != 0 || secondary_accwrites != mp->mnt_secondary_accwrites) error = EAGAIN; BO_UNLOCK(bo); return (error); } void softdep_get_depcounts(struct mount *mp, int *softdepactivep, int *softdepactiveaccp) { (void) mp; *softdepactivep = 0; *softdepactiveaccp = 0; } void softdep_buf_append(bp, wkhd) struct buf *bp; struct workhead *wkhd; { panic("softdep_buf_appendwork called"); } void softdep_inode_append(ip, cred, wkhd) struct inode *ip; struct ucred *cred; struct workhead *wkhd; { panic("softdep_inode_appendwork called"); } void softdep_freework(wkhd) struct workhead *wkhd; { panic("softdep_freework called"); } #else FEATURE(softupdates, "FFS soft-updates support"); static SYSCTL_NODE(_debug, OID_AUTO, softdep, CTLFLAG_RW, 0, "soft updates stats"); static SYSCTL_NODE(_debug_softdep, OID_AUTO, total, CTLFLAG_RW, 0, "total dependencies allocated"); static SYSCTL_NODE(_debug_softdep, OID_AUTO, highuse, CTLFLAG_RW, 0, "high use dependencies allocated"); static SYSCTL_NODE(_debug_softdep, OID_AUTO, current, CTLFLAG_RW, 0, "current dependencies allocated"); static SYSCTL_NODE(_debug_softdep, OID_AUTO, write, CTLFLAG_RW, 0, "current dependencies written"); unsigned long dep_current[D_LAST + 1]; unsigned long dep_highuse[D_LAST + 1]; unsigned long dep_total[D_LAST + 1]; unsigned long dep_write[D_LAST + 1]; #define SOFTDEP_TYPE(type, str, long) \ static MALLOC_DEFINE(M_ ## type, #str, long); \ SYSCTL_ULONG(_debug_softdep_total, OID_AUTO, str, CTLFLAG_RD, \ &dep_total[D_ ## type], 0, ""); \ SYSCTL_ULONG(_debug_softdep_current, OID_AUTO, str, CTLFLAG_RD, \ &dep_current[D_ ## type], 0, ""); \ SYSCTL_ULONG(_debug_softdep_highuse, OID_AUTO, str, CTLFLAG_RD, \ &dep_highuse[D_ ## type], 0, ""); \ SYSCTL_ULONG(_debug_softdep_write, OID_AUTO, str, CTLFLAG_RD, \ &dep_write[D_ ## type], 0, ""); SOFTDEP_TYPE(PAGEDEP, pagedep, "File page dependencies"); SOFTDEP_TYPE(INODEDEP, inodedep, "Inode dependencies"); SOFTDEP_TYPE(BMSAFEMAP, bmsafemap, "Block or frag allocated from cyl group map"); SOFTDEP_TYPE(NEWBLK, newblk, "New block or frag allocation dependency"); SOFTDEP_TYPE(ALLOCDIRECT, allocdirect, "Block or frag dependency for an inode"); SOFTDEP_TYPE(INDIRDEP, indirdep, "Indirect block dependencies"); SOFTDEP_TYPE(ALLOCINDIR, allocindir, "Block dependency for an indirect block"); SOFTDEP_TYPE(FREEFRAG, freefrag, "Previously used frag for an inode"); SOFTDEP_TYPE(FREEBLKS, freeblks, "Blocks freed from an inode"); SOFTDEP_TYPE(FREEFILE, freefile, "Inode deallocated"); SOFTDEP_TYPE(DIRADD, diradd, "New directory entry"); SOFTDEP_TYPE(MKDIR, mkdir, "New directory"); SOFTDEP_TYPE(DIRREM, dirrem, "Directory entry deleted"); SOFTDEP_TYPE(NEWDIRBLK, newdirblk, "Unclaimed new directory block"); SOFTDEP_TYPE(FREEWORK, freework, "free an inode block"); SOFTDEP_TYPE(FREEDEP, freedep, "track a block free"); SOFTDEP_TYPE(JADDREF, jaddref, "Journal inode ref add"); SOFTDEP_TYPE(JREMREF, jremref, "Journal 
inode ref remove"); SOFTDEP_TYPE(JMVREF, jmvref, "Journal inode ref move"); SOFTDEP_TYPE(JNEWBLK, jnewblk, "Journal new block"); SOFTDEP_TYPE(JFREEBLK, jfreeblk, "Journal free block"); SOFTDEP_TYPE(JFREEFRAG, jfreefrag, "Journal free frag"); SOFTDEP_TYPE(JSEG, jseg, "Journal segment"); SOFTDEP_TYPE(JSEGDEP, jsegdep, "Journal segment complete"); SOFTDEP_TYPE(SBDEP, sbdep, "Superblock write dependency"); SOFTDEP_TYPE(JTRUNC, jtrunc, "Journal inode truncation"); SOFTDEP_TYPE(JFSYNC, jfsync, "Journal fsync complete"); static MALLOC_DEFINE(M_SENTINEL, "sentinel", "Worklist sentinel"); static MALLOC_DEFINE(M_SAVEDINO, "savedino", "Saved inodes"); static MALLOC_DEFINE(M_JBLOCKS, "jblocks", "Journal block locations"); static MALLOC_DEFINE(M_MOUNTDATA, "softdep", "Softdep per-mount data"); #define M_SOFTDEP_FLAGS (M_WAITOK) /* * translate from workitem type to memory type * MUST match the defines above, such that memtype[D_XXX] == M_XXX */ static struct malloc_type *memtype[] = { NULL, M_PAGEDEP, M_INODEDEP, M_BMSAFEMAP, M_NEWBLK, M_ALLOCDIRECT, M_INDIRDEP, M_ALLOCINDIR, M_FREEFRAG, M_FREEBLKS, M_FREEFILE, M_DIRADD, M_MKDIR, M_DIRREM, M_NEWDIRBLK, M_FREEWORK, M_FREEDEP, M_JADDREF, M_JREMREF, M_JMVREF, M_JNEWBLK, M_JFREEBLK, M_JFREEFRAG, M_JSEG, M_JSEGDEP, M_SBDEP, M_JTRUNC, M_JFSYNC, M_SENTINEL }; #define DtoM(type) (memtype[type]) /* * Names of malloc types. */ #define TYPENAME(type) \ ((unsigned)(type) <= D_LAST && (unsigned)(type) >= D_FIRST ? \ memtype[type]->ks_shortdesc : "???") /* * End system adaptation definitions. */ #define DOTDOT_OFFSET offsetof(struct dirtemplate, dotdot_ino) #define DOT_OFFSET offsetof(struct dirtemplate, dot_ino) /* * Internal function prototypes. */ static void check_clear_deps(struct mount *); static void softdep_error(char *, int); static int softdep_process_worklist(struct mount *, int); static int softdep_waitidle(struct mount *, int); static void drain_output(struct vnode *); static struct buf *getdirtybuf(struct buf *, struct rwlock *, int); static int check_inodedep_free(struct inodedep *); static void clear_remove(struct mount *); static void clear_inodedeps(struct mount *); static void unlinked_inodedep(struct mount *, struct inodedep *); static void clear_unlinked_inodedep(struct inodedep *); static struct inodedep *first_unlinked_inodedep(struct ufsmount *); static int flush_pagedep_deps(struct vnode *, struct mount *, struct diraddhd *); static int free_pagedep(struct pagedep *); static int flush_newblk_dep(struct vnode *, struct mount *, ufs_lbn_t); static int flush_inodedep_deps(struct vnode *, struct mount *, ino_t); static int flush_deplist(struct allocdirectlst *, int, int *); static int sync_cgs(struct mount *, int); static int handle_written_filepage(struct pagedep *, struct buf *, int); static int handle_written_sbdep(struct sbdep *, struct buf *); static void initiate_write_sbdep(struct sbdep *); static void diradd_inode_written(struct diradd *, struct inodedep *); static int handle_written_indirdep(struct indirdep *, struct buf *, struct buf**, int); static int handle_written_inodeblock(struct inodedep *, struct buf *, int); static int jnewblk_rollforward(struct jnewblk *, struct fs *, struct cg *, uint8_t *); static int handle_written_bmsafemap(struct bmsafemap *, struct buf *, int); static void handle_written_jaddref(struct jaddref *); static void handle_written_jremref(struct jremref *); static void handle_written_jseg(struct jseg *, struct buf *); static void handle_written_jnewblk(struct jnewblk *); static void 
handle_written_jblkdep(struct jblkdep *); static void handle_written_jfreefrag(struct jfreefrag *); static void complete_jseg(struct jseg *); static void complete_jsegs(struct jseg *); static void jseg_write(struct ufsmount *ump, struct jseg *, uint8_t *); static void jaddref_write(struct jaddref *, struct jseg *, uint8_t *); static void jremref_write(struct jremref *, struct jseg *, uint8_t *); static void jmvref_write(struct jmvref *, struct jseg *, uint8_t *); static void jtrunc_write(struct jtrunc *, struct jseg *, uint8_t *); static void jfsync_write(struct jfsync *, struct jseg *, uint8_t *data); static void jnewblk_write(struct jnewblk *, struct jseg *, uint8_t *); static void jfreeblk_write(struct jfreeblk *, struct jseg *, uint8_t *); static void jfreefrag_write(struct jfreefrag *, struct jseg *, uint8_t *); static inline void inoref_write(struct inoref *, struct jseg *, struct jrefrec *); static void handle_allocdirect_partdone(struct allocdirect *, struct workhead *); static struct jnewblk *cancel_newblk(struct newblk *, struct worklist *, struct workhead *); static void indirdep_complete(struct indirdep *); static int indirblk_lookup(struct mount *, ufs2_daddr_t); static void indirblk_insert(struct freework *); static void indirblk_remove(struct freework *); static void handle_allocindir_partdone(struct allocindir *); static void initiate_write_filepage(struct pagedep *, struct buf *); static void initiate_write_indirdep(struct indirdep*, struct buf *); static void handle_written_mkdir(struct mkdir *, int); static int jnewblk_rollback(struct jnewblk *, struct fs *, struct cg *, uint8_t *); static void initiate_write_bmsafemap(struct bmsafemap *, struct buf *); static void initiate_write_inodeblock_ufs1(struct inodedep *, struct buf *); static void initiate_write_inodeblock_ufs2(struct inodedep *, struct buf *); static void handle_workitem_freefile(struct freefile *); static int handle_workitem_remove(struct dirrem *, int); static struct dirrem *newdirrem(struct buf *, struct inode *, struct inode *, int, struct dirrem **); static struct indirdep *indirdep_lookup(struct mount *, struct inode *, struct buf *); static void cancel_indirdep(struct indirdep *, struct buf *, struct freeblks *); static void free_indirdep(struct indirdep *); static void free_diradd(struct diradd *, struct workhead *); static void merge_diradd(struct inodedep *, struct diradd *); static void complete_diradd(struct diradd *); static struct diradd *diradd_lookup(struct pagedep *, int); static struct jremref *cancel_diradd_dotdot(struct inode *, struct dirrem *, struct jremref *); static struct jremref *cancel_mkdir_dotdot(struct inode *, struct dirrem *, struct jremref *); static void cancel_diradd(struct diradd *, struct dirrem *, struct jremref *, struct jremref *, struct jremref *); static void dirrem_journal(struct dirrem *, struct jremref *, struct jremref *, struct jremref *); static void cancel_allocindir(struct allocindir *, struct buf *bp, struct freeblks *, int); static int setup_trunc_indir(struct freeblks *, struct inode *, ufs_lbn_t, ufs_lbn_t, ufs2_daddr_t); static void complete_trunc_indir(struct freework *); static void trunc_indirdep(struct indirdep *, struct freeblks *, struct buf *, int); static void complete_mkdir(struct mkdir *); static void free_newdirblk(struct newdirblk *); static void free_jremref(struct jremref *); static void free_jaddref(struct jaddref *); static void free_jsegdep(struct jsegdep *); static void free_jsegs(struct jblocks *); static void rele_jseg(struct jseg *); 
static void free_jseg(struct jseg *, struct jblocks *); static void free_jnewblk(struct jnewblk *); static void free_jblkdep(struct jblkdep *); static void free_jfreefrag(struct jfreefrag *); static void free_freedep(struct freedep *); static void journal_jremref(struct dirrem *, struct jremref *, struct inodedep *); static void cancel_jnewblk(struct jnewblk *, struct workhead *); static int cancel_jaddref(struct jaddref *, struct inodedep *, struct workhead *); static void cancel_jfreefrag(struct jfreefrag *); static inline void setup_freedirect(struct freeblks *, struct inode *, int, int); static inline void setup_freeext(struct freeblks *, struct inode *, int, int); static inline void setup_freeindir(struct freeblks *, struct inode *, int, ufs_lbn_t, int); static inline struct freeblks *newfreeblks(struct mount *, struct inode *); static void freeblks_free(struct ufsmount *, struct freeblks *, int); static void indir_trunc(struct freework *, ufs2_daddr_t, ufs_lbn_t); static ufs2_daddr_t blkcount(struct fs *, ufs2_daddr_t, off_t); static int trunc_check_buf(struct buf *, int *, ufs_lbn_t, int, int); static void trunc_dependencies(struct inode *, struct freeblks *, ufs_lbn_t, int, int); static void trunc_pages(struct inode *, off_t, ufs2_daddr_t, int); static int cancel_pagedep(struct pagedep *, struct freeblks *, int); static int deallocate_dependencies(struct buf *, struct freeblks *, int); static void newblk_freefrag(struct newblk*); static void free_newblk(struct newblk *); static void cancel_allocdirect(struct allocdirectlst *, struct allocdirect *, struct freeblks *); static int check_inode_unwritten(struct inodedep *); static int free_inodedep(struct inodedep *); static void freework_freeblock(struct freework *, u_long); static void freework_enqueue(struct freework *); static int handle_workitem_freeblocks(struct freeblks *, int); static int handle_complete_freeblocks(struct freeblks *, int); static void handle_workitem_indirblk(struct freework *); static void handle_written_freework(struct freework *); static void merge_inode_lists(struct allocdirectlst *,struct allocdirectlst *); static struct worklist *jnewblk_merge(struct worklist *, struct worklist *, struct workhead *); static struct freefrag *setup_allocindir_phase2(struct buf *, struct inode *, struct inodedep *, struct allocindir *, ufs_lbn_t); static struct allocindir *newallocindir(struct inode *, int, ufs2_daddr_t, ufs2_daddr_t, ufs_lbn_t); static void handle_workitem_freefrag(struct freefrag *); static struct freefrag *newfreefrag(struct inode *, ufs2_daddr_t, long, ufs_lbn_t, u_long); static void allocdirect_merge(struct allocdirectlst *, struct allocdirect *, struct allocdirect *); static struct freefrag *allocindir_merge(struct allocindir *, struct allocindir *); static int bmsafemap_find(struct bmsafemap_hashhead *, int, struct bmsafemap **); static struct bmsafemap *bmsafemap_lookup(struct mount *, struct buf *, int cg, struct bmsafemap *); static int newblk_find(struct newblk_hashhead *, ufs2_daddr_t, int, struct newblk **); static int newblk_lookup(struct mount *, ufs2_daddr_t, int, struct newblk **); static int inodedep_find(struct inodedep_hashhead *, ino_t, struct inodedep **); static int inodedep_lookup(struct mount *, ino_t, int, struct inodedep **); static int pagedep_lookup(struct mount *, struct buf *bp, ino_t, ufs_lbn_t, int, struct pagedep **); static int pagedep_find(struct pagedep_hashhead *, ino_t, ufs_lbn_t, struct pagedep **); static void pause_timer(void *); static int request_cleanup(struct 
mount *, int); static int softdep_request_cleanup_flush(struct mount *, struct ufsmount *); static void schedule_cleanup(struct mount *); static void softdep_ast_cleanup_proc(struct thread *); static struct ufsmount *softdep_bp_to_mp(struct buf *bp); static int process_worklist_item(struct mount *, int, int); static void process_removes(struct vnode *); static void process_truncates(struct vnode *); static void jwork_move(struct workhead *, struct workhead *); static void jwork_insert(struct workhead *, struct jsegdep *); static void add_to_worklist(struct worklist *, int); static void wake_worklist(struct worklist *); static void wait_worklist(struct worklist *, char *); static void remove_from_worklist(struct worklist *); static void softdep_flush(void *); static void softdep_flushjournal(struct mount *); static int softdep_speedup(struct ufsmount *); static void worklist_speedup(struct mount *); static int journal_mount(struct mount *, struct fs *, struct ucred *); static void journal_unmount(struct ufsmount *); static int journal_space(struct ufsmount *, int); static void journal_suspend(struct ufsmount *); static int journal_unsuspend(struct ufsmount *ump); static void softdep_prelink(struct vnode *, struct vnode *); static void add_to_journal(struct worklist *); static void remove_from_journal(struct worklist *); static bool softdep_excess_items(struct ufsmount *, int); static void softdep_process_journal(struct mount *, struct worklist *, int); static struct jremref *newjremref(struct dirrem *, struct inode *, struct inode *ip, off_t, nlink_t); static struct jaddref *newjaddref(struct inode *, ino_t, off_t, int16_t, uint16_t); static inline void newinoref(struct inoref *, ino_t, ino_t, off_t, nlink_t, uint16_t); static inline struct jsegdep *inoref_jseg(struct inoref *); static struct jmvref *newjmvref(struct inode *, ino_t, off_t, off_t); static struct jfreeblk *newjfreeblk(struct freeblks *, ufs_lbn_t, ufs2_daddr_t, int); static void adjust_newfreework(struct freeblks *, int); static struct jtrunc *newjtrunc(struct freeblks *, off_t, int); static void move_newblock_dep(struct jaddref *, struct inodedep *); static void cancel_jfreeblk(struct freeblks *, ufs2_daddr_t); static struct jfreefrag *newjfreefrag(struct freefrag *, struct inode *, ufs2_daddr_t, long, ufs_lbn_t); static struct freework *newfreework(struct ufsmount *, struct freeblks *, struct freework *, ufs_lbn_t, ufs2_daddr_t, int, int, int); static int jwait(struct worklist *, int); static struct inodedep *inodedep_lookup_ip(struct inode *); static int bmsafemap_backgroundwrite(struct bmsafemap *, struct buf *); static struct freefile *handle_bufwait(struct inodedep *, struct workhead *); static void handle_jwork(struct workhead *); static struct mkdir *setup_newdir(struct diradd *, ino_t, ino_t, struct buf *, struct mkdir **); static struct jblocks *jblocks_create(void); static ufs2_daddr_t jblocks_alloc(struct jblocks *, int, int *); static void jblocks_free(struct jblocks *, struct mount *, int); static void jblocks_destroy(struct jblocks *); static void jblocks_add(struct jblocks *, ufs2_daddr_t, int); /* * Exported softdep operations. */ static void softdep_disk_io_initiation(struct buf *); static void softdep_disk_write_complete(struct buf *); static void softdep_deallocate_dependencies(struct buf *); static int softdep_count_dependencies(struct buf *bp, int); /* * Global lock over all of soft updates. 
*/ static struct mtx lk; MTX_SYSINIT(softdep_lock, &lk, "Global Softdep Lock", MTX_DEF); #define ACQUIRE_GBLLOCK(lk) mtx_lock(lk) #define FREE_GBLLOCK(lk) mtx_unlock(lk) #define GBLLOCK_OWNED(lk) mtx_assert((lk), MA_OWNED) /* * Per-filesystem soft-updates locking. */ #define LOCK_PTR(ump) (&(ump)->um_softdep->sd_fslock) #define TRY_ACQUIRE_LOCK(ump) rw_try_wlock(&(ump)->um_softdep->sd_fslock) #define ACQUIRE_LOCK(ump) rw_wlock(&(ump)->um_softdep->sd_fslock) #define FREE_LOCK(ump) rw_wunlock(&(ump)->um_softdep->sd_fslock) #define LOCK_OWNED(ump) rw_assert(&(ump)->um_softdep->sd_fslock, \ RA_WLOCKED) #define BUF_AREC(bp) lockallowrecurse(&(bp)->b_lock) #define BUF_NOREC(bp) lockdisablerecurse(&(bp)->b_lock) /* * Worklist queue management. * These routines require that the lock be held. */ #ifndef /* NOT */ DEBUG #define WORKLIST_INSERT(head, item) do { \ (item)->wk_state |= ONWORKLIST; \ LIST_INSERT_HEAD(head, item, wk_list); \ } while (0) #define WORKLIST_REMOVE(item) do { \ (item)->wk_state &= ~ONWORKLIST; \ LIST_REMOVE(item, wk_list); \ } while (0) #define WORKLIST_INSERT_UNLOCKED WORKLIST_INSERT #define WORKLIST_REMOVE_UNLOCKED WORKLIST_REMOVE #else /* DEBUG */ static void worklist_insert(struct workhead *, struct worklist *, int); static void worklist_remove(struct worklist *, int); #define WORKLIST_INSERT(head, item) worklist_insert(head, item, 1) #define WORKLIST_INSERT_UNLOCKED(head, item) worklist_insert(head, item, 0) #define WORKLIST_REMOVE(item) worklist_remove(item, 1) #define WORKLIST_REMOVE_UNLOCKED(item) worklist_remove(item, 0) static void worklist_insert(head, item, locked) struct workhead *head; struct worklist *item; int locked; { if (locked) LOCK_OWNED(VFSTOUFS(item->wk_mp)); if (item->wk_state & ONWORKLIST) panic("worklist_insert: %p %s(0x%X) already on list", item, TYPENAME(item->wk_type), item->wk_state); item->wk_state |= ONWORKLIST; LIST_INSERT_HEAD(head, item, wk_list); } static void worklist_remove(item, locked) struct worklist *item; int locked; { if (locked) LOCK_OWNED(VFSTOUFS(item->wk_mp)); if ((item->wk_state & ONWORKLIST) == 0) panic("worklist_remove: %p %s(0x%X) not on list", item, TYPENAME(item->wk_type), item->wk_state); item->wk_state &= ~ONWORKLIST; LIST_REMOVE(item, wk_list); } #endif /* DEBUG */ /* * Merge two jsegdeps keeping only the oldest one as newer references * can't be discarded until after older references. */ static inline struct jsegdep * jsegdep_merge(struct jsegdep *one, struct jsegdep *two) { struct jsegdep *swp; if (two == NULL) return (one); if (one->jd_seg->js_seq > two->jd_seg->js_seq) { swp = one; one = two; two = swp; } WORKLIST_REMOVE(&two->jd_list); free_jsegdep(two); return (one); } /* * If two freedeps are compatible free one to reduce list size. */ static inline struct freedep * freedep_merge(struct freedep *one, struct freedep *two) { if (two == NULL) return (one); if (one->fd_freework == two->fd_freework) { WORKLIST_REMOVE(&two->fd_list); free_freedep(two); } return (one); } /* * Move journal work from one list to another. Duplicate freedeps and * jsegdeps are coalesced to keep the lists as small as possible. 
*/ static void jwork_move(dst, src) struct workhead *dst; struct workhead *src; { struct freedep *freedep; struct jsegdep *jsegdep; struct worklist *wkn; struct worklist *wk; KASSERT(dst != src, ("jwork_move: dst == src")); freedep = NULL; jsegdep = NULL; LIST_FOREACH_SAFE(wk, dst, wk_list, wkn) { if (wk->wk_type == D_JSEGDEP) jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep); else if (wk->wk_type == D_FREEDEP) freedep = freedep_merge(WK_FREEDEP(wk), freedep); } while ((wk = LIST_FIRST(src)) != NULL) { WORKLIST_REMOVE(wk); WORKLIST_INSERT(dst, wk); if (wk->wk_type == D_JSEGDEP) { jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep); continue; } if (wk->wk_type == D_FREEDEP) freedep = freedep_merge(WK_FREEDEP(wk), freedep); } } static void jwork_insert(dst, jsegdep) struct workhead *dst; struct jsegdep *jsegdep; { struct jsegdep *jsegdepn; struct worklist *wk; LIST_FOREACH(wk, dst, wk_list) if (wk->wk_type == D_JSEGDEP) break; if (wk == NULL) { WORKLIST_INSERT(dst, &jsegdep->jd_list); return; } jsegdepn = WK_JSEGDEP(wk); if (jsegdep->jd_seg->js_seq < jsegdepn->jd_seg->js_seq) { WORKLIST_REMOVE(wk); free_jsegdep(jsegdepn); WORKLIST_INSERT(dst, &jsegdep->jd_list); } else free_jsegdep(jsegdep); } /* * Routines for tracking and managing workitems. */ static void workitem_free(struct worklist *, int); static void workitem_alloc(struct worklist *, int, struct mount *); static void workitem_reassign(struct worklist *, int); #define WORKITEM_FREE(item, type) \ workitem_free((struct worklist *)(item), (type)) #define WORKITEM_REASSIGN(item, type) \ workitem_reassign((struct worklist *)(item), (type)) static void workitem_free(item, type) struct worklist *item; int type; { struct ufsmount *ump; #ifdef DEBUG if (item->wk_state & ONWORKLIST) panic("workitem_free: %s(0x%X) still on list", TYPENAME(item->wk_type), item->wk_state); if (item->wk_type != type && type != D_NEWBLK) panic("workitem_free: type mismatch %s != %s", TYPENAME(item->wk_type), TYPENAME(type)); #endif if (item->wk_state & IOWAITING) wakeup(item); ump = VFSTOUFS(item->wk_mp); LOCK_OWNED(ump); KASSERT(ump->softdep_deps > 0, ("workitem_free: %s: softdep_deps going negative", ump->um_fs->fs_fsmnt)); if (--ump->softdep_deps == 0 && ump->softdep_req) wakeup(&ump->softdep_deps); KASSERT(dep_current[item->wk_type] > 0, ("workitem_free: %s: dep_current[%s] going negative", ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type))); KASSERT(ump->softdep_curdeps[item->wk_type] > 0, ("workitem_free: %s: softdep_curdeps[%s] going negative", ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type))); atomic_subtract_long(&dep_current[item->wk_type], 1); ump->softdep_curdeps[item->wk_type] -= 1; free(item, DtoM(type)); } static void workitem_alloc(item, type, mp) struct worklist *item; int type; struct mount *mp; { struct ufsmount *ump; item->wk_type = type; item->wk_mp = mp; item->wk_state = 0; ump = VFSTOUFS(mp); ACQUIRE_GBLLOCK(&lk); dep_current[type]++; if (dep_current[type] > dep_highuse[type]) dep_highuse[type] = dep_current[type]; dep_total[type]++; FREE_GBLLOCK(&lk); ACQUIRE_LOCK(ump); ump->softdep_curdeps[type] += 1; ump->softdep_deps++; ump->softdep_accdeps++; FREE_LOCK(ump); } static void workitem_reassign(item, newtype) struct worklist *item; int newtype; { struct ufsmount *ump; ump = VFSTOUFS(item->wk_mp); LOCK_OWNED(ump); KASSERT(ump->softdep_curdeps[item->wk_type] > 0, ("workitem_reassign: %s: softdep_curdeps[%s] going negative", VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type))); ump->softdep_curdeps[item->wk_type] -= 1; 
ump->softdep_curdeps[newtype] += 1; KASSERT(dep_current[item->wk_type] > 0, ("workitem_reassign: %s: dep_current[%s] going negative", VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type))); ACQUIRE_GBLLOCK(&lk); dep_current[newtype]++; dep_current[item->wk_type]--; if (dep_current[newtype] > dep_highuse[newtype]) dep_highuse[newtype] = dep_current[newtype]; dep_total[newtype]++; FREE_GBLLOCK(&lk); item->wk_type = newtype; } /* * Workitem queue management */ static int max_softdeps; /* maximum number of structs before slowdown */ static int tickdelay = 2; /* number of ticks to pause during slowdown */ static int proc_waiting; /* tracks whether we have a timeout posted */ static int *stat_countp; /* statistic to count in proc_waiting timeout */ static struct callout softdep_callout; static int req_clear_inodedeps; /* syncer process flush some inodedeps */ static int req_clear_remove; /* syncer process flush some freeblks */ static int softdep_flushcache = 0; /* Should we do BIO_FLUSH? */ /* * runtime statistics */ static int stat_flush_threads; /* number of softdep flushing threads */ static int stat_worklist_push; /* number of worklist cleanups */ static int stat_blk_limit_push; /* number of times block limit neared */ static int stat_ino_limit_push; /* number of times inode limit neared */ static int stat_blk_limit_hit; /* number of times block slowdown imposed */ static int stat_ino_limit_hit; /* number of times inode slowdown imposed */ static int stat_sync_limit_hit; /* number of synchronous slowdowns imposed */ static int stat_indir_blk_ptrs; /* bufs redirtied as indir ptrs not written */ static int stat_inode_bitmap; /* bufs redirtied as inode bitmap not written */ static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */ static int stat_dir_entry; /* bufs redirtied as dir entry cannot write */ static int stat_jaddref; /* bufs redirtied as ino bitmap can not write */ static int stat_jnewblk; /* bufs redirtied as blk bitmap can not write */ static int stat_journal_min; /* Times hit journal min threshold */ static int stat_journal_low; /* Times hit journal low threshold */ static int stat_journal_wait; /* Times blocked in jwait(). */ static int stat_jwait_filepage; /* Times blocked in jwait() for filepage. */ static int stat_jwait_freeblks; /* Times blocked in jwait() for freeblks. */ static int stat_jwait_inode; /* Times blocked in jwait() for inodes. */ static int stat_jwait_newblk; /* Times blocked in jwait() for newblks. 
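/*
 * Illustrative standalone sketch (not taken from this file) of the per-type
 * accounting kept by workitem_alloc()/workitem_free(): each allocation bumps
 * the current and total counters for its dependency type and ratchets the
 * high-water mark, each free decrements the current count, and the sysctl
 * nodes above simply export these arrays.  Array size and type values are
 * made up for the example.
 */
#include <stdio.h>

#define NTYPES	4

static unsigned long dep_cur[NTYPES], dep_high[NTYPES], dep_tot[NTYPES];

static void
dep_account_alloc(int type)
{
	dep_cur[type]++;
	if (dep_cur[type] > dep_high[type])
		dep_high[type] = dep_cur[type];
	dep_tot[type]++;
}

static void
dep_account_free(int type)
{
	dep_cur[type]--;
}

int
main(void)
{
	dep_account_alloc(2);
	dep_account_alloc(2);
	dep_account_free(2);
	printf("cur %lu high %lu total %lu\n", dep_cur[2], dep_high[2],
	    dep_tot[2]);			/* cur 1 high 2 total 2 */
	return (0);
}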
*/ static int stat_cleanup_high_delay; /* Maximum cleanup delay (in ticks) */ static int stat_cleanup_blkrequests; /* Number of block cleanup requests */ static int stat_cleanup_inorequests; /* Number of inode cleanup requests */ static int stat_cleanup_retries; /* Number of cleanups that needed to flush */ static int stat_cleanup_failures; /* Number of cleanup requests that failed */ static int stat_emptyjblocks; /* Number of potentially empty journal blocks */ SYSCTL_INT(_debug_softdep, OID_AUTO, max_softdeps, CTLFLAG_RW, &max_softdeps, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, tickdelay, CTLFLAG_RW, &tickdelay, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, flush_threads, CTLFLAG_RD, &stat_flush_threads, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, worklist_push, CTLFLAG_RW, &stat_worklist_push, 0,""); SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_push, CTLFLAG_RW, &stat_blk_limit_push, 0,""); SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_push, CTLFLAG_RW, &stat_ino_limit_push, 0,""); SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_hit, CTLFLAG_RW, &stat_blk_limit_hit, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_hit, CTLFLAG_RW, &stat_ino_limit_hit, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, sync_limit_hit, CTLFLAG_RW, &stat_sync_limit_hit, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW, &stat_indir_blk_ptrs, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, inode_bitmap, CTLFLAG_RW, &stat_inode_bitmap, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW, &stat_direct_blk_ptrs, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, dir_entry, CTLFLAG_RW, &stat_dir_entry, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, jaddref_rollback, CTLFLAG_RW, &stat_jaddref, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, jnewblk_rollback, CTLFLAG_RW, &stat_jnewblk, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, journal_low, CTLFLAG_RW, &stat_journal_low, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, journal_min, CTLFLAG_RW, &stat_journal_min, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, journal_wait, CTLFLAG_RW, &stat_journal_wait, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_filepage, CTLFLAG_RW, &stat_jwait_filepage, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_freeblks, CTLFLAG_RW, &stat_jwait_freeblks, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_inode, CTLFLAG_RW, &stat_jwait_inode, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_newblk, CTLFLAG_RW, &stat_jwait_newblk, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_blkrequests, CTLFLAG_RW, &stat_cleanup_blkrequests, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_inorequests, CTLFLAG_RW, &stat_cleanup_inorequests, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_high_delay, CTLFLAG_RW, &stat_cleanup_high_delay, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_retries, CTLFLAG_RW, &stat_cleanup_retries, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_failures, CTLFLAG_RW, &stat_cleanup_failures, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, flushcache, CTLFLAG_RW, &softdep_flushcache, 0, ""); SYSCTL_INT(_debug_softdep, OID_AUTO, emptyjblocks, CTLFLAG_RD, &stat_emptyjblocks, 0, ""); SYSCTL_DECL(_vfs_ffs); /* Whether to recompute the summary at mount time */ static int compute_summary_at_mount = 0; SYSCTL_INT(_vfs_ffs, OID_AUTO, compute_summary_at_mount, CTLFLAG_RW, &compute_summary_at_mount, 0, "Recompute summary at mount"); static int print_threads = 0; SYSCTL_INT(_debug_softdep, OID_AUTO, print_threads, CTLFLAG_RW, &print_threads, 0, "Notify flusher thread 
start/stop"); /* List of all filesystems mounted with soft updates */ static TAILQ_HEAD(, mount_softdeps) softdepmounts; /* * This function cleans the worklist for a filesystem. * Each filesystem running with soft dependencies gets its own * thread to run in this function. The thread is started up in * softdep_mount and shutdown in softdep_unmount. They show up * as part of the kernel "bufdaemon" process whose process * entry is available in bufdaemonproc. */ static int searchfailed; extern struct proc *bufdaemonproc; static void softdep_flush(addr) void *addr; { struct mount *mp; struct thread *td; struct ufsmount *ump; td = curthread; td->td_pflags |= TDP_NORUNNINGBUF; mp = (struct mount *)addr; ump = VFSTOUFS(mp); atomic_add_int(&stat_flush_threads, 1); ACQUIRE_LOCK(ump); ump->softdep_flags &= ~FLUSH_STARTING; wakeup(&ump->softdep_flushtd); FREE_LOCK(ump); if (print_threads) { if (stat_flush_threads == 1) printf("Running %s at pid %d\n", bufdaemonproc->p_comm, bufdaemonproc->p_pid); printf("Start thread %s\n", td->td_name); } for (;;) { while (softdep_process_worklist(mp, 0) > 0 || (MOUNTEDSUJ(mp) && VFSTOUFS(mp)->softdep_jblocks->jb_suspended)) kthread_suspend_check(); ACQUIRE_LOCK(ump); if ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0) msleep(&ump->softdep_flushtd, LOCK_PTR(ump), PVM, "sdflush", hz / 2); ump->softdep_flags &= ~FLUSH_CLEANUP; /* * Check to see if we are done and need to exit. */ if ((ump->softdep_flags & FLUSH_EXIT) == 0) { FREE_LOCK(ump); continue; } ump->softdep_flags &= ~FLUSH_EXIT; FREE_LOCK(ump); wakeup(&ump->softdep_flags); if (print_threads) printf("Stop thread %s: searchfailed %d, did cleanups %d\n", td->td_name, searchfailed, ump->um_softdep->sd_cleanups); atomic_subtract_int(&stat_flush_threads, 1); kthread_exit(); panic("kthread_exit failed\n"); } } static void worklist_speedup(mp) struct mount *mp; { struct ufsmount *ump; ump = VFSTOUFS(mp); LOCK_OWNED(ump); if ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0) ump->softdep_flags |= FLUSH_CLEANUP; wakeup(&ump->softdep_flushtd); } static int softdep_speedup(ump) struct ufsmount *ump; { struct ufsmount *altump; struct mount_softdeps *sdp; LOCK_OWNED(ump); worklist_speedup(ump->um_mountp); bd_speedup(); /* * If we have global shortages, then we need other * filesystems to help with the cleanup. Here we wakeup a * flusher thread for a filesystem that is over its fair * share of resources. */ if (req_clear_inodedeps || req_clear_remove) { ACQUIRE_GBLLOCK(&lk); TAILQ_FOREACH(sdp, &softdepmounts, sd_next) { if ((altump = sdp->sd_ump) == ump) continue; if (((req_clear_inodedeps && altump->softdep_curdeps[D_INODEDEP] > max_softdeps / stat_flush_threads) || (req_clear_remove && altump->softdep_curdeps[D_DIRREM] > (max_softdeps / 2) / stat_flush_threads)) && TRY_ACQUIRE_LOCK(altump)) break; } if (sdp == NULL) { searchfailed++; FREE_GBLLOCK(&lk); } else { /* * Move to the end of the list so we pick a * different one on out next try. */ TAILQ_REMOVE(&softdepmounts, sdp, sd_next); TAILQ_INSERT_TAIL(&softdepmounts, sdp, sd_next); FREE_GBLLOCK(&lk); if ((altump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0) altump->softdep_flags |= FLUSH_CLEANUP; altump->um_softdep->sd_cleanups++; wakeup(&altump->softdep_flushtd); FREE_LOCK(altump); } } return (speedup_syncer()); } /* * Add an item to the end of the work queue. * This routine requires that the lock be held. * This is the only routine that adds items to the list. 
* The following routine is the only one that removes items * and does so in order from first to last. */ #define WK_HEAD 0x0001 /* Add to HEAD. */ #define WK_NODELAY 0x0002 /* Process immediately. */ static void add_to_worklist(wk, flags) struct worklist *wk; int flags; { struct ufsmount *ump; ump = VFSTOUFS(wk->wk_mp); LOCK_OWNED(ump); if (wk->wk_state & ONWORKLIST) panic("add_to_worklist: %s(0x%X) already on list", TYPENAME(wk->wk_type), wk->wk_state); wk->wk_state |= ONWORKLIST; if (ump->softdep_on_worklist == 0) { LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list); ump->softdep_worklist_tail = wk; } else if (flags & WK_HEAD) { LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list); } else { LIST_INSERT_AFTER(ump->softdep_worklist_tail, wk, wk_list); ump->softdep_worklist_tail = wk; } ump->softdep_on_worklist += 1; if (flags & WK_NODELAY) worklist_speedup(wk->wk_mp); } /* * Remove the item to be processed. If we are removing the last * item on the list, we need to recalculate the tail pointer. */ static void remove_from_worklist(wk) struct worklist *wk; { struct ufsmount *ump; ump = VFSTOUFS(wk->wk_mp); if (ump->softdep_worklist_tail == wk) ump->softdep_worklist_tail = (struct worklist *)wk->wk_list.le_prev; WORKLIST_REMOVE(wk); ump->softdep_on_worklist -= 1; } static void wake_worklist(wk) struct worklist *wk; { if (wk->wk_state & IOWAITING) { wk->wk_state &= ~IOWAITING; wakeup(wk); } } static void wait_worklist(wk, wmesg) struct worklist *wk; char *wmesg; { struct ufsmount *ump; ump = VFSTOUFS(wk->wk_mp); wk->wk_state |= IOWAITING; msleep(wk, LOCK_PTR(ump), PVM, wmesg, 0); } /* * Process that runs once per second to handle items in the background queue. * * Note that we ensure that everything is done in the order in which they * appear in the queue. The code below depends on this property to ensure * that blocks of a file are freed before the inode itself is freed. This * ordering ensures that no new triples will be generated * until all the old ones have been purged from the dependency lists. */ static int softdep_process_worklist(mp, full) struct mount *mp; int full; { int cnt, matchcnt; struct ufsmount *ump; long starttime; KASSERT(mp != NULL, ("softdep_process_worklist: NULL mp")); if (MOUNTEDSOFTDEP(mp) == 0) return (0); matchcnt = 0; ump = VFSTOUFS(mp); ACQUIRE_LOCK(ump); starttime = time_second; softdep_process_journal(mp, NULL, full ? MNT_WAIT : 0); check_clear_deps(mp); while (ump->softdep_on_worklist > 0) { if ((cnt = process_worklist_item(mp, 10, LK_NOWAIT)) == 0) break; else matchcnt += cnt; check_clear_deps(mp); /* * We do not generally want to stop for buffer space, but if * we are really being a buffer hog, we will stop and wait. */ if (should_yield()) { FREE_LOCK(ump); kern_yield(PRI_USER); bwillwrite(); ACQUIRE_LOCK(ump); } /* * Never allow processing to run for more than one * second. This gives the syncer thread the opportunity * to pause if appropriate. */ if (!full && starttime != time_second) break; } if (full == 0) journal_unsuspend(ump); FREE_LOCK(ump); return (matchcnt); } /* * Process all removes associated with a vnode if we are running out of * journal space. Any other process which attempts to flush these will * be unable as we have the vnodes locked. 
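/*
 * Illustrative standalone sketch (not taken from this file) of the insertion
 * policy in add_to_worklist(): sys/queue.h lists have no O(1) tail insert,
 * so a remembered tail pointer turns LIST_INSERT_AFTER into an append, while
 * WK_HEAD-style insertions put retried items back at the front.  Names are
 * hypothetical.
 */
#include <stdio.h>
#include <sys/queue.h>

struct item {
	int			val;
	LIST_ENTRY(item)	link;
};

static LIST_HEAD(, item)	pending = LIST_HEAD_INITIALIZER(pending);
static struct item		*tail;

static void
enqueue(struct item *it, int at_head)
{
	if (LIST_EMPTY(&pending)) {
		LIST_INSERT_HEAD(&pending, it, link);
		tail = it;
	} else if (at_head) {
		LIST_INSERT_HEAD(&pending, it, link);
	} else {
		LIST_INSERT_AFTER(tail, it, link);
		tail = it;
	}
}

int
main(void)
{
	struct item a = { 1 }, b = { 2 }, c = { 3 }, *it;

	enqueue(&a, 0);
	enqueue(&b, 0);			/* appended after a */
	enqueue(&c, 1);			/* pushed to the head, like WK_HEAD */
	LIST_FOREACH(it, &pending, link)
		printf("%d ", it->val);	/* prints "3 1 2" */
	printf("\n");
	return (0);
}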
*/ static void process_removes(vp) struct vnode *vp; { struct inodedep *inodedep; struct dirrem *dirrem; struct ufsmount *ump; struct mount *mp; ino_t inum; mp = vp->v_mount; ump = VFSTOUFS(mp); LOCK_OWNED(ump); inum = VTOI(vp)->i_number; for (;;) { top: if (inodedep_lookup(mp, inum, 0, &inodedep) == 0) return; LIST_FOREACH(dirrem, &inodedep->id_dirremhd, dm_inonext) { /* * If another thread is trying to lock this vnode * it will fail but we must wait for it to do so * before we can proceed. */ if (dirrem->dm_state & INPROGRESS) { wait_worklist(&dirrem->dm_list, "pwrwait"); goto top; } if ((dirrem->dm_state & (COMPLETE | ONWORKLIST)) == (COMPLETE | ONWORKLIST)) break; } if (dirrem == NULL) return; remove_from_worklist(&dirrem->dm_list); FREE_LOCK(ump); if (vn_start_secondary_write(NULL, &mp, V_NOWAIT)) panic("process_removes: suspended filesystem"); handle_workitem_remove(dirrem, 0); vn_finished_secondary_write(mp); ACQUIRE_LOCK(ump); } } /* * Process all truncations associated with a vnode if we are running out * of journal space. This is called when the vnode lock is already held * and no other process can clear the truncation. This routine loops * until every outstanding truncation for the vnode has been processed. */ static void process_truncates(vp) struct vnode *vp; { struct inodedep *inodedep; struct freeblks *freeblks; struct ufsmount *ump; struct mount *mp; ino_t inum; int cgwait; mp = vp->v_mount; ump = VFSTOUFS(mp); LOCK_OWNED(ump); inum = VTOI(vp)->i_number; for (;;) { if (inodedep_lookup(mp, inum, 0, &inodedep) == 0) return; cgwait = 0; TAILQ_FOREACH(freeblks, &inodedep->id_freeblklst, fb_next) { /* Journal entries not yet written. */ if (!LIST_EMPTY(&freeblks->fb_jblkdephd)) { jwait(&LIST_FIRST( &freeblks->fb_jblkdephd)->jb_list, MNT_WAIT); break; } /* Another thread is executing this item. */ if (freeblks->fb_state & INPROGRESS) { wait_worklist(&freeblks->fb_list, "ptrwait"); break; } /* Freeblks is waiting on an inode write. */ if ((freeblks->fb_state & COMPLETE) == 0) { FREE_LOCK(ump); ffs_update(vp, 1); ACQUIRE_LOCK(ump); break; } if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST)) == (ALLCOMPLETE | ONWORKLIST)) { remove_from_worklist(&freeblks->fb_list); freeblks->fb_state |= INPROGRESS; FREE_LOCK(ump); if (vn_start_secondary_write(NULL, &mp, V_NOWAIT)) panic("process_truncates: " "suspended filesystem"); handle_workitem_freeblocks(freeblks, 0); vn_finished_secondary_write(mp); ACQUIRE_LOCK(ump); break; } if (freeblks->fb_cgwait) cgwait++; } if (cgwait) { FREE_LOCK(ump); sync_cgs(mp, MNT_WAIT); ffs_sync_snap(mp, MNT_WAIT); ACQUIRE_LOCK(ump); continue; } if (freeblks == NULL) break; } return; } /* * Process one item on the worklist. */ static int process_worklist_item(mp, target, flags) struct mount *mp; int target; int flags; { struct worklist sentinel; struct worklist *wk; struct ufsmount *ump; int matchcnt; int error; KASSERT(mp != NULL, ("process_worklist_item: NULL mp")); /* * If we are being called because of a process doing a * copy-on-write, then it is not safe to write as we may * recurse into the copy-on-write routine. */ if (curthread->td_pflags & TDP_COWINPROGRESS) return (-1); PHOLD(curproc); /* Don't let the stack go away.
*/ ump = VFSTOUFS(mp); LOCK_OWNED(ump); matchcnt = 0; sentinel.wk_mp = NULL; sentinel.wk_type = D_SENTINEL; LIST_INSERT_HEAD(&ump->softdep_workitem_pending, &sentinel, wk_list); for (wk = LIST_NEXT(&sentinel, wk_list); wk != NULL; wk = LIST_NEXT(&sentinel, wk_list)) { if (wk->wk_type == D_SENTINEL) { LIST_REMOVE(&sentinel, wk_list); LIST_INSERT_AFTER(wk, &sentinel, wk_list); continue; } if (wk->wk_state & INPROGRESS) panic("process_worklist_item: %p already in progress.", wk); wk->wk_state |= INPROGRESS; remove_from_worklist(wk); FREE_LOCK(ump); if (vn_start_secondary_write(NULL, &mp, V_NOWAIT)) panic("process_worklist_item: suspended filesystem"); switch (wk->wk_type) { case D_DIRREM: /* removal of a directory entry */ error = handle_workitem_remove(WK_DIRREM(wk), flags); break; case D_FREEBLKS: /* releasing blocks and/or fragments from a file */ error = handle_workitem_freeblocks(WK_FREEBLKS(wk), flags); break; case D_FREEFRAG: /* releasing a fragment when replaced as a file grows */ handle_workitem_freefrag(WK_FREEFRAG(wk)); error = 0; break; case D_FREEFILE: /* releasing an inode when its link count drops to 0 */ handle_workitem_freefile(WK_FREEFILE(wk)); error = 0; break; default: panic("%s_process_worklist: Unknown type %s", "softdep", TYPENAME(wk->wk_type)); /* NOTREACHED */ } vn_finished_secondary_write(mp); ACQUIRE_LOCK(ump); if (error == 0) { if (++matchcnt == target) break; continue; } /* * We have to retry the worklist item later. Wake up any * waiters who may be able to complete it immediately and * add the item back to the head so we don't try to execute * it again. */ wk->wk_state &= ~INPROGRESS; wake_worklist(wk); add_to_worklist(wk, WK_HEAD); } /* Sentinal could've become the tail from remove_from_worklist. */ if (ump->softdep_worklist_tail == &sentinel) ump->softdep_worklist_tail = (struct worklist *)sentinel.wk_list.le_prev; LIST_REMOVE(&sentinel, wk_list); PRELE(curproc); return (matchcnt); } /* * Move dependencies from one buffer to another. */ int softdep_move_dependencies(oldbp, newbp) struct buf *oldbp; struct buf *newbp; { struct worklist *wk, *wktail; struct ufsmount *ump; int dirty; if ((wk = LIST_FIRST(&oldbp->b_dep)) == NULL) return (0); KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0, ("softdep_move_dependencies called on non-softdep filesystem")); dirty = 0; wktail = NULL; ump = VFSTOUFS(wk->wk_mp); ACQUIRE_LOCK(ump); while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) { LIST_REMOVE(wk, wk_list); if (wk->wk_type == D_BMSAFEMAP && bmsafemap_backgroundwrite(WK_BMSAFEMAP(wk), newbp)) dirty = 1; if (wktail == NULL) LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list); else LIST_INSERT_AFTER(wktail, wk, wk_list); wktail = wk; } FREE_LOCK(ump); return (dirty); } /* * Purge the work list of all items associated with a particular mount point. */ int softdep_flushworklist(oldmnt, countp, td) struct mount *oldmnt; int *countp; struct thread *td; { struct vnode *devvp; struct ufsmount *ump; int count, error; /* * Alternately flush the block device associated with the mount * point and process any dependencies that the flushing * creates. We continue until no more worklist dependencies * are found. 
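 * Sketch of the loop below: each pass runs softdep_process_worklist()
 * with full set and then fsyncs the underlying device vnode, so that
 * writing the device buffers can retire dependencies and surface any
 * new work; the per-pass counts accumulate in *countp.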
*/ *countp = 0; error = 0; ump = VFSTOUFS(oldmnt); devvp = ump->um_devvp; while ((count = softdep_process_worklist(oldmnt, 1)) > 0) { *countp += count; vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); error = VOP_FSYNC(devvp, MNT_WAIT, td); VOP_UNLOCK(devvp, 0); if (error != 0) break; } return (error); } #define SU_WAITIDLE_RETRIES 20 static int softdep_waitidle(struct mount *mp, int flags __unused) { struct ufsmount *ump; struct vnode *devvp; struct thread *td; int error, i; ump = VFSTOUFS(mp); devvp = ump->um_devvp; td = curthread; error = 0; ACQUIRE_LOCK(ump); for (i = 0; i < SU_WAITIDLE_RETRIES && ump->softdep_deps != 0; i++) { ump->softdep_req = 1; KASSERT((flags & FORCECLOSE) == 0 || ump->softdep_on_worklist == 0, ("softdep_waitidle: work added after flush")); msleep(&ump->softdep_deps, LOCK_PTR(ump), PVM | PDROP, "softdeps", 10 * hz); vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); error = VOP_FSYNC(devvp, MNT_WAIT, td); VOP_UNLOCK(devvp, 0); ACQUIRE_LOCK(ump); if (error != 0) break; } ump->softdep_req = 0; if (i == SU_WAITIDLE_RETRIES && error == 0 && ump->softdep_deps != 0) { error = EBUSY; printf("softdep_waitidle: Failed to flush worklist for %p\n", mp); } FREE_LOCK(ump); return (error); } /* * Flush all vnodes and worklist items associated with a specified mount point. */ int softdep_flushfiles(oldmnt, flags, td) struct mount *oldmnt; int flags; struct thread *td; { #ifdef QUOTA struct ufsmount *ump; int i; #endif int error, early, depcount, loopcnt, retry_flush_count, retry; int morework; KASSERT(MOUNTEDSOFTDEP(oldmnt) != 0, ("softdep_flushfiles called on non-softdep filesystem")); loopcnt = 10; retry_flush_count = 3; retry_flush: error = 0; /* * Alternately flush the vnodes associated with the mount * point and process any dependencies that the flushing * creates. In theory, this loop can happen at most twice, * but we give it a few extra just to be sure. */ for (; loopcnt > 0; loopcnt--) { /* * Do another flush in case any vnodes were brought in * as part of the cleanup operations. */ early = retry_flush_count == 1 || (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) == 0 ? 0 : EARLYFLUSH; if ((error = ffs_flushfiles(oldmnt, flags | early, td)) != 0) break; if ((error = softdep_flushworklist(oldmnt, &depcount, td)) != 0 || depcount == 0) break; } /* * If we are unmounting then it is an error to fail. If we * are simply trying to downgrade to read-only, then filesystem * activity can keep us busy forever, so we just fail with EBUSY. */ if (loopcnt == 0) { if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) panic("softdep_flushfiles: looping"); error = EBUSY; } if (!error) error = softdep_waitidle(oldmnt, flags); if (!error) { if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) { retry = 0; MNT_ILOCK(oldmnt); KASSERT((oldmnt->mnt_kern_flag & MNTK_NOINSMNTQ) != 0, ("softdep_flushfiles: !MNTK_NOINSMNTQ")); morework = oldmnt->mnt_nvnodelistsize > 0; #ifdef QUOTA ump = VFSTOUFS(oldmnt); UFS_LOCK(ump); for (i = 0; i < MAXQUOTAS; i++) { if (ump->um_quotas[i] != NULLVP) morework = 1; } UFS_UNLOCK(ump); #endif if (morework) { if (--retry_flush_count > 0) { retry = 1; loopcnt = 3; } else error = EBUSY; } MNT_IUNLOCK(oldmnt); if (retry) goto retry_flush; } } return (error); } /* * Structure hashing. * * There are four types of structures that can be looked up: * 1) pagedep structures identified by mount point, inode number, * and logical block. * 2) inodedep structures identified by mount point and inode number. * 3) newblk structures identified by mount point and * physical block number. 
* 4) bmsafemap structures identified by mount point and * cylinder group number. * * The "pagedep" and "inodedep" dependency structures are hashed * separately from the file blocks and inodes to which they correspond. * This separation helps when the in-memory copy of an inode or * file block must be replaced. It also obviates the need to access * an inode or file page when simply updating (or de-allocating) * dependency structures. Lookup of newblk structures is needed to * find newly allocated blocks when trying to associate them with * their allocdirect or allocindir structure. * * The lookup routines optionally create and hash a new instance when * an existing entry is not found. The bmsafemap lookup routine always * allocates a new structure if an existing one is not found. */ #define DEPALLOC 0x0001 /* allocate structure if lookup fails */ /* * Structures and routines associated with pagedep caching. */ #define PAGEDEP_HASH(ump, inum, lbn) \ (&(ump)->pagedep_hashtbl[((inum) + (lbn)) & (ump)->pagedep_hash_size]) static int pagedep_find(pagedephd, ino, lbn, pagedeppp) struct pagedep_hashhead *pagedephd; ino_t ino; ufs_lbn_t lbn; struct pagedep **pagedeppp; { struct pagedep *pagedep; LIST_FOREACH(pagedep, pagedephd, pd_hash) { if (ino == pagedep->pd_ino && lbn == pagedep->pd_lbn) { *pagedeppp = pagedep; return (1); } } *pagedeppp = NULL; return (0); } /* * Look up a pagedep. Return 1 if found, 0 otherwise. * If not found, allocate if DEPALLOC flag is passed. * Found or allocated entry is returned in pagedeppp. * This routine must be called with splbio interrupts blocked. */ static int pagedep_lookup(mp, bp, ino, lbn, flags, pagedeppp) struct mount *mp; struct buf *bp; ino_t ino; ufs_lbn_t lbn; int flags; struct pagedep **pagedeppp; { struct pagedep *pagedep; struct pagedep_hashhead *pagedephd; struct worklist *wk; struct ufsmount *ump; int ret; int i; ump = VFSTOUFS(mp); LOCK_OWNED(ump); if (bp) { LIST_FOREACH(wk, &bp->b_dep, wk_list) { if (wk->wk_type == D_PAGEDEP) { *pagedeppp = WK_PAGEDEP(wk); return (1); } } } pagedephd = PAGEDEP_HASH(ump, ino, lbn); ret = pagedep_find(pagedephd, ino, lbn, pagedeppp); if (ret) { if (((*pagedeppp)->pd_state & ONWORKLIST) == 0 && bp) WORKLIST_INSERT(&bp->b_dep, &(*pagedeppp)->pd_list); return (1); } if ((flags & DEPALLOC) == 0) return (0); FREE_LOCK(ump); pagedep = malloc(sizeof(struct pagedep), M_PAGEDEP, M_SOFTDEP_FLAGS|M_ZERO); workitem_alloc(&pagedep->pd_list, D_PAGEDEP, mp); ACQUIRE_LOCK(ump); ret = pagedep_find(pagedephd, ino, lbn, pagedeppp); if (*pagedeppp) { /* * This should never happen since we only create pagedeps * with the vnode lock held. Could be an assert. */ WORKITEM_FREE(pagedep, D_PAGEDEP); return (ret); } pagedep->pd_ino = ino; pagedep->pd_lbn = lbn; LIST_INIT(&pagedep->pd_dirremhd); LIST_INIT(&pagedep->pd_pendinghd); for (i = 0; i < DAHASHSZ; i++) LIST_INIT(&pagedep->pd_diraddhd[i]); LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash); WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list); *pagedeppp = pagedep; return (0); } /* * Structures and routines associated with inodedep caching. */ #define INODEDEP_HASH(ump, inum) \ (&(ump)->inodedep_hashtbl[(inum) & (ump)->inodedep_hash_size]) static int inodedep_find(inodedephd, inum, inodedeppp) struct inodedep_hashhead *inodedephd; ino_t inum; struct inodedep **inodedeppp; { struct inodedep *inodedep; LIST_FOREACH(inodedep, inodedephd, id_hash) if (inum == inodedep->id_ino) break; if (inodedep) { *inodedeppp = inodedep; return (1); } *inodedeppp = NULL; return (0); } /* * Look up an inodedep. 
Return 1 if found, 0 if not found. * If not found, allocate if DEPALLOC flag is passed. * Found or allocated entry is returned in inodedeppp. * This routine must be called with splbio interrupts blocked. */ static int inodedep_lookup(mp, inum, flags, inodedeppp) struct mount *mp; ino_t inum; int flags; struct inodedep **inodedeppp; { struct inodedep *inodedep; struct inodedep_hashhead *inodedephd; struct ufsmount *ump; struct fs *fs; ump = VFSTOUFS(mp); LOCK_OWNED(ump); fs = ump->um_fs; inodedephd = INODEDEP_HASH(ump, inum); if (inodedep_find(inodedephd, inum, inodedeppp)) return (1); if ((flags & DEPALLOC) == 0) return (0); /* * If the system is over its limit and our filesystem is * responsible for more than our share of that usage and * we are not in a rush, request some inodedep cleanup. */ if (softdep_excess_items(ump, D_INODEDEP)) schedule_cleanup(mp); else FREE_LOCK(ump); inodedep = malloc(sizeof(struct inodedep), M_INODEDEP, M_SOFTDEP_FLAGS); workitem_alloc(&inodedep->id_list, D_INODEDEP, mp); ACQUIRE_LOCK(ump); if (inodedep_find(inodedephd, inum, inodedeppp)) { WORKITEM_FREE(inodedep, D_INODEDEP); return (1); } inodedep->id_fs = fs; inodedep->id_ino = inum; inodedep->id_state = ALLCOMPLETE; inodedep->id_nlinkdelta = 0; inodedep->id_savedino1 = NULL; inodedep->id_savedsize = -1; inodedep->id_savedextsize = -1; inodedep->id_savednlink = -1; inodedep->id_bmsafemap = NULL; inodedep->id_mkdiradd = NULL; LIST_INIT(&inodedep->id_dirremhd); LIST_INIT(&inodedep->id_pendinghd); LIST_INIT(&inodedep->id_inowait); LIST_INIT(&inodedep->id_bufwait); TAILQ_INIT(&inodedep->id_inoreflst); TAILQ_INIT(&inodedep->id_inoupdt); TAILQ_INIT(&inodedep->id_newinoupdt); TAILQ_INIT(&inodedep->id_extupdt); TAILQ_INIT(&inodedep->id_newextupdt); TAILQ_INIT(&inodedep->id_freeblklst); LIST_INSERT_HEAD(inodedephd, inodedep, id_hash); *inodedeppp = inodedep; return (0); } /* * Structures and routines associated with newblk caching. */ #define NEWBLK_HASH(ump, inum) \ (&(ump)->newblk_hashtbl[(inum) & (ump)->newblk_hash_size]) static int newblk_find(newblkhd, newblkno, flags, newblkpp) struct newblk_hashhead *newblkhd; ufs2_daddr_t newblkno; int flags; struct newblk **newblkpp; { struct newblk *newblk; LIST_FOREACH(newblk, newblkhd, nb_hash) { if (newblkno != newblk->nb_newblkno) continue; /* * If we're creating a new dependency don't match those that * have already been converted to allocdirects. This is for * a frag extend. */ if ((flags & DEPALLOC) && newblk->nb_list.wk_type != D_NEWBLK) continue; break; } if (newblk) { *newblkpp = newblk; return (1); } *newblkpp = NULL; return (0); } /* * Look up a newblk. Return 1 if found, 0 if not found. * If not found, allocate if DEPALLOC flag is passed. * Found or allocated entry is returned in newblkpp. 
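 * As with pagedep_lookup() and inodedep_lookup() above, the allocation
 * path drops the per-filesystem lock around the malloc(), then repeats
 * the hash lookup after reacquiring it in case another thread inserted
 * the same block meanwhile; the losing allocation is released with
 * WORKITEM_FREE().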
*/ static int newblk_lookup(mp, newblkno, flags, newblkpp) struct mount *mp; ufs2_daddr_t newblkno; int flags; struct newblk **newblkpp; { struct newblk *newblk; struct newblk_hashhead *newblkhd; struct ufsmount *ump; ump = VFSTOUFS(mp); LOCK_OWNED(ump); newblkhd = NEWBLK_HASH(ump, newblkno); if (newblk_find(newblkhd, newblkno, flags, newblkpp)) return (1); if ((flags & DEPALLOC) == 0) return (0); if (softdep_excess_items(ump, D_NEWBLK) || softdep_excess_items(ump, D_ALLOCDIRECT) || softdep_excess_items(ump, D_ALLOCINDIR)) schedule_cleanup(mp); else FREE_LOCK(ump); newblk = malloc(sizeof(union allblk), M_NEWBLK, M_SOFTDEP_FLAGS | M_ZERO); workitem_alloc(&newblk->nb_list, D_NEWBLK, mp); ACQUIRE_LOCK(ump); if (newblk_find(newblkhd, newblkno, flags, newblkpp)) { WORKITEM_FREE(newblk, D_NEWBLK); return (1); } newblk->nb_freefrag = NULL; LIST_INIT(&newblk->nb_indirdeps); LIST_INIT(&newblk->nb_newdirblk); LIST_INIT(&newblk->nb_jwork); newblk->nb_state = ATTACHED; newblk->nb_newblkno = newblkno; LIST_INSERT_HEAD(newblkhd, newblk, nb_hash); *newblkpp = newblk; return (0); } /* * Structures and routines associated with freed indirect block caching. */ #define INDIR_HASH(ump, blkno) \ (&(ump)->indir_hashtbl[(blkno) & (ump)->indir_hash_size]) /* * Lookup an indirect block in the indir hash table. The freework is * removed and potentially freed. The caller must do a blocking journal * write before writing to the blkno. */ static int indirblk_lookup(mp, blkno) struct mount *mp; ufs2_daddr_t blkno; { struct freework *freework; struct indir_hashhead *wkhd; struct ufsmount *ump; ump = VFSTOUFS(mp); wkhd = INDIR_HASH(ump, blkno); TAILQ_FOREACH(freework, wkhd, fw_next) { if (freework->fw_blkno != blkno) continue; indirblk_remove(freework); return (1); } return (0); } /* * Insert an indirect block represented by freework into the indirblk * hash table so that it may prevent the block from being re-used prior * to the journal being written. */ static void indirblk_insert(freework) struct freework *freework; { struct jblocks *jblocks; struct jseg *jseg; struct ufsmount *ump; ump = VFSTOUFS(freework->fw_list.wk_mp); jblocks = ump->softdep_jblocks; jseg = TAILQ_LAST(&jblocks->jb_segs, jseglst); if (jseg == NULL) return; LIST_INSERT_HEAD(&jseg->js_indirs, freework, fw_segs); TAILQ_INSERT_HEAD(INDIR_HASH(ump, freework->fw_blkno), freework, fw_next); freework->fw_state &= ~DEPCOMPLETE; } static void indirblk_remove(freework) struct freework *freework; { struct ufsmount *ump; ump = VFSTOUFS(freework->fw_list.wk_mp); LIST_REMOVE(freework, fw_segs); TAILQ_REMOVE(INDIR_HASH(ump, freework->fw_blkno), freework, fw_next); freework->fw_state |= DEPCOMPLETE; if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE) WORKITEM_FREE(freework, D_FREEWORK); } /* * Executed during filesystem system initialization before * mounting any filesystems. */ void softdep_initialize() { TAILQ_INIT(&softdepmounts); #ifdef __LP64__ max_softdeps = desiredvnodes * 4; #else max_softdeps = desiredvnodes * 2; #endif /* initialise bioops hack */ bioops.io_start = softdep_disk_io_initiation; bioops.io_complete = softdep_disk_write_complete; bioops.io_deallocate = softdep_deallocate_dependencies; bioops.io_countdeps = softdep_count_dependencies; softdep_ast_cleanup = softdep_ast_cleanup_proc; /* Initialize the callout with an mtx. */ callout_init_mtx(&softdep_callout, &lk, 0); } /* * Executed after all filesystems have been unmounted during * filesystem module unload. 
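 * This undoes the bioops hooks and AST cleanup handler installed by
 * softdep_initialize() and drains the callout, so no soft updates
 * mount may still be active when it runs.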
*/ void softdep_uninitialize() { /* clear bioops hack */ bioops.io_start = NULL; bioops.io_complete = NULL; bioops.io_deallocate = NULL; bioops.io_countdeps = NULL; softdep_ast_cleanup = NULL; callout_drain(&softdep_callout); } /* * Called at mount time to notify the dependency code that a * filesystem wishes to use it. */ int softdep_mount(devvp, mp, fs, cred) struct vnode *devvp; struct mount *mp; struct fs *fs; struct ucred *cred; { struct csum_total cstotal; struct mount_softdeps *sdp; struct ufsmount *ump; struct cg *cgp; struct buf *bp; u_int cyl, i; int error; sdp = malloc(sizeof(struct mount_softdeps), M_MOUNTDATA, M_WAITOK | M_ZERO); MNT_ILOCK(mp); mp->mnt_flag = (mp->mnt_flag & ~MNT_ASYNC) | MNT_SOFTDEP; if ((mp->mnt_kern_flag & MNTK_SOFTDEP) == 0) { mp->mnt_kern_flag = (mp->mnt_kern_flag & ~MNTK_ASYNC) | MNTK_SOFTDEP | MNTK_NOASYNC; } ump = VFSTOUFS(mp); ump->um_softdep = sdp; MNT_IUNLOCK(mp); rw_init(LOCK_PTR(ump), "Per-Filesystem Softdep Lock"); sdp->sd_ump = ump; LIST_INIT(&ump->softdep_workitem_pending); LIST_INIT(&ump->softdep_journal_pending); TAILQ_INIT(&ump->softdep_unlinked); LIST_INIT(&ump->softdep_dirtycg); ump->softdep_worklist_tail = NULL; ump->softdep_on_worklist = 0; ump->softdep_deps = 0; LIST_INIT(&ump->softdep_mkdirlisthd); ump->pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP, &ump->pagedep_hash_size); ump->pagedep_nextclean = 0; ump->inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, &ump->inodedep_hash_size); ump->inodedep_nextclean = 0; ump->newblk_hashtbl = hashinit(max_softdeps / 2, M_NEWBLK, &ump->newblk_hash_size); ump->bmsafemap_hashtbl = hashinit(1024, M_BMSAFEMAP, &ump->bmsafemap_hash_size); i = 1 << (ffs(desiredvnodes / 10) - 1); ump->indir_hashtbl = malloc(i * sizeof(struct indir_hashhead), M_FREEWORK, M_WAITOK); ump->indir_hash_size = i - 1; for (i = 0; i <= ump->indir_hash_size; i++) TAILQ_INIT(&ump->indir_hashtbl[i]); ACQUIRE_GBLLOCK(&lk); TAILQ_INSERT_TAIL(&softdepmounts, sdp, sd_next); FREE_GBLLOCK(&lk); if ((fs->fs_flags & FS_SUJ) && (error = journal_mount(mp, fs, cred)) != 0) { printf("Failed to start journal: %d\n", error); softdep_unmount(mp); return (error); } /* * Start our flushing thread in the bufdaemon process. */ ACQUIRE_LOCK(ump); ump->softdep_flags |= FLUSH_STARTING; FREE_LOCK(ump); kproc_kthread_add(&softdep_flush, mp, &bufdaemonproc, &ump->softdep_flushtd, 0, 0, "softdepflush", "%s worker", mp->mnt_stat.f_mntonname); ACQUIRE_LOCK(ump); while ((ump->softdep_flags & FLUSH_STARTING) != 0) { msleep(&ump->softdep_flushtd, LOCK_PTR(ump), PVM, "sdstart", hz / 2); } FREE_LOCK(ump); /* * When doing soft updates, the counters in the * superblock may have gotten out of sync. Recomputation * can take a long time and can be deferred for background * fsck. However, the old behavior of scanning the cylinder * groups and recalculating them at mount time is available * by setting vfs.ffs.compute_summary_at_mount to one. 
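 * Roughly, the recomputation below reads each cylinder group, sums its
 * cg_cs counts of directories and of free fragments, blocks, and inodes
 * into a fresh csum_total, refreshes the in-core fs_cs() copies, and
 * finally overwrites fs_cstotal with the recomputed totals.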
*/ if (compute_summary_at_mount == 0 || fs->fs_clean != 0) return (0); bzero(&cstotal, sizeof cstotal); for (cyl = 0; cyl < fs->fs_ncg; cyl++) { if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)), fs->fs_cgsize, cred, &bp)) != 0) { brelse(bp); softdep_unmount(mp); return (error); } cgp = (struct cg *)bp->b_data; cstotal.cs_nffree += cgp->cg_cs.cs_nffree; cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree; cstotal.cs_nifree += cgp->cg_cs.cs_nifree; cstotal.cs_ndir += cgp->cg_cs.cs_ndir; fs->fs_cs(fs, cyl) = cgp->cg_cs; brelse(bp); } #ifdef DEBUG if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal)) printf("%s: superblock summary recomputed\n", fs->fs_fsmnt); #endif bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal); return (0); } void softdep_unmount(mp) struct mount *mp; { struct ufsmount *ump; #ifdef INVARIANTS int i; #endif KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_unmount called on non-softdep filesystem")); ump = VFSTOUFS(mp); MNT_ILOCK(mp); mp->mnt_flag &= ~MNT_SOFTDEP; if (MOUNTEDSUJ(mp) == 0) { MNT_IUNLOCK(mp); } else { mp->mnt_flag &= ~MNT_SUJ; MNT_IUNLOCK(mp); journal_unmount(ump); } /* * Shut down our flushing thread. Check for NULL is if * softdep_mount errors out before the thread has been created. */ if (ump->softdep_flushtd != NULL) { ACQUIRE_LOCK(ump); ump->softdep_flags |= FLUSH_EXIT; wakeup(&ump->softdep_flushtd); msleep(&ump->softdep_flags, LOCK_PTR(ump), PVM | PDROP, "sdwait", 0); KASSERT((ump->softdep_flags & FLUSH_EXIT) == 0, ("Thread shutdown failed")); } /* * Free up our resources. */ ACQUIRE_GBLLOCK(&lk); TAILQ_REMOVE(&softdepmounts, ump->um_softdep, sd_next); FREE_GBLLOCK(&lk); rw_destroy(LOCK_PTR(ump)); hashdestroy(ump->pagedep_hashtbl, M_PAGEDEP, ump->pagedep_hash_size); hashdestroy(ump->inodedep_hashtbl, M_INODEDEP, ump->inodedep_hash_size); hashdestroy(ump->newblk_hashtbl, M_NEWBLK, ump->newblk_hash_size); hashdestroy(ump->bmsafemap_hashtbl, M_BMSAFEMAP, ump->bmsafemap_hash_size); free(ump->indir_hashtbl, M_FREEWORK); #ifdef INVARIANTS for (i = 0; i <= D_LAST; i++) KASSERT(ump->softdep_curdeps[i] == 0, ("Unmount %s: Dep type %s != 0 (%ld)", ump->um_fs->fs_fsmnt, TYPENAME(i), ump->softdep_curdeps[i])); #endif free(ump->um_softdep, M_MOUNTDATA); } static struct jblocks * jblocks_create(void) { struct jblocks *jblocks; jblocks = malloc(sizeof(*jblocks), M_JBLOCKS, M_WAITOK | M_ZERO); TAILQ_INIT(&jblocks->jb_segs); jblocks->jb_avail = 10; jblocks->jb_extent = malloc(sizeof(struct jextent) * jblocks->jb_avail, M_JBLOCKS, M_WAITOK | M_ZERO); return (jblocks); } static ufs2_daddr_t jblocks_alloc(jblocks, bytes, actual) struct jblocks *jblocks; int bytes; int *actual; { ufs2_daddr_t daddr; struct jextent *jext; int freecnt; int blocks; blocks = bytes / DEV_BSIZE; jext = &jblocks->jb_extent[jblocks->jb_head]; freecnt = jext->je_blocks - jblocks->jb_off; if (freecnt == 0) { jblocks->jb_off = 0; if (++jblocks->jb_head > jblocks->jb_used) jblocks->jb_head = 0; jext = &jblocks->jb_extent[jblocks->jb_head]; freecnt = jext->je_blocks; } if (freecnt > blocks) freecnt = blocks; *actual = freecnt * DEV_BSIZE; daddr = jext->je_daddr + jblocks->jb_off; jblocks->jb_off += freecnt; jblocks->jb_free -= freecnt; return (daddr); } static void jblocks_free(jblocks, mp, bytes) struct jblocks *jblocks; struct mount *mp; int bytes; { LOCK_OWNED(VFSTOUFS(mp)); jblocks->jb_free += bytes / DEV_BSIZE; if (jblocks->jb_suspended) worklist_speedup(mp); wakeup(jblocks); } static void jblocks_destroy(jblocks) struct jblocks *jblocks; { if (jblocks->jb_extent) free(jblocks->jb_extent, M_JBLOCKS); 
free(jblocks, M_JBLOCKS); } static void jblocks_add(jblocks, daddr, blocks) struct jblocks *jblocks; ufs2_daddr_t daddr; int blocks; { struct jextent *jext; jblocks->jb_blocks += blocks; jblocks->jb_free += blocks; jext = &jblocks->jb_extent[jblocks->jb_used]; /* Adding the first block. */ if (jext->je_daddr == 0) { jext->je_daddr = daddr; jext->je_blocks = blocks; return; } /* Extending the last extent. */ if (jext->je_daddr + jext->je_blocks == daddr) { jext->je_blocks += blocks; return; } /* Adding a new extent. */ if (++jblocks->jb_used == jblocks->jb_avail) { jblocks->jb_avail *= 2; jext = malloc(sizeof(struct jextent) * jblocks->jb_avail, M_JBLOCKS, M_WAITOK | M_ZERO); memcpy(jext, jblocks->jb_extent, sizeof(struct jextent) * jblocks->jb_used); free(jblocks->jb_extent, M_JBLOCKS); jblocks->jb_extent = jext; } jext = &jblocks->jb_extent[jblocks->jb_used]; jext->je_daddr = daddr; jext->je_blocks = blocks; return; } int softdep_journal_lookup(mp, vpp) struct mount *mp; struct vnode **vpp; { struct componentname cnp; struct vnode *dvp; ino_t sujournal; int error; error = VFS_VGET(mp, UFS_ROOTINO, LK_EXCLUSIVE, &dvp); if (error) return (error); bzero(&cnp, sizeof(cnp)); cnp.cn_nameiop = LOOKUP; cnp.cn_flags = ISLASTCN; cnp.cn_thread = curthread; cnp.cn_cred = curthread->td_ucred; cnp.cn_pnbuf = SUJ_FILE; cnp.cn_nameptr = SUJ_FILE; cnp.cn_namelen = strlen(SUJ_FILE); error = ufs_lookup_ino(dvp, NULL, &cnp, &sujournal); vput(dvp); if (error != 0) return (error); error = VFS_VGET(mp, sujournal, LK_EXCLUSIVE, vpp); return (error); } /* * Open and verify the journal file. */ static int journal_mount(mp, fs, cred) struct mount *mp; struct fs *fs; struct ucred *cred; { struct jblocks *jblocks; struct ufsmount *ump; struct vnode *vp; struct inode *ip; ufs2_daddr_t blkno; int bcount; int error; int i; ump = VFSTOUFS(mp); ump->softdep_journal_tail = NULL; ump->softdep_on_journal = 0; ump->softdep_accdeps = 0; ump->softdep_req = 0; ump->softdep_jblocks = NULL; error = softdep_journal_lookup(mp, &vp); if (error != 0) { printf("Failed to find journal. Use tunefs to create one\n"); return (error); } ip = VTOI(vp); if (ip->i_size < SUJ_MIN) { error = ENOSPC; goto out; } bcount = lblkno(fs, ip->i_size); /* Only use whole blocks. */ jblocks = jblocks_create(); for (i = 0; i < bcount; i++) { error = ufs_bmaparray(vp, i, &blkno, NULL, NULL, NULL); if (error) break; jblocks_add(jblocks, blkno, fsbtodb(fs, fs->fs_frag)); } if (error) { jblocks_destroy(jblocks); goto out; } jblocks->jb_low = jblocks->jb_free / 3; /* Reserve 33%. */ jblocks->jb_min = jblocks->jb_free / 10; /* Suspend at 10%. */ ump->softdep_jblocks = jblocks; out: if (error == 0) { MNT_ILOCK(mp); mp->mnt_flag |= MNT_SUJ; mp->mnt_flag &= ~MNT_SOFTDEP; MNT_IUNLOCK(mp); /* * Only validate the journal contents if the * filesystem is clean, otherwise we write the logs * but they'll never be used. If the filesystem was * still dirty when we mounted it the journal is * invalid and a new journal can only be valid if it * starts from a clean mount. */ if (fs->fs_clean) { DIP_SET(ip, i_modrev, fs->fs_mtime); ip->i_flags |= IN_MODIFIED; ffs_update(vp, 1); } } vput(vp); return (error); } static void journal_unmount(ump) struct ufsmount *ump; { if (ump->softdep_jblocks) jblocks_destroy(ump->softdep_jblocks); ump->softdep_jblocks = NULL; } /* * Called when a journal record is ready to be written. Space is allocated * and the journal entry is created when the journal is flushed to stable * store. 
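 * Entries wait on the softdep_journal_pending list, with the tail kept
 * in softdep_journal_tail and a count in softdep_on_journal, until
 * softdep_process_journal() packs them into journal segment buffers.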
*/ static void add_to_journal(wk) struct worklist *wk; { struct ufsmount *ump; ump = VFSTOUFS(wk->wk_mp); LOCK_OWNED(ump); if (wk->wk_state & ONWORKLIST) panic("add_to_journal: %s(0x%X) already on list", TYPENAME(wk->wk_type), wk->wk_state); wk->wk_state |= ONWORKLIST | DEPCOMPLETE; if (LIST_EMPTY(&ump->softdep_journal_pending)) { ump->softdep_jblocks->jb_age = ticks; LIST_INSERT_HEAD(&ump->softdep_journal_pending, wk, wk_list); } else LIST_INSERT_AFTER(ump->softdep_journal_tail, wk, wk_list); ump->softdep_journal_tail = wk; ump->softdep_on_journal += 1; } /* * Remove an arbitrary item from the journal worklist, maintaining the tail * pointer. This happens when a new operation obviates the need to * journal an old operation. */ static void remove_from_journal(wk) struct worklist *wk; { struct ufsmount *ump; ump = VFSTOUFS(wk->wk_mp); LOCK_OWNED(ump); #ifdef SUJ_DEBUG { struct worklist *wkn; LIST_FOREACH(wkn, &ump->softdep_journal_pending, wk_list) if (wkn == wk) break; if (wkn == NULL) panic("remove_from_journal: %p is not in journal", wk); } #endif /* * We emulate a TAILQ to save space in most structures which do not * require TAILQ semantics. Here we must update the tail position * when removing the tail which is not the final entry. This works * only if the worklist linkage is at the beginning of the structure. */ if (ump->softdep_journal_tail == wk) ump->softdep_journal_tail = (struct worklist *)wk->wk_list.le_prev; WORKLIST_REMOVE(wk); ump->softdep_on_journal -= 1; } /* * Check for journal space as well as dependency limits so the prelink * code can throttle both journaled and non-journaled filesystems. * Threshold is 0 for low and 1 for min. */ static int journal_space(ump, thresh) struct ufsmount *ump; int thresh; { struct jblocks *jblocks; int limit, avail; jblocks = ump->softdep_jblocks; if (jblocks == NULL) return (1); /* * We use a tighter restriction here to prevent request_cleanup() * running in threads from running into locks we currently hold. * We have to be over the limit and our filesystem has to be * responsible for more than our share of that usage. */ limit = (max_softdeps / 10) * 9; if (dep_current[D_INODEDEP] > limit && ump->softdep_curdeps[D_INODEDEP] > limit / stat_flush_threads) return (0); if (thresh) thresh = jblocks->jb_min; else thresh = jblocks->jb_low; avail = (ump->softdep_on_journal * JREC_SIZE) / DEV_BSIZE; avail = jblocks->jb_free - avail; return (avail > thresh); } static void journal_suspend(ump) struct ufsmount *ump; { struct jblocks *jblocks; struct mount *mp; mp = UFSTOVFS(ump); jblocks = ump->softdep_jblocks; MNT_ILOCK(mp); if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { stat_journal_min++; mp->mnt_kern_flag |= MNTK_SUSPEND; mp->mnt_susp_owner = ump->softdep_flushtd; } jblocks->jb_suspended = 1; MNT_IUNLOCK(mp); } static int journal_unsuspend(struct ufsmount *ump) { struct jblocks *jblocks; struct mount *mp; mp = UFSTOVFS(ump); jblocks = ump->softdep_jblocks; if (jblocks != NULL && jblocks->jb_suspended && journal_space(ump, jblocks->jb_min)) { jblocks->jb_suspended = 0; FREE_LOCK(ump); mp->mnt_susp_owner = curthread; vfs_write_resume(mp, 0); ACQUIRE_LOCK(ump); return (1); } return (0); } /* * Called before any allocation function to be certain that there is * sufficient space in the journal prior to creating any new records. * Since in the case of block allocation we may have multiple locked * buffers at the time of the actual allocation we can not block * when the journal records are created.
Doing so would create a deadlock * if any of these buffers needed to be flushed to reclaim space. Instead * we require a sufficiently large amount of available space such that * each thread in the system could have passed this allocation check and * still have sufficient free space. With 20% of a minimum journal size * of 1MB we have 6553 records available. */ int softdep_prealloc(vp, waitok) struct vnode *vp; int waitok; { struct ufsmount *ump; KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0, ("softdep_prealloc called on non-softdep filesystem")); /* * Nothing to do if we are not running journaled soft updates. * If we currently hold the snapshot lock, we must avoid * handling other resources that could cause deadlock. Do not * touch quotas vnode since it is typically recursed with * other vnode locks held. */ if (DOINGSUJ(vp) == 0 || IS_SNAPSHOT(VTOI(vp)) || (vp->v_vflag & VV_SYSTEM) != 0) return (0); ump = VFSTOUFS(vp->v_mount); ACQUIRE_LOCK(ump); if (journal_space(ump, 0)) { FREE_LOCK(ump); return (0); } stat_journal_low++; FREE_LOCK(ump); if (waitok == MNT_NOWAIT) return (ENOSPC); /* * Attempt to sync this vnode once to flush any journal * work attached to it. */ if ((curthread->td_pflags & TDP_COWINPROGRESS) == 0) ffs_syncvnode(vp, waitok, 0); ACQUIRE_LOCK(ump); process_removes(vp); process_truncates(vp); if (journal_space(ump, 0) == 0) { softdep_speedup(ump); if (journal_space(ump, 1) == 0) journal_suspend(ump); } FREE_LOCK(ump); return (0); } /* * Before adjusting a link count on a vnode verify that we have sufficient * journal space. If not, process operations that depend on the currently * locked pair of vnodes to try to flush space as the syncer, buf daemon, * and softdep flush threads can not acquire these locks to reclaim space. */ static void softdep_prelink(dvp, vp) struct vnode *dvp; struct vnode *vp; { struct ufsmount *ump; ump = VFSTOUFS(dvp->v_mount); LOCK_OWNED(ump); /* * Nothing to do if we have sufficient journal space. * If we currently hold the snapshot lock, we must avoid * handling other resources that could cause deadlock. */ if (journal_space(ump, 0) || (vp && IS_SNAPSHOT(VTOI(vp)))) return; stat_journal_low++; FREE_LOCK(ump); if (vp) ffs_syncvnode(vp, MNT_NOWAIT, 0); ffs_syncvnode(dvp, MNT_WAIT, 0); ACQUIRE_LOCK(ump); /* Process vp before dvp as it may create .. removes. 
*/ if (vp) { process_removes(vp); process_truncates(vp); } process_removes(dvp); process_truncates(dvp); softdep_speedup(ump); process_worklist_item(UFSTOVFS(ump), 2, LK_NOWAIT); if (journal_space(ump, 0) == 0) { softdep_speedup(ump); if (journal_space(ump, 1) == 0) journal_suspend(ump); } } static void jseg_write(ump, jseg, data) struct ufsmount *ump; struct jseg *jseg; uint8_t *data; { struct jsegrec *rec; rec = (struct jsegrec *)data; rec->jsr_seq = jseg->js_seq; rec->jsr_oldest = jseg->js_oldseq; rec->jsr_cnt = jseg->js_cnt; rec->jsr_blocks = jseg->js_size / ump->um_devvp->v_bufobj.bo_bsize; rec->jsr_crc = 0; rec->jsr_time = ump->um_fs->fs_mtime; } static inline void inoref_write(inoref, jseg, rec) struct inoref *inoref; struct jseg *jseg; struct jrefrec *rec; { inoref->if_jsegdep->jd_seg = jseg; rec->jr_ino = inoref->if_ino; rec->jr_parent = inoref->if_parent; rec->jr_nlink = inoref->if_nlink; rec->jr_mode = inoref->if_mode; rec->jr_diroff = inoref->if_diroff; } static void jaddref_write(jaddref, jseg, data) struct jaddref *jaddref; struct jseg *jseg; uint8_t *data; { struct jrefrec *rec; rec = (struct jrefrec *)data; rec->jr_op = JOP_ADDREF; inoref_write(&jaddref->ja_ref, jseg, rec); } static void jremref_write(jremref, jseg, data) struct jremref *jremref; struct jseg *jseg; uint8_t *data; { struct jrefrec *rec; rec = (struct jrefrec *)data; rec->jr_op = JOP_REMREF; inoref_write(&jremref->jr_ref, jseg, rec); } static void jmvref_write(jmvref, jseg, data) struct jmvref *jmvref; struct jseg *jseg; uint8_t *data; { struct jmvrec *rec; rec = (struct jmvrec *)data; rec->jm_op = JOP_MVREF; rec->jm_ino = jmvref->jm_ino; rec->jm_parent = jmvref->jm_parent; rec->jm_oldoff = jmvref->jm_oldoff; rec->jm_newoff = jmvref->jm_newoff; } static void jnewblk_write(jnewblk, jseg, data) struct jnewblk *jnewblk; struct jseg *jseg; uint8_t *data; { struct jblkrec *rec; jnewblk->jn_jsegdep->jd_seg = jseg; rec = (struct jblkrec *)data; rec->jb_op = JOP_NEWBLK; rec->jb_ino = jnewblk->jn_ino; rec->jb_blkno = jnewblk->jn_blkno; rec->jb_lbn = jnewblk->jn_lbn; rec->jb_frags = jnewblk->jn_frags; rec->jb_oldfrags = jnewblk->jn_oldfrags; } static void jfreeblk_write(jfreeblk, jseg, data) struct jfreeblk *jfreeblk; struct jseg *jseg; uint8_t *data; { struct jblkrec *rec; jfreeblk->jf_dep.jb_jsegdep->jd_seg = jseg; rec = (struct jblkrec *)data; rec->jb_op = JOP_FREEBLK; rec->jb_ino = jfreeblk->jf_ino; rec->jb_blkno = jfreeblk->jf_blkno; rec->jb_lbn = jfreeblk->jf_lbn; rec->jb_frags = jfreeblk->jf_frags; rec->jb_oldfrags = 0; } static void jfreefrag_write(jfreefrag, jseg, data) struct jfreefrag *jfreefrag; struct jseg *jseg; uint8_t *data; { struct jblkrec *rec; jfreefrag->fr_jsegdep->jd_seg = jseg; rec = (struct jblkrec *)data; rec->jb_op = JOP_FREEBLK; rec->jb_ino = jfreefrag->fr_ino; rec->jb_blkno = jfreefrag->fr_blkno; rec->jb_lbn = jfreefrag->fr_lbn; rec->jb_frags = jfreefrag->fr_frags; rec->jb_oldfrags = 0; } static void jtrunc_write(jtrunc, jseg, data) struct jtrunc *jtrunc; struct jseg *jseg; uint8_t *data; { struct jtrncrec *rec; jtrunc->jt_dep.jb_jsegdep->jd_seg = jseg; rec = (struct jtrncrec *)data; rec->jt_op = JOP_TRUNC; rec->jt_ino = jtrunc->jt_ino; rec->jt_size = jtrunc->jt_size; rec->jt_extsize = jtrunc->jt_extsize; } static void jfsync_write(jfsync, jseg, data) struct jfsync *jfsync; struct jseg *jseg; uint8_t *data; { struct jtrncrec *rec; rec = (struct jtrncrec *)data; rec->jt_op = JOP_SYNC; rec->jt_ino = jfsync->jfs_ino; rec->jt_size = jfsync->jfs_size; rec->jt_extsize = jfsync->jfs_extsize; } 
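/*
 * Each of the j*_write() routines above marshals one fixed-size journal
 * record (JREC_SIZE bytes) into the segment buffer. Illustrative sizing
 * for softdep_process_journal() below, assuming a 32-byte JREC_SIZE, a
 * 512-byte device block, and a 32K filesystem block: jrecmin =
 * 512 / 32 - 1 = 15 records per device block (one slot is reserved for
 * the per-block segment header) and jrecmax = (32768 / 512) * 15 = 960
 * records per full filesystem block.
 */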
static void softdep_flushjournal(mp) struct mount *mp; { struct jblocks *jblocks; struct ufsmount *ump; if (MOUNTEDSUJ(mp) == 0) return; ump = VFSTOUFS(mp); jblocks = ump->softdep_jblocks; ACQUIRE_LOCK(ump); while (ump->softdep_on_journal) { jblocks->jb_needseg = 1; softdep_process_journal(mp, NULL, MNT_WAIT); } FREE_LOCK(ump); } static void softdep_synchronize_completed(struct bio *); static void softdep_synchronize(struct bio *, struct ufsmount *, void *); static void softdep_synchronize_completed(bp) struct bio *bp; { struct jseg *oldest; struct jseg *jseg; struct ufsmount *ump; /* * caller1 marks the last segment written before we issued the * synchronize cache. */ jseg = bp->bio_caller1; if (jseg == NULL) { g_destroy_bio(bp); return; } ump = VFSTOUFS(jseg->js_list.wk_mp); ACQUIRE_LOCK(ump); oldest = NULL; /* * Mark all the journal entries waiting on the synchronize cache * as completed so they may continue on. */ while (jseg != NULL && (jseg->js_state & COMPLETE) == 0) { jseg->js_state |= COMPLETE; oldest = jseg; jseg = TAILQ_PREV(jseg, jseglst, js_next); } /* * Restart deferred journal entry processing from the oldest * completed jseg. */ if (oldest) complete_jsegs(oldest); FREE_LOCK(ump); g_destroy_bio(bp); } /* * Send BIO_FLUSH/SYNCHRONIZE CACHE to the device to enforce write ordering * barriers. The journal must be written prior to any blocks that depend * on it and the journal can not be released until the blocks have be * written. This code handles both barriers simultaneously. */ static void softdep_synchronize(bp, ump, caller1) struct bio *bp; struct ufsmount *ump; void *caller1; { bp->bio_cmd = BIO_FLUSH; bp->bio_flags |= BIO_ORDERED; bp->bio_data = NULL; bp->bio_offset = ump->um_cp->provider->mediasize; bp->bio_length = 0; bp->bio_done = softdep_synchronize_completed; bp->bio_caller1 = caller1; g_io_request(bp, (struct g_consumer *)ump->um_devvp->v_bufobj.bo_private); } /* * Flush some journal records to disk. */ static void softdep_process_journal(mp, needwk, flags) struct mount *mp; struct worklist *needwk; int flags; { struct jblocks *jblocks; struct ufsmount *ump; struct worklist *wk; struct jseg *jseg; struct buf *bp; struct bio *bio; uint8_t *data; struct fs *fs; int shouldflush; int segwritten; int jrecmin; /* Minimum records per block. */ int jrecmax; /* Maximum records per block. */ int size; int cnt; int off; int devbsize; if (MOUNTEDSUJ(mp) == 0) return; shouldflush = softdep_flushcache; bio = NULL; jseg = NULL; ump = VFSTOUFS(mp); LOCK_OWNED(ump); fs = ump->um_fs; jblocks = ump->softdep_jblocks; devbsize = ump->um_devvp->v_bufobj.bo_bsize; /* * We write anywhere between a disk block and fs block. The upper * bound is picked to prevent buffer cache fragmentation and limit * processing time per I/O. */ jrecmin = (devbsize / JREC_SIZE) - 1; /* -1 for seg header */ jrecmax = (fs->fs_bsize / devbsize) * jrecmin; segwritten = 0; for (;;) { cnt = ump->softdep_on_journal; /* * Criteria for writing a segment: * 1) We have a full block. * 2) We're called from jwait() and haven't found the * journal item yet. * 3) Always write if needseg is set. * 4) If we are called from process_worklist and have * not yet written anything we write a partial block * to enforce a 1 second maximum latency on journal * entries. */ if (cnt < (jrecmax - 1) && needwk == NULL && jblocks->jb_needseg == 0 && (segwritten || cnt == 0)) break; cnt++; /* * Verify some free journal space. 
softdep_prealloc() should * guarantee that we don't run out so this is indicative of * a problem with the flow control. Try to recover * gracefully in any event. */ while (jblocks->jb_free == 0) { if (flags != MNT_WAIT) break; printf("softdep: Out of journal space!\n"); softdep_speedup(ump); msleep(jblocks, LOCK_PTR(ump), PRIBIO, "jblocks", hz); } FREE_LOCK(ump); jseg = malloc(sizeof(*jseg), M_JSEG, M_SOFTDEP_FLAGS); workitem_alloc(&jseg->js_list, D_JSEG, mp); LIST_INIT(&jseg->js_entries); LIST_INIT(&jseg->js_indirs); jseg->js_state = ATTACHED; if (shouldflush == 0) jseg->js_state |= COMPLETE; else if (bio == NULL) bio = g_alloc_bio(); jseg->js_jblocks = jblocks; bp = geteblk(fs->fs_bsize, 0); ACQUIRE_LOCK(ump); /* * If there was a race while we were allocating the block * and jseg the entry we care about was likely written. * We bail out in both the WAIT and NOWAIT case and assume * the caller will loop if the entry it cares about is * not written. */ cnt = ump->softdep_on_journal; if (cnt + jblocks->jb_needseg == 0 || jblocks->jb_free == 0) { bp->b_flags |= B_INVAL | B_NOCACHE; WORKITEM_FREE(jseg, D_JSEG); FREE_LOCK(ump); brelse(bp); ACQUIRE_LOCK(ump); break; } /* * Calculate the disk block size required for the available * records rounded to the min size. */ if (cnt == 0) size = devbsize; else if (cnt < jrecmax) size = howmany(cnt, jrecmin) * devbsize; else size = fs->fs_bsize; /* * Allocate a disk block for this journal data and account * for truncation of the requested size if enough contiguous * space was not available. */ bp->b_blkno = jblocks_alloc(jblocks, size, &size); bp->b_lblkno = bp->b_blkno; bp->b_offset = bp->b_blkno * DEV_BSIZE; bp->b_bcount = size; bp->b_flags &= ~B_INVAL; bp->b_flags |= B_VALIDSUSPWRT | B_NOCOPY; /* * Initialize our jseg with cnt records. Assign the next * sequence number to it and link it in-order. */ cnt = MIN(cnt, (size / devbsize) * jrecmin); jseg->js_buf = bp; jseg->js_cnt = cnt; jseg->js_refs = cnt + 1; /* Self ref. */ jseg->js_size = size; jseg->js_seq = jblocks->jb_nextseq++; if (jblocks->jb_oldestseg == NULL) jblocks->jb_oldestseg = jseg; jseg->js_oldseq = jblocks->jb_oldestseg->js_seq; TAILQ_INSERT_TAIL(&jblocks->jb_segs, jseg, js_next); if (jblocks->jb_writeseg == NULL) jblocks->jb_writeseg = jseg; /* * Start filling in records from the pending list. */ data = bp->b_data; off = 0; /* * Always put a header on the first block. * XXX As with below, there might not be a chance to get * into the loop. Ensure that something valid is written. */ jseg_write(ump, jseg, data); off += JREC_SIZE; data = bp->b_data + off; /* * XXX Something is wrong here. There's no work to do, * but we need to perform and I/O and allow it to complete * anyways. */ if (LIST_EMPTY(&ump->softdep_journal_pending)) stat_emptyjblocks++; while ((wk = LIST_FIRST(&ump->softdep_journal_pending)) != NULL) { if (cnt == 0) break; /* Place a segment header on every device block. 
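 * (Presumably this keeps each devbsize-sized chunk of the segment
 * self-describing, so recovery can still recognize valid records when
 * only part of a segment reaches the disk.)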
*/ if ((off % devbsize) == 0) { jseg_write(ump, jseg, data); off += JREC_SIZE; data = bp->b_data + off; } if (wk == needwk) needwk = NULL; remove_from_journal(wk); wk->wk_state |= INPROGRESS; WORKLIST_INSERT(&jseg->js_entries, wk); switch (wk->wk_type) { case D_JADDREF: jaddref_write(WK_JADDREF(wk), jseg, data); break; case D_JREMREF: jremref_write(WK_JREMREF(wk), jseg, data); break; case D_JMVREF: jmvref_write(WK_JMVREF(wk), jseg, data); break; case D_JNEWBLK: jnewblk_write(WK_JNEWBLK(wk), jseg, data); break; case D_JFREEBLK: jfreeblk_write(WK_JFREEBLK(wk), jseg, data); break; case D_JFREEFRAG: jfreefrag_write(WK_JFREEFRAG(wk), jseg, data); break; case D_JTRUNC: jtrunc_write(WK_JTRUNC(wk), jseg, data); break; case D_JFSYNC: jfsync_write(WK_JFSYNC(wk), jseg, data); break; default: panic("process_journal: Unknown type %s", TYPENAME(wk->wk_type)); /* NOTREACHED */ } off += JREC_SIZE; data = bp->b_data + off; cnt--; } /* Clear any remaining space so we don't leak kernel data */ if (size > off) bzero(data, size - off); /* * Write this one buffer and continue. */ segwritten = 1; jblocks->jb_needseg = 0; WORKLIST_INSERT(&bp->b_dep, &jseg->js_list); FREE_LOCK(ump); pbgetvp(ump->um_devvp, bp); /* * We only do the blocking wait once we find the journal * entry we're looking for. */ if (needwk == NULL && flags == MNT_WAIT) bwrite(bp); else bawrite(bp); ACQUIRE_LOCK(ump); } /* * If we wrote a segment issue a synchronize cache so the journal * is reflected on disk before the data is written. Since reclaiming * journal space also requires writing a journal record this * process also enforces a barrier before reclamation. */ if (segwritten && shouldflush) { softdep_synchronize(bio, ump, TAILQ_LAST(&jblocks->jb_segs, jseglst)); } else if (bio) g_destroy_bio(bio); /* * If we've suspended the filesystem because we ran out of journal * space either try to sync it here to make some progress or * unsuspend it if we already have. */ if (flags == 0 && jblocks->jb_suspended) { if (journal_unsuspend(ump)) return; FREE_LOCK(ump); VFS_SYNC(mp, MNT_NOWAIT); ffs_sbupdate(ump, MNT_WAIT, 0); ACQUIRE_LOCK(ump); } } /* * Complete a jseg, allowing all dependencies awaiting journal writes * to proceed. Each journal dependency also attaches a jsegdep to dependent * structures so that the journal segment can be freed to reclaim space. */ static void complete_jseg(jseg) struct jseg *jseg; { struct worklist *wk; struct jmvref *jmvref; #ifdef INVARIANTS int i = 0; #endif while ((wk = LIST_FIRST(&jseg->js_entries)) != NULL) { WORKLIST_REMOVE(wk); wk->wk_state &= ~INPROGRESS; wk->wk_state |= COMPLETE; KASSERT(i++ < jseg->js_cnt, ("handle_written_jseg: overflow %d >= %d", i - 1, jseg->js_cnt)); switch (wk->wk_type) { case D_JADDREF: handle_written_jaddref(WK_JADDREF(wk)); break; case D_JREMREF: handle_written_jremref(WK_JREMREF(wk)); break; case D_JMVREF: rele_jseg(jseg); /* No jsegdep. */ jmvref = WK_JMVREF(wk); LIST_REMOVE(jmvref, jm_deps); if ((jmvref->jm_pagedep->pd_state & ONWORKLIST) == 0) free_pagedep(jmvref->jm_pagedep); WORKITEM_FREE(jmvref, D_JMVREF); break; case D_JNEWBLK: handle_written_jnewblk(WK_JNEWBLK(wk)); break; case D_JFREEBLK: handle_written_jblkdep(&WK_JFREEBLK(wk)->jf_dep); break; case D_JTRUNC: handle_written_jblkdep(&WK_JTRUNC(wk)->jt_dep); break; case D_JFSYNC: rele_jseg(jseg); /* No jsegdep. 
*/ WORKITEM_FREE(wk, D_JFSYNC); break; case D_JFREEFRAG: handle_written_jfreefrag(WK_JFREEFRAG(wk)); break; default: panic("handle_written_jseg: Unknown type %s", TYPENAME(wk->wk_type)); /* NOTREACHED */ } } /* Release the self reference so the structure may be freed. */ rele_jseg(jseg); } /* * Determine which jsegs are ready for completion processing. Waits for * synchronize cache to complete as well as forcing in-order completion * of journal entries. */ static void complete_jsegs(jseg) struct jseg *jseg; { struct jblocks *jblocks; struct jseg *jsegn; jblocks = jseg->js_jblocks; /* * Don't allow out of order completions. If this isn't the first * block wait for it to write before we're done. */ if (jseg != jblocks->jb_writeseg) return; /* Iterate through available jsegs processing their entries. */ while (jseg && (jseg->js_state & ALLCOMPLETE) == ALLCOMPLETE) { jblocks->jb_oldestwrseq = jseg->js_oldseq; jsegn = TAILQ_NEXT(jseg, js_next); complete_jseg(jseg); jseg = jsegn; } jblocks->jb_writeseg = jseg; /* * Attempt to free jsegs now that oldestwrseq may have advanced. */ free_jsegs(jblocks); } /* * Mark a jseg as DEPCOMPLETE and throw away the buffer. Attempt to handle * the final completions. */ static void handle_written_jseg(jseg, bp) struct jseg *jseg; struct buf *bp; { if (jseg->js_refs == 0) panic("handle_written_jseg: No self-reference on %p", jseg); jseg->js_state |= DEPCOMPLETE; /* * We'll never need this buffer again, set flags so it will be * discarded. */ bp->b_flags |= B_INVAL | B_NOCACHE; pbrelvp(bp); complete_jsegs(jseg); } static inline struct jsegdep * inoref_jseg(inoref) struct inoref *inoref; { struct jsegdep *jsegdep; jsegdep = inoref->if_jsegdep; inoref->if_jsegdep = NULL; return (jsegdep); } /* * Called once a jremref has made it to stable store. The jremref is marked * complete and we attempt to free it. Any pagedeps writes sleeping waiting * for the jremref to complete will be awoken by free_jremref. */ static void handle_written_jremref(jremref) struct jremref *jremref; { struct inodedep *inodedep; struct jsegdep *jsegdep; struct dirrem *dirrem; /* Grab the jsegdep. */ jsegdep = inoref_jseg(&jremref->jr_ref); /* * Remove us from the inoref list. */ if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 0, &inodedep) == 0) panic("handle_written_jremref: Lost inodedep"); TAILQ_REMOVE(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps); /* * Complete the dirrem. */ dirrem = jremref->jr_dirrem; jremref->jr_dirrem = NULL; LIST_REMOVE(jremref, jr_deps); jsegdep->jd_state |= jremref->jr_state & MKDIR_PARENT; jwork_insert(&dirrem->dm_jwork, jsegdep); if (LIST_EMPTY(&dirrem->dm_jremrefhd) && (dirrem->dm_state & COMPLETE) != 0) add_to_worklist(&dirrem->dm_list, 0); free_jremref(jremref); } /* * Called once a jaddref has made it to stable store. The dependency is * marked complete and any dependent structures are added to the inode * bufwait list to be completed as soon as it is written. If a bitmap write * depends on this entry we move the inode into the inodedephd of the * bmsafemap dependency and attempt to remove the jaddref from the bmsafemap. */ static void handle_written_jaddref(jaddref) struct jaddref *jaddref; { struct jsegdep *jsegdep; struct inodedep *inodedep; struct diradd *diradd; struct mkdir *mkdir; /* Grab the jsegdep. 
*/ jsegdep = inoref_jseg(&jaddref->ja_ref); mkdir = NULL; diradd = NULL; if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino, 0, &inodedep) == 0) panic("handle_written_jaddref: Lost inodedep."); if (jaddref->ja_diradd == NULL) panic("handle_written_jaddref: No dependency"); if (jaddref->ja_diradd->da_list.wk_type == D_DIRADD) { diradd = jaddref->ja_diradd; WORKLIST_INSERT(&inodedep->id_bufwait, &diradd->da_list); } else if (jaddref->ja_state & MKDIR_PARENT) { mkdir = jaddref->ja_mkdir; WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir->md_list); } else if (jaddref->ja_state & MKDIR_BODY) mkdir = jaddref->ja_mkdir; else panic("handle_written_jaddref: Unknown dependency %p", jaddref->ja_diradd); jaddref->ja_diradd = NULL; /* also clears ja_mkdir */ /* * Remove us from the inode list. */ TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps); /* * The mkdir may be waiting on the jaddref to clear before freeing. */ if (mkdir) { KASSERT(mkdir->md_list.wk_type == D_MKDIR, ("handle_written_jaddref: Incorrect type for mkdir %s", TYPENAME(mkdir->md_list.wk_type))); mkdir->md_jaddref = NULL; diradd = mkdir->md_diradd; mkdir->md_state |= DEPCOMPLETE; complete_mkdir(mkdir); } jwork_insert(&diradd->da_jwork, jsegdep); if (jaddref->ja_state & NEWBLOCK) { inodedep->id_state |= ONDEPLIST; LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_inodedephd, inodedep, id_deps); } free_jaddref(jaddref); } /* * Called once a jnewblk journal is written. The allocdirect or allocindir * is placed in the bmsafemap to await notification of a written bitmap. If * the operation was canceled we add the segdep to the appropriate * dependency to free the journal space once the canceling operation * completes. */ static void handle_written_jnewblk(jnewblk) struct jnewblk *jnewblk; { struct bmsafemap *bmsafemap; struct freefrag *freefrag; struct freework *freework; struct jsegdep *jsegdep; struct newblk *newblk; /* Grab the jsegdep. */ jsegdep = jnewblk->jn_jsegdep; jnewblk->jn_jsegdep = NULL; if (jnewblk->jn_dep == NULL) panic("handle_written_jnewblk: No dependency for the segdep."); switch (jnewblk->jn_dep->wk_type) { case D_NEWBLK: case D_ALLOCDIRECT: case D_ALLOCINDIR: /* * Add the written block to the bmsafemap so it can * be notified when the bitmap is on disk. */ newblk = WK_NEWBLK(jnewblk->jn_dep); newblk->nb_jnewblk = NULL; if ((newblk->nb_state & GOINGAWAY) == 0) { bmsafemap = newblk->nb_bmsafemap; newblk->nb_state |= ONDEPLIST; LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps); } jwork_insert(&newblk->nb_jwork, jsegdep); break; case D_FREEFRAG: /* * A newblock being removed by a freefrag when replaced by * frag extension. */ freefrag = WK_FREEFRAG(jnewblk->jn_dep); freefrag->ff_jdep = NULL; jwork_insert(&freefrag->ff_jwork, jsegdep); break; case D_FREEWORK: /* * A direct block was removed by truncate. */ freework = WK_FREEWORK(jnewblk->jn_dep); freework->fw_jnewblk = NULL; jwork_insert(&freework->fw_freeblks->fb_jwork, jsegdep); break; default: panic("handle_written_jnewblk: Unknown type %d.", jnewblk->jn_dep->wk_type); } jnewblk->jn_dep = NULL; free_jnewblk(jnewblk); } /* * Cancel a jfreefrag that won't be needed, probably due to colliding with * an in-flight allocation that has not yet been committed. Divorce us * from the freefrag and mark it DEPCOMPLETE so that it may be added * to the worklist. 
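 * The cancellation below simply drops the jsegdep, since the journal
 * space is no longer needed, detaches the freefrag, and marks the
 * freefrag DEPCOMPLETE so it can proceed without a journal write.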
*/ static void cancel_jfreefrag(jfreefrag) struct jfreefrag *jfreefrag; { struct freefrag *freefrag; if (jfreefrag->fr_jsegdep) { free_jsegdep(jfreefrag->fr_jsegdep); jfreefrag->fr_jsegdep = NULL; } freefrag = jfreefrag->fr_freefrag; jfreefrag->fr_freefrag = NULL; free_jfreefrag(jfreefrag); freefrag->ff_state |= DEPCOMPLETE; CTR1(KTR_SUJ, "cancel_jfreefrag: blkno %jd", freefrag->ff_blkno); } /* * Free a jfreefrag when the parent freefrag is rendered obsolete. */ static void free_jfreefrag(jfreefrag) struct jfreefrag *jfreefrag; { if (jfreefrag->fr_state & INPROGRESS) WORKLIST_REMOVE(&jfreefrag->fr_list); else if (jfreefrag->fr_state & ONWORKLIST) remove_from_journal(&jfreefrag->fr_list); if (jfreefrag->fr_freefrag != NULL) panic("free_jfreefrag: Still attached to a freefrag."); WORKITEM_FREE(jfreefrag, D_JFREEFRAG); } /* * Called when the journal write for a jfreefrag completes. The parent * freefrag is added to the worklist if this completes its dependencies. */ static void handle_written_jfreefrag(jfreefrag) struct jfreefrag *jfreefrag; { struct jsegdep *jsegdep; struct freefrag *freefrag; /* Grab the jsegdep. */ jsegdep = jfreefrag->fr_jsegdep; jfreefrag->fr_jsegdep = NULL; freefrag = jfreefrag->fr_freefrag; if (freefrag == NULL) panic("handle_written_jfreefrag: No freefrag."); freefrag->ff_state |= DEPCOMPLETE; freefrag->ff_jdep = NULL; jwork_insert(&freefrag->ff_jwork, jsegdep); if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE) add_to_worklist(&freefrag->ff_list, 0); jfreefrag->fr_freefrag = NULL; free_jfreefrag(jfreefrag); } /* * Called when the journal write for a jfreeblk completes. The jfreeblk * is removed from the freeblks list of pending journal writes and the * jsegdep is moved to the freeblks jwork to be completed when all blocks * have been reclaimed. */ static void handle_written_jblkdep(jblkdep) struct jblkdep *jblkdep; { struct freeblks *freeblks; struct jsegdep *jsegdep; /* Grab the jsegdep. */ jsegdep = jblkdep->jb_jsegdep; jblkdep->jb_jsegdep = NULL; freeblks = jblkdep->jb_freeblks; LIST_REMOVE(jblkdep, jb_deps); jwork_insert(&freeblks->fb_jwork, jsegdep); /* * If the freeblks is all journaled, we can add it to the worklist. */ if (LIST_EMPTY(&freeblks->fb_jblkdephd) && (freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE) add_to_worklist(&freeblks->fb_list, WK_NODELAY); free_jblkdep(jblkdep); } static struct jsegdep * newjsegdep(struct worklist *wk) { struct jsegdep *jsegdep; jsegdep = malloc(sizeof(*jsegdep), M_JSEGDEP, M_SOFTDEP_FLAGS); workitem_alloc(&jsegdep->jd_list, D_JSEGDEP, wk->wk_mp); jsegdep->jd_seg = NULL; return (jsegdep); } static struct jmvref * newjmvref(dp, ino, oldoff, newoff) struct inode *dp; ino_t ino; off_t oldoff; off_t newoff; { struct jmvref *jmvref; jmvref = malloc(sizeof(*jmvref), M_JMVREF, M_SOFTDEP_FLAGS); workitem_alloc(&jmvref->jm_list, D_JMVREF, ITOVFS(dp)); jmvref->jm_list.wk_state = ATTACHED | DEPCOMPLETE; jmvref->jm_parent = dp->i_number; jmvref->jm_ino = ino; jmvref->jm_oldoff = oldoff; jmvref->jm_newoff = newoff; return (jmvref); } /* * Allocate a new jremref that tracks the removal of ip from dp with the * directory entry offset of diroff. Mark the entry as ATTACHED and * DEPCOMPLETE as we have all the information required for the journal write * and the directory has already been removed from the buffer. The caller * is responsible for linking the jremref into the pagedep and adding it * to the journal to write. 
The MKDIR_PARENT flag is set if we're doing * a DOTDOT addition so handle_workitem_remove() can properly assign * the jsegdep when we're done. */ static struct jremref * newjremref(struct dirrem *dirrem, struct inode *dp, struct inode *ip, off_t diroff, nlink_t nlink) { struct jremref *jremref; jremref = malloc(sizeof(*jremref), M_JREMREF, M_SOFTDEP_FLAGS); workitem_alloc(&jremref->jr_list, D_JREMREF, ITOVFS(dp)); jremref->jr_state = ATTACHED; newinoref(&jremref->jr_ref, ip->i_number, dp->i_number, diroff, nlink, ip->i_mode); jremref->jr_dirrem = dirrem; return (jremref); } static inline void newinoref(struct inoref *inoref, ino_t ino, ino_t parent, off_t diroff, nlink_t nlink, uint16_t mode) { inoref->if_jsegdep = newjsegdep(&inoref->if_list); inoref->if_diroff = diroff; inoref->if_ino = ino; inoref->if_parent = parent; inoref->if_nlink = nlink; inoref->if_mode = mode; } /* * Allocate a new jaddref to track the addition of ino to dp at diroff. The * directory offset may not be known until later. The caller is responsible * adding the entry to the journal when this information is available. nlink * should be the link count prior to the addition and mode is only required * to have the correct FMT. */ static struct jaddref * newjaddref(struct inode *dp, ino_t ino, off_t diroff, int16_t nlink, uint16_t mode) { struct jaddref *jaddref; jaddref = malloc(sizeof(*jaddref), M_JADDREF, M_SOFTDEP_FLAGS); workitem_alloc(&jaddref->ja_list, D_JADDREF, ITOVFS(dp)); jaddref->ja_state = ATTACHED; jaddref->ja_mkdir = NULL; newinoref(&jaddref->ja_ref, ino, dp->i_number, diroff, nlink, mode); return (jaddref); } /* * Create a new free dependency for a freework. The caller is responsible * for adjusting the reference count when it has the lock held. The freedep * will track an outstanding bitmap write that will ultimately clear the * freework to continue. */ static struct freedep * newfreedep(struct freework *freework) { struct freedep *freedep; freedep = malloc(sizeof(*freedep), M_FREEDEP, M_SOFTDEP_FLAGS); workitem_alloc(&freedep->fd_list, D_FREEDEP, freework->fw_list.wk_mp); freedep->fd_freework = freework; return (freedep); } /* * Free a freedep structure once the buffer it is linked to is written. If * this is the last reference to the freework schedule it for completion. */ static void free_freedep(freedep) struct freedep *freedep; { struct freework *freework; freework = freedep->fd_freework; freework->fw_freeblks->fb_cgwait--; if (--freework->fw_ref == 0) freework_enqueue(freework); WORKITEM_FREE(freedep, D_FREEDEP); } /* * Allocate a new freework structure that may be a level in an indirect * when parent is not NULL or a top level block when it is. The top level * freework structures are allocated without the per-filesystem lock held * and before the freeblks is visible outside of softdep_setup_freeblocks(). */ static struct freework * newfreework(ump, freeblks, parent, lbn, nb, frags, off, journal) struct ufsmount *ump; struct freeblks *freeblks; struct freework *parent; ufs_lbn_t lbn; ufs2_daddr_t nb; int frags; int off; int journal; { struct freework *freework; freework = malloc(sizeof(*freework), M_FREEWORK, M_SOFTDEP_FLAGS); workitem_alloc(&freework->fw_list, D_FREEWORK, freeblks->fb_list.wk_mp); freework->fw_state = ATTACHED; freework->fw_jnewblk = NULL; freework->fw_freeblks = freeblks; freework->fw_parent = parent; freework->fw_lbn = lbn; freework->fw_blkno = nb; freework->fw_frags = frags; freework->fw_indir = NULL; freework->fw_ref = (MOUNTEDSUJ(UFSTOVFS(ump)) == 0 || lbn >= -UFS_NXADDR) ? 
0 : NINDIR(ump->um_fs) + 1; freework->fw_start = freework->fw_off = off; if (journal) newjfreeblk(freeblks, lbn, nb, frags); if (parent == NULL) { ACQUIRE_LOCK(ump); WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list); freeblks->fb_ref++; FREE_LOCK(ump); } return (freework); } /* * Eliminate a jfreeblk for a block that does not need journaling. */ static void cancel_jfreeblk(freeblks, blkno) struct freeblks *freeblks; ufs2_daddr_t blkno; { struct jfreeblk *jfreeblk; struct jblkdep *jblkdep; LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps) { if (jblkdep->jb_list.wk_type != D_JFREEBLK) continue; jfreeblk = WK_JFREEBLK(&jblkdep->jb_list); if (jfreeblk->jf_blkno == blkno) break; } if (jblkdep == NULL) return; CTR1(KTR_SUJ, "cancel_jfreeblk: blkno %jd", blkno); free_jsegdep(jblkdep->jb_jsegdep); LIST_REMOVE(jblkdep, jb_deps); WORKITEM_FREE(jfreeblk, D_JFREEBLK); } /* * Allocate a new jfreeblk to journal top level block pointer when truncating * a file. The caller must add this to the worklist when the per-filesystem * lock is held. */ static struct jfreeblk * newjfreeblk(freeblks, lbn, blkno, frags) struct freeblks *freeblks; ufs_lbn_t lbn; ufs2_daddr_t blkno; int frags; { struct jfreeblk *jfreeblk; jfreeblk = malloc(sizeof(*jfreeblk), M_JFREEBLK, M_SOFTDEP_FLAGS); workitem_alloc(&jfreeblk->jf_dep.jb_list, D_JFREEBLK, freeblks->fb_list.wk_mp); jfreeblk->jf_dep.jb_jsegdep = newjsegdep(&jfreeblk->jf_dep.jb_list); jfreeblk->jf_dep.jb_freeblks = freeblks; jfreeblk->jf_ino = freeblks->fb_inum; jfreeblk->jf_lbn = lbn; jfreeblk->jf_blkno = blkno; jfreeblk->jf_frags = frags; LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jfreeblk->jf_dep, jb_deps); return (jfreeblk); } /* * The journal is only prepared to handle full-size block numbers, so we * have to adjust the record to reflect the change to a full-size block. * For example, suppose we have a block made up of fragments 8-15 and * want to free its last two fragments. We are given a request that says: * FREEBLK ino=5, blkno=14, lbn=0, frags=2, oldfrags=0 * where frags are the number of fragments to free and oldfrags are the * number of fragments to keep. To block align it, we have to change it to * have a valid full-size blkno, so it becomes: * FREEBLK ino=5, blkno=8, lbn=0, frags=2, oldfrags=6 */ static void adjust_newfreework(freeblks, frag_offset) struct freeblks *freeblks; int frag_offset; { struct jfreeblk *jfreeblk; KASSERT((LIST_FIRST(&freeblks->fb_jblkdephd) != NULL && LIST_FIRST(&freeblks->fb_jblkdephd)->jb_list.wk_type == D_JFREEBLK), ("adjust_newfreework: Missing freeblks dependency")); jfreeblk = WK_JFREEBLK(LIST_FIRST(&freeblks->fb_jblkdephd)); jfreeblk->jf_blkno -= frag_offset; jfreeblk->jf_frags += frag_offset; } /* * Allocate a new jtrunc to track a partial truncation. */ static struct jtrunc * newjtrunc(freeblks, size, extsize) struct freeblks *freeblks; off_t size; int extsize; { struct jtrunc *jtrunc; jtrunc = malloc(sizeof(*jtrunc), M_JTRUNC, M_SOFTDEP_FLAGS); workitem_alloc(&jtrunc->jt_dep.jb_list, D_JTRUNC, freeblks->fb_list.wk_mp); jtrunc->jt_dep.jb_jsegdep = newjsegdep(&jtrunc->jt_dep.jb_list); jtrunc->jt_dep.jb_freeblks = freeblks; jtrunc->jt_ino = freeblks->fb_inum; jtrunc->jt_size = size; jtrunc->jt_extsize = extsize; LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jtrunc->jt_dep, jb_deps); return (jtrunc); } /* * If we're canceling a new bitmap we have to search for another ref * to move into the bmsafemap dep. This might be better expressed * with another structure. 
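 */

/*
 * Editor's addition, not part of the original source: a standalone sketch of
 * the block-alignment arithmetic described in the adjust_newfreework() comment
 * above, reproducing its FREEBLK worked example (fragments 8-15, freeing the
 * last two).  The fs_frag value and the freeblk_rec structure are assumptions
 * made for the illustration only.
 */
#if 0
#include <stdio.h>

/* Hypothetical stand-in for the relevant fields of a FREEBLK record. */
struct freeblk_rec {
        int     ino;
        long    blkno;          /* fragment address being freed */
        int     frags;          /* fragments to free */
        int     oldfrags;       /* fragments to keep */
};

int
main(void)
{
        /* "FREEBLK ino=5, blkno=14, lbn=0, frags=2, oldfrags=0" */
        struct freeblk_rec rec = { 5, 14, 2, 0 };
        int fs_frag = 8;        /* assumed fragments per full block */
        int frag_offset;

        /* Offset of blkno 14 within the full-size block starting at 8. */
        frag_offset = rec.blkno % fs_frag;

        /* Re-express the record relative to a valid full-size blkno. */
        rec.blkno -= frag_offset;
        rec.oldfrags += frag_offset;

        /* Prints: FREEBLK ino=5, blkno=8, frags=2, oldfrags=6 */
        printf("FREEBLK ino=%d, blkno=%ld, frags=%d, oldfrags=%d\n",
            rec.ino, rec.blkno, rec.frags, rec.oldfrags);
        return (0);
}
#endif
/*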
*/ static void move_newblock_dep(jaddref, inodedep) struct jaddref *jaddref; struct inodedep *inodedep; { struct inoref *inoref; struct jaddref *jaddrefn; jaddrefn = NULL; for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref; inoref = TAILQ_NEXT(inoref, if_deps)) { if ((jaddref->ja_state & NEWBLOCK) && inoref->if_list.wk_type == D_JADDREF) { jaddrefn = (struct jaddref *)inoref; break; } } if (jaddrefn == NULL) return; jaddrefn->ja_state &= ~(ATTACHED | UNDONE); jaddrefn->ja_state |= jaddref->ja_state & (ATTACHED | UNDONE | NEWBLOCK); jaddref->ja_state &= ~(ATTACHED | UNDONE | NEWBLOCK); jaddref->ja_state |= ATTACHED; LIST_REMOVE(jaddref, ja_bmdeps); LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_jaddrefhd, jaddrefn, ja_bmdeps); } /* * Cancel a jaddref either before it has been written or while it is being * written. This happens when a link is removed before the add reaches * the disk. The jaddref dependency is kept linked into the bmsafemap * and inode to prevent the link count or bitmap from reaching the disk * until handle_workitem_remove() re-adjusts the counts and bitmaps as * required. * * Returns 1 if the canceled addref requires journaling of the remove and * 0 otherwise. */ static int cancel_jaddref(jaddref, inodedep, wkhd) struct jaddref *jaddref; struct inodedep *inodedep; struct workhead *wkhd; { struct inoref *inoref; struct jsegdep *jsegdep; int needsj; KASSERT((jaddref->ja_state & COMPLETE) == 0, ("cancel_jaddref: Canceling complete jaddref")); if (jaddref->ja_state & (INPROGRESS | COMPLETE)) needsj = 1; else needsj = 0; if (inodedep == NULL) if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino, 0, &inodedep) == 0) panic("cancel_jaddref: Lost inodedep"); /* * We must adjust the nlink of any reference operation that follows * us so that it is consistent with the in-memory reference. This * ensures that inode nlink rollbacks always have the correct link. */ if (needsj == 0) { for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref; inoref = TAILQ_NEXT(inoref, if_deps)) { if (inoref->if_state & GOINGAWAY) break; inoref->if_nlink--; } } jsegdep = inoref_jseg(&jaddref->ja_ref); if (jaddref->ja_state & NEWBLOCK) move_newblock_dep(jaddref, inodedep); wake_worklist(&jaddref->ja_list); jaddref->ja_mkdir = NULL; if (jaddref->ja_state & INPROGRESS) { jaddref->ja_state &= ~INPROGRESS; WORKLIST_REMOVE(&jaddref->ja_list); jwork_insert(wkhd, jsegdep); } else { free_jsegdep(jsegdep); if (jaddref->ja_state & DEPCOMPLETE) remove_from_journal(&jaddref->ja_list); } jaddref->ja_state |= (GOINGAWAY | DEPCOMPLETE); /* * Leave NEWBLOCK jaddrefs on the inodedep so handle_workitem_remove * can arrange for them to be freed with the bitmap. Otherwise we * no longer need this addref attached to the inoreflst and it * will incorrectly adjust nlink if we leave it. */ if ((jaddref->ja_state & NEWBLOCK) == 0) { TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps); jaddref->ja_state |= COMPLETE; free_jaddref(jaddref); return (needsj); } /* * Leave the head of the list for jsegdeps for fast merging. */ if (LIST_FIRST(wkhd) != NULL) { jaddref->ja_state |= ONWORKLIST; LIST_INSERT_AFTER(LIST_FIRST(wkhd), &jaddref->ja_list, wk_list); } else WORKLIST_INSERT(wkhd, &jaddref->ja_list); return (needsj); } /* * Attempt to free a jaddref structure when some work completes. This * should only succeed once the entry is written and all dependencies have * been notified. 
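 */

/*
 * Editor's addition, not part of the original source: cancel_jaddref() above
 * walks the remaining inode references with TAILQ_NEXT() and decrements their
 * recorded link counts so later rollbacks stay consistent.  The standalone
 * sketch below shows that list walk with <sys/queue.h>; the structure is a
 * simplified stand-in and the GOINGAWAY check is omitted.
 */
#if 0
#include <sys/queue.h>
#include <stdio.h>

/* Simplified stand-in for an inode reference record. */
struct inoref {
        TAILQ_ENTRY(inoref)     if_deps;
        int                     if_nlink;
};
TAILQ_HEAD(inoreflst, inoref);

/* Decrement the link count recorded by every reference after 'canceled'. */
static void
adjust_later_refs(struct inoref *canceled)
{
        struct inoref *inoref;

        for (inoref = TAILQ_NEXT(canceled, if_deps); inoref != NULL;
            inoref = TAILQ_NEXT(inoref, if_deps))
                inoref->if_nlink--;
}

int
main(void)
{
        struct inoreflst head = TAILQ_HEAD_INITIALIZER(head);
        struct inoref refs[3] = {
                { .if_nlink = 1 }, { .if_nlink = 2 }, { .if_nlink = 3 }
        };
        int i;

        for (i = 0; i < 3; i++)
                TAILQ_INSERT_TAIL(&head, &refs[i], if_deps);
        adjust_later_refs(&refs[0]);    /* cancel the first reference */
        for (i = 0; i < 3; i++)
                printf("ref %d nlink %d\n", i, refs[i].if_nlink);
        return (0);
}
#endif
/*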
*/ static void free_jaddref(jaddref) struct jaddref *jaddref; { if ((jaddref->ja_state & ALLCOMPLETE) != ALLCOMPLETE) return; if (jaddref->ja_ref.if_jsegdep) panic("free_jaddref: segdep attached to jaddref %p(0x%X)\n", jaddref, jaddref->ja_state); if (jaddref->ja_state & NEWBLOCK) LIST_REMOVE(jaddref, ja_bmdeps); if (jaddref->ja_state & (INPROGRESS | ONWORKLIST)) panic("free_jaddref: Bad state %p(0x%X)", jaddref, jaddref->ja_state); if (jaddref->ja_mkdir != NULL) panic("free_jaddref: Work pending, 0x%X\n", jaddref->ja_state); WORKITEM_FREE(jaddref, D_JADDREF); } /* * Free a jremref structure once it has been written or discarded. */ static void free_jremref(jremref) struct jremref *jremref; { if (jremref->jr_ref.if_jsegdep) free_jsegdep(jremref->jr_ref.if_jsegdep); if (jremref->jr_state & INPROGRESS) panic("free_jremref: IO still pending"); WORKITEM_FREE(jremref, D_JREMREF); } /* * Free a jnewblk structure. */ static void free_jnewblk(jnewblk) struct jnewblk *jnewblk; { if ((jnewblk->jn_state & ALLCOMPLETE) != ALLCOMPLETE) return; LIST_REMOVE(jnewblk, jn_deps); if (jnewblk->jn_dep != NULL) panic("free_jnewblk: Dependency still attached."); WORKITEM_FREE(jnewblk, D_JNEWBLK); } /* * Cancel a jnewblk which has been been made redundant by frag extension. */ static void cancel_jnewblk(jnewblk, wkhd) struct jnewblk *jnewblk; struct workhead *wkhd; { struct jsegdep *jsegdep; CTR1(KTR_SUJ, "cancel_jnewblk: blkno %jd", jnewblk->jn_blkno); jsegdep = jnewblk->jn_jsegdep; if (jnewblk->jn_jsegdep == NULL || jnewblk->jn_dep == NULL) panic("cancel_jnewblk: Invalid state"); jnewblk->jn_jsegdep = NULL; jnewblk->jn_dep = NULL; jnewblk->jn_state |= GOINGAWAY; if (jnewblk->jn_state & INPROGRESS) { jnewblk->jn_state &= ~INPROGRESS; WORKLIST_REMOVE(&jnewblk->jn_list); jwork_insert(wkhd, jsegdep); } else { free_jsegdep(jsegdep); remove_from_journal(&jnewblk->jn_list); } wake_worklist(&jnewblk->jn_list); WORKLIST_INSERT(wkhd, &jnewblk->jn_list); } static void free_jblkdep(jblkdep) struct jblkdep *jblkdep; { if (jblkdep->jb_list.wk_type == D_JFREEBLK) WORKITEM_FREE(jblkdep, D_JFREEBLK); else if (jblkdep->jb_list.wk_type == D_JTRUNC) WORKITEM_FREE(jblkdep, D_JTRUNC); else panic("free_jblkdep: Unexpected type %s", TYPENAME(jblkdep->jb_list.wk_type)); } /* * Free a single jseg once it is no longer referenced in memory or on * disk. Reclaim journal blocks and dependencies waiting for the segment * to disappear. */ static void free_jseg(jseg, jblocks) struct jseg *jseg; struct jblocks *jblocks; { struct freework *freework; /* * Free freework structures that were lingering to indicate freed * indirect blocks that forced journal write ordering on reallocate. */ while ((freework = LIST_FIRST(&jseg->js_indirs)) != NULL) indirblk_remove(freework); if (jblocks->jb_oldestseg == jseg) jblocks->jb_oldestseg = TAILQ_NEXT(jseg, js_next); TAILQ_REMOVE(&jblocks->jb_segs, jseg, js_next); jblocks_free(jblocks, jseg->js_list.wk_mp, jseg->js_size); KASSERT(LIST_EMPTY(&jseg->js_entries), ("free_jseg: Freed jseg has valid entries.")); WORKITEM_FREE(jseg, D_JSEG); } /* * Free all jsegs that meet the criteria for being reclaimed and update * oldestseg. */ static void free_jsegs(jblocks) struct jblocks *jblocks; { struct jseg *jseg; /* * Free only those jsegs which have none allocated before them to * preserve the journal space ordering. */ while ((jseg = TAILQ_FIRST(&jblocks->jb_segs)) != NULL) { /* * Only reclaim space when nothing depends on this journal * set and another set has written that it is no longer * valid. 
*/ if (jseg->js_refs != 0) { jblocks->jb_oldestseg = jseg; return; } if ((jseg->js_state & ALLCOMPLETE) != ALLCOMPLETE) break; if (jseg->js_seq > jblocks->jb_oldestwrseq) break; /* * We can free jsegs that didn't write entries when * oldestwrseq == js_seq. */ if (jseg->js_seq == jblocks->jb_oldestwrseq && jseg->js_cnt != 0) break; free_jseg(jseg, jblocks); } /* * If we exited the loop above we still must discover the * oldest valid segment. */ if (jseg) for (jseg = jblocks->jb_oldestseg; jseg != NULL; jseg = TAILQ_NEXT(jseg, js_next)) if (jseg->js_refs != 0) break; jblocks->jb_oldestseg = jseg; /* * The journal has no valid records but some jsegs may still be * waiting on oldestwrseq to advance. We force a small record * out to permit these lingering records to be reclaimed. */ if (jblocks->jb_oldestseg == NULL && !TAILQ_EMPTY(&jblocks->jb_segs)) jblocks->jb_needseg = 1; } /* * Release one reference to a jseg and free it if the count reaches 0. This * should eventually reclaim journal space as well. */ static void rele_jseg(jseg) struct jseg *jseg; { KASSERT(jseg->js_refs > 0, ("free_jseg: Invalid refcnt %d", jseg->js_refs)); if (--jseg->js_refs != 0) return; free_jsegs(jseg->js_jblocks); } /* * Release a jsegdep and decrement the jseg count. */ static void free_jsegdep(jsegdep) struct jsegdep *jsegdep; { if (jsegdep->jd_seg) rele_jseg(jsegdep->jd_seg); WORKITEM_FREE(jsegdep, D_JSEGDEP); } /* * Wait for a journal item to make it to disk. Initiate journal processing * if required. */ static int jwait(wk, waitfor) struct worklist *wk; int waitfor; { LOCK_OWNED(VFSTOUFS(wk->wk_mp)); /* * Blocking journal waits cause slow synchronous behavior. Record * stats on the frequency of these blocking operations. */ if (waitfor == MNT_WAIT) { stat_journal_wait++; switch (wk->wk_type) { case D_JREMREF: case D_JMVREF: stat_jwait_filepage++; break; case D_JTRUNC: case D_JFREEBLK: stat_jwait_freeblks++; break; case D_JNEWBLK: stat_jwait_newblk++; break; case D_JADDREF: stat_jwait_inode++; break; default: break; } } /* * If IO has not started we process the journal. We can't mark the * worklist item as IOWAITING because we drop the lock while * processing the journal and the worklist entry may be freed after * this point. The caller may call back in and re-issue the request. */ if ((wk->wk_state & INPROGRESS) == 0) { softdep_process_journal(wk->wk_mp, wk, waitfor); if (waitfor != MNT_WAIT) return (EBUSY); return (0); } if (waitfor != MNT_WAIT) return (EBUSY); wait_worklist(wk, "jwait"); return (0); } /* * Lookup an inodedep based on an inode pointer and set the nlinkdelta as * appropriate. This is a convenience function to reduce duplicate code * for the setup and revert functions below. */ static struct inodedep * inodedep_lookup_ip(ip) struct inode *ip; { struct inodedep *inodedep; KASSERT(ip->i_nlink >= ip->i_effnlink, ("inodedep_lookup_ip: bad delta")); (void) inodedep_lookup(ITOVFS(ip), ip->i_number, DEPALLOC, &inodedep); inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked")); return (inodedep); } /* * Called prior to creating a new inode and linking it to a directory. The * jaddref structure must already be allocated by softdep_setup_inomapdep * and it is discovered here so we can initialize the mode and update * nlinkdelta. 
*/ void softdep_setup_create(dp, ip) struct inode *dp; struct inode *ip; { struct inodedep *inodedep; struct jaddref *jaddref; struct vnode *dvp; KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, ("softdep_setup_create called on non-softdep filesystem")); KASSERT(ip->i_nlink == 1, ("softdep_setup_create: Invalid link count.")); dvp = ITOV(dp); ACQUIRE_LOCK(ITOUMP(dp)); inodedep = inodedep_lookup_ip(ip); if (DOINGSUJ(dvp)) { jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, inoreflst); KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number, ("softdep_setup_create: No addref structure present.")); } softdep_prelink(dvp, NULL); FREE_LOCK(ITOUMP(dp)); } /* * Create a jaddref structure to track the addition of a DOTDOT link when * we are reparenting an inode as part of a rename. This jaddref will be * found by softdep_setup_directory_change. Adjusts nlinkdelta for * non-journaling softdep. */ void softdep_setup_dotdot_link(dp, ip) struct inode *dp; struct inode *ip; { struct inodedep *inodedep; struct jaddref *jaddref; struct vnode *dvp; KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, ("softdep_setup_dotdot_link called on non-softdep filesystem")); dvp = ITOV(dp); jaddref = NULL; /* * We don't set MKDIR_PARENT as this is not tied to a mkdir and * is used as a normal link would be. */ if (DOINGSUJ(dvp)) jaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET, dp->i_effnlink - 1, dp->i_mode); ACQUIRE_LOCK(ITOUMP(dp)); inodedep = inodedep_lookup_ip(dp); if (jaddref) TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps); softdep_prelink(dvp, ITOV(ip)); FREE_LOCK(ITOUMP(dp)); } /* * Create a jaddref structure to track a new link to an inode. The directory * offset is not known until softdep_setup_directory_add or * softdep_setup_directory_change. Adjusts nlinkdelta for non-journaling * softdep. */ void softdep_setup_link(dp, ip) struct inode *dp; struct inode *ip; { struct inodedep *inodedep; struct jaddref *jaddref; struct vnode *dvp; KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, ("softdep_setup_link called on non-softdep filesystem")); dvp = ITOV(dp); jaddref = NULL; if (DOINGSUJ(dvp)) jaddref = newjaddref(dp, ip->i_number, 0, ip->i_effnlink - 1, ip->i_mode); ACQUIRE_LOCK(ITOUMP(dp)); inodedep = inodedep_lookup_ip(ip); if (jaddref) TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps); softdep_prelink(dvp, ITOV(ip)); FREE_LOCK(ITOUMP(dp)); } /* * Called to create the jaddref structures to track . and .. references as * well as lookup and further initialize the incomplete jaddref created * by softdep_setup_inomapdep when the inode was allocated. Adjusts * nlinkdelta for non-journaling softdep. 
*/ void softdep_setup_mkdir(dp, ip) struct inode *dp; struct inode *ip; { struct inodedep *inodedep; struct jaddref *dotdotaddref; struct jaddref *dotaddref; struct jaddref *jaddref; struct vnode *dvp; KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, ("softdep_setup_mkdir called on non-softdep filesystem")); dvp = ITOV(dp); dotaddref = dotdotaddref = NULL; if (DOINGSUJ(dvp)) { dotaddref = newjaddref(ip, ip->i_number, DOT_OFFSET, 1, ip->i_mode); dotaddref->ja_state |= MKDIR_BODY; dotdotaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET, dp->i_effnlink - 1, dp->i_mode); dotdotaddref->ja_state |= MKDIR_PARENT; } ACQUIRE_LOCK(ITOUMP(dp)); inodedep = inodedep_lookup_ip(ip); if (DOINGSUJ(dvp)) { jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, inoreflst); KASSERT(jaddref != NULL, ("softdep_setup_mkdir: No addref structure present.")); KASSERT(jaddref->ja_parent == dp->i_number, ("softdep_setup_mkdir: bad parent %ju", (uintmax_t)jaddref->ja_parent)); TAILQ_INSERT_BEFORE(&jaddref->ja_ref, &dotaddref->ja_ref, if_deps); } inodedep = inodedep_lookup_ip(dp); if (DOINGSUJ(dvp)) TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &dotdotaddref->ja_ref, if_deps); softdep_prelink(ITOV(dp), NULL); FREE_LOCK(ITOUMP(dp)); } /* * Called to track nlinkdelta of the inode and parent directories prior to * unlinking a directory. */ void softdep_setup_rmdir(dp, ip) struct inode *dp; struct inode *ip; { struct vnode *dvp; KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, ("softdep_setup_rmdir called on non-softdep filesystem")); dvp = ITOV(dp); ACQUIRE_LOCK(ITOUMP(dp)); (void) inodedep_lookup_ip(ip); (void) inodedep_lookup_ip(dp); softdep_prelink(dvp, ITOV(ip)); FREE_LOCK(ITOUMP(dp)); } /* * Called to track nlinkdelta of the inode and parent directories prior to * unlink. */ void softdep_setup_unlink(dp, ip) struct inode *dp; struct inode *ip; { struct vnode *dvp; KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, ("softdep_setup_unlink called on non-softdep filesystem")); dvp = ITOV(dp); ACQUIRE_LOCK(ITOUMP(dp)); (void) inodedep_lookup_ip(ip); (void) inodedep_lookup_ip(dp); softdep_prelink(dvp, ITOV(ip)); FREE_LOCK(ITOUMP(dp)); } /* * Called to release the journal structures created by a failed non-directory * creation. Adjusts nlinkdelta for non-journaling softdep. */ void softdep_revert_create(dp, ip) struct inode *dp; struct inode *ip; { struct inodedep *inodedep; struct jaddref *jaddref; struct vnode *dvp; KASSERT(MOUNTEDSOFTDEP(ITOVFS((dp))) != 0, ("softdep_revert_create called on non-softdep filesystem")); dvp = ITOV(dp); ACQUIRE_LOCK(ITOUMP(dp)); inodedep = inodedep_lookup_ip(ip); if (DOINGSUJ(dvp)) { jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, inoreflst); KASSERT(jaddref->ja_parent == dp->i_number, ("softdep_revert_create: addref parent mismatch")); cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); } FREE_LOCK(ITOUMP(dp)); } /* * Called to release the journal structures created by a failed link * addition. Adjusts nlinkdelta for non-journaling softdep. 
*/ void softdep_revert_link(dp, ip) struct inode *dp; struct inode *ip; { struct inodedep *inodedep; struct jaddref *jaddref; struct vnode *dvp; KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, ("softdep_revert_link called on non-softdep filesystem")); dvp = ITOV(dp); ACQUIRE_LOCK(ITOUMP(dp)); inodedep = inodedep_lookup_ip(ip); if (DOINGSUJ(dvp)) { jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, inoreflst); KASSERT(jaddref->ja_parent == dp->i_number, ("softdep_revert_link: addref parent mismatch")); cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); } FREE_LOCK(ITOUMP(dp)); } /* * Called to release the journal structures created by a failed mkdir * attempt. Adjusts nlinkdelta for non-journaling softdep. */ void softdep_revert_mkdir(dp, ip) struct inode *dp; struct inode *ip; { struct inodedep *inodedep; struct jaddref *jaddref; struct jaddref *dotaddref; struct vnode *dvp; KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, ("softdep_revert_mkdir called on non-softdep filesystem")); dvp = ITOV(dp); ACQUIRE_LOCK(ITOUMP(dp)); inodedep = inodedep_lookup_ip(dp); if (DOINGSUJ(dvp)) { jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, inoreflst); KASSERT(jaddref->ja_parent == ip->i_number, ("softdep_revert_mkdir: dotdot addref parent mismatch")); cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); } inodedep = inodedep_lookup_ip(ip); if (DOINGSUJ(dvp)) { jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, inoreflst); KASSERT(jaddref->ja_parent == dp->i_number, ("softdep_revert_mkdir: addref parent mismatch")); dotaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref, inoreflst, if_deps); cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); KASSERT(dotaddref->ja_parent == ip->i_number, ("softdep_revert_mkdir: dot addref parent mismatch")); cancel_jaddref(dotaddref, inodedep, &inodedep->id_inowait); } FREE_LOCK(ITOUMP(dp)); } /* * Called to correct nlinkdelta after a failed rmdir. */ void softdep_revert_rmdir(dp, ip) struct inode *dp; struct inode *ip; { KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, ("softdep_revert_rmdir called on non-softdep filesystem")); ACQUIRE_LOCK(ITOUMP(dp)); (void) inodedep_lookup_ip(ip); (void) inodedep_lookup_ip(dp); FREE_LOCK(ITOUMP(dp)); } /* * Protecting the freemaps (or bitmaps). * * To eliminate the need to execute fsck before mounting a filesystem * after a power failure, one must (conservatively) guarantee that the * on-disk copy of the bitmaps never indicate that a live inode or block is * free. So, when a block or inode is allocated, the bitmap should be * updated (on disk) before any new pointers. When a block or inode is * freed, the bitmap should not be updated until all pointers have been * reset. The latter dependency is handled by the delayed de-allocation * approach described below for block and inode de-allocation. The former * dependency is handled by calling the following procedure when a block or * inode is allocated. When an inode is allocated an "inodedep" is created * with its DEPCOMPLETE flag cleared until its bitmap is written to disk. * Each "inodedep" is also inserted into the hash indexing structure so * that any additional link additions can be made dependent on the inode * allocation. * * The ufs filesystem maintains a number of free block counts (e.g., per * cylinder group, per cylinder and per pair) * in addition to the bitmaps. These counts are used to improve efficiency * during allocation and therefore must be consistent with the bitmaps. 
* There is no convenient way to guarantee post-crash consistency of these * counts with simple update ordering, for two main reasons: (1) The counts * and bitmaps for a single cylinder group block are not in the same disk * sector. If a disk write is interrupted (e.g., by power failure), one may * be written and the other not. (2) Some of the counts are located in the * superblock rather than the cylinder group block. So, we focus our soft * updates implementation on protecting the bitmaps. When mounting a * filesystem, we recompute the auxiliary counts from the bitmaps. */ /* * Called just after updating the cylinder group block to allocate an inode. */ void softdep_setup_inomapdep(bp, ip, newinum, mode) struct buf *bp; /* buffer for cylgroup block with inode map */ struct inode *ip; /* inode related to allocation */ ino_t newinum; /* new inode number being allocated */ int mode; { struct inodedep *inodedep; struct bmsafemap *bmsafemap; struct jaddref *jaddref; struct mount *mp; struct fs *fs; mp = ITOVFS(ip); KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_setup_inomapdep called on non-softdep filesystem")); fs = VFSTOUFS(mp)->um_fs; jaddref = NULL; /* * Allocate the journal reference add structure so that the bitmap * can be dependent on it. */ if (MOUNTEDSUJ(mp)) { jaddref = newjaddref(ip, newinum, 0, 0, mode); jaddref->ja_state |= NEWBLOCK; } /* * Create a dependency for the newly allocated inode. * Panic if it already exists as something is seriously wrong. * Otherwise add it to the dependency list for the buffer holding * the cylinder group map from which it was allocated. * * We have to preallocate a bmsafemap entry in case it is needed * in bmsafemap_lookup since once we allocate the inodedep, we * have to finish initializing it before we can FREE_LOCK(). * By preallocating, we avoid FREE_LOCK() while doing a malloc * in bmsafemap_lookup. We cannot call bmsafemap_lookup before * creating the inodedep as it can be freed during the time * that we FREE_LOCK() while allocating the inodedep. We must * call workitem_alloc() before entering the locked section as * it also acquires the lock and we must avoid trying doing so * recursively. */ bmsafemap = malloc(sizeof(struct bmsafemap), M_BMSAFEMAP, M_SOFTDEP_FLAGS); workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp); ACQUIRE_LOCK(ITOUMP(ip)); if ((inodedep_lookup(mp, newinum, DEPALLOC, &inodedep))) panic("softdep_setup_inomapdep: dependency %p for new" "inode already exists", inodedep); bmsafemap = bmsafemap_lookup(mp, bp, ino_to_cg(fs, newinum), bmsafemap); if (jaddref) { LIST_INSERT_HEAD(&bmsafemap->sm_jaddrefhd, jaddref, ja_bmdeps); TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps); } else { inodedep->id_state |= ONDEPLIST; LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps); } inodedep->id_bmsafemap = bmsafemap; inodedep->id_state &= ~DEPCOMPLETE; FREE_LOCK(ITOUMP(ip)); } /* * Called just after updating the cylinder group block to * allocate block or fragment. */ void softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags) struct buf *bp; /* buffer for cylgroup block with block map */ struct mount *mp; /* filesystem doing allocation */ ufs2_daddr_t newblkno; /* number of newly allocated block */ int frags; /* Number of fragments. */ int oldfrags; /* Previous number of fragments for extend. 
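 */

/*
 * Editor's addition, not part of the original source: a toy, standalone model
 * of the ordering rule stated in the "Protecting the freemaps" comment above.
 * On allocation the bitmap reaches "disk" before the pointer; on free the
 * pointer is cleared before the bitmap.  The two globals and the check are
 * hypothetical and exist only to show the invariant.
 */
#if 0
#include <assert.h>
#include <stdio.h>

static int disk_bitmap_used;    /* 1 = block marked allocated on disk */
static int disk_pointer;        /* 0 = no pointer, else the block number */

/* The invariant: the disk never shows a pointer to a "free" block. */
static void
check(void)
{

        assert(disk_pointer == 0 || disk_bitmap_used == 1);
}

static void
alloc_block(int blk)
{

        disk_bitmap_used = 1;   /* bitmap write goes first */
        check();
        disk_pointer = blk;     /* ... then the pointer */
        check();
}

static void
free_block(void)
{

        disk_pointer = 0;       /* pointer reset goes first */
        check();
        disk_bitmap_used = 0;   /* ... then the bitmap */
        check();
}

int
main(void)
{

        alloc_block(42);
        free_block();
        printf("ordering invariant held\n");
        return (0);
}
#endif
/*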
*/ { struct newblk *newblk; struct bmsafemap *bmsafemap; struct jnewblk *jnewblk; struct ufsmount *ump; struct fs *fs; KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_setup_blkmapdep called on non-softdep filesystem")); ump = VFSTOUFS(mp); fs = ump->um_fs; jnewblk = NULL; /* * Create a dependency for the newly allocated block. * Add it to the dependency list for the buffer holding * the cylinder group map from which it was allocated. */ if (MOUNTEDSUJ(mp)) { jnewblk = malloc(sizeof(*jnewblk), M_JNEWBLK, M_SOFTDEP_FLAGS); workitem_alloc(&jnewblk->jn_list, D_JNEWBLK, mp); jnewblk->jn_jsegdep = newjsegdep(&jnewblk->jn_list); jnewblk->jn_state = ATTACHED; jnewblk->jn_blkno = newblkno; jnewblk->jn_frags = frags; jnewblk->jn_oldfrags = oldfrags; #ifdef SUJ_DEBUG { struct cg *cgp; uint8_t *blksfree; long bno; int i; cgp = (struct cg *)bp->b_data; blksfree = cg_blksfree(cgp); bno = dtogd(fs, jnewblk->jn_blkno); for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) { if (isset(blksfree, bno + i)) panic("softdep_setup_blkmapdep: " "free fragment %d from %d-%d " "state 0x%X dep %p", i, jnewblk->jn_oldfrags, jnewblk->jn_frags, jnewblk->jn_state, jnewblk->jn_dep); } } #endif } CTR3(KTR_SUJ, "softdep_setup_blkmapdep: blkno %jd frags %d oldfrags %d", newblkno, frags, oldfrags); ACQUIRE_LOCK(ump); if (newblk_lookup(mp, newblkno, DEPALLOC, &newblk) != 0) panic("softdep_setup_blkmapdep: found block"); newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, newblkno), NULL); if (jnewblk) { jnewblk->jn_dep = (struct worklist *)newblk; LIST_INSERT_HEAD(&bmsafemap->sm_jnewblkhd, jnewblk, jn_deps); } else { newblk->nb_state |= ONDEPLIST; LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps); } newblk->nb_bmsafemap = bmsafemap; newblk->nb_jnewblk = jnewblk; FREE_LOCK(ump); } #define BMSAFEMAP_HASH(ump, cg) \ (&(ump)->bmsafemap_hashtbl[(cg) & (ump)->bmsafemap_hash_size]) static int bmsafemap_find(bmsafemaphd, cg, bmsafemapp) struct bmsafemap_hashhead *bmsafemaphd; int cg; struct bmsafemap **bmsafemapp; { struct bmsafemap *bmsafemap; LIST_FOREACH(bmsafemap, bmsafemaphd, sm_hash) if (bmsafemap->sm_cg == cg) break; if (bmsafemap) { *bmsafemapp = bmsafemap; return (1); } *bmsafemapp = NULL; return (0); } /* * Find the bmsafemap associated with a cylinder group buffer. * If none exists, create one. The buffer must be locked when * this routine is called and this routine must be called with * the softdep lock held. To avoid giving up the lock while * allocating a new bmsafemap, a preallocated bmsafemap may be * provided. If it is provided but not needed, it is freed. 
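 */

/*
 * Editor's addition, not part of the original source: as the comment above
 * explains, bmsafemap_lookup() can be handed a preallocated bmsafemap so the
 * caller may malloc() before taking the per-filesystem lock and never drop it
 * mid-lookup; an unneeded preallocation is freed.  The standalone pthread
 * sketch below shows that allocate-outside-the-lock, install-or-discard
 * pattern; the single-slot cache and all names are hypothetical.
 */
#if 0
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
        int     cg;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *cache;     /* protected by 'lock' */

/* Find or create the entry for 'cg' without ever dropping the lock. */
static struct entry *
lookup(int cg)
{
        struct entry *new, *found;

        new = malloc(sizeof(*new));     /* preallocate; may go unused */
        if (new == NULL)
                abort();
        new->cg = cg;

        pthread_mutex_lock(&lock);
        if (cache != NULL && cache->cg == cg) {
                found = cache;
                pthread_mutex_unlock(&lock);
                free(new);              /* hit: discard the preallocation */
                return (found);
        }
        cache = new;                    /* miss: install the preallocation */
        pthread_mutex_unlock(&lock);
        return (new);
}

int
main(void)
{

        printf("first  %p\n", (void *)lookup(7));
        printf("second %p\n", (void *)lookup(7));       /* same entry */
        return (0);
}
#endif
/*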
*/ static struct bmsafemap * bmsafemap_lookup(mp, bp, cg, newbmsafemap) struct mount *mp; struct buf *bp; int cg; struct bmsafemap *newbmsafemap; { struct bmsafemap_hashhead *bmsafemaphd; struct bmsafemap *bmsafemap, *collision; struct worklist *wk; struct ufsmount *ump; ump = VFSTOUFS(mp); LOCK_OWNED(ump); KASSERT(bp != NULL, ("bmsafemap_lookup: missing buffer")); LIST_FOREACH(wk, &bp->b_dep, wk_list) { if (wk->wk_type == D_BMSAFEMAP) { if (newbmsafemap) WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP); return (WK_BMSAFEMAP(wk)); } } bmsafemaphd = BMSAFEMAP_HASH(ump, cg); if (bmsafemap_find(bmsafemaphd, cg, &bmsafemap) == 1) { if (newbmsafemap) WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP); return (bmsafemap); } if (newbmsafemap) { bmsafemap = newbmsafemap; } else { FREE_LOCK(ump); bmsafemap = malloc(sizeof(struct bmsafemap), M_BMSAFEMAP, M_SOFTDEP_FLAGS); workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp); ACQUIRE_LOCK(ump); } bmsafemap->sm_buf = bp; LIST_INIT(&bmsafemap->sm_inodedephd); LIST_INIT(&bmsafemap->sm_inodedepwr); LIST_INIT(&bmsafemap->sm_newblkhd); LIST_INIT(&bmsafemap->sm_newblkwr); LIST_INIT(&bmsafemap->sm_jaddrefhd); LIST_INIT(&bmsafemap->sm_jnewblkhd); LIST_INIT(&bmsafemap->sm_freehd); LIST_INIT(&bmsafemap->sm_freewr); if (bmsafemap_find(bmsafemaphd, cg, &collision) == 1) { WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); return (collision); } bmsafemap->sm_cg = cg; LIST_INSERT_HEAD(bmsafemaphd, bmsafemap, sm_hash); LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next); WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list); return (bmsafemap); } /* * Direct block allocation dependencies. * * When a new block is allocated, the corresponding disk locations must be * initialized (with zeros or new data) before the on-disk inode points to * them. Also, the freemap from which the block was allocated must be * updated (on disk) before the inode's pointer. These two dependencies are * independent of each other and are needed for all file blocks and indirect * blocks that are pointed to directly by the inode. Just before the * "in-core" version of the inode is updated with a newly allocated block * number, a procedure (below) is called to setup allocation dependency * structures. These structures are removed when the corresponding * dependencies are satisfied or when the block allocation becomes obsolete * (i.e., the file is deleted, the block is de-allocated, or the block is a * fragment that gets upgraded). All of these cases are handled in * procedures described later. * * When a file extension causes a fragment to be upgraded, either to a larger * fragment or to a full block, the on-disk location may change (if the * previous fragment could not simply be extended). In this case, the old * fragment must be de-allocated, but not until after the inode's pointer has * been updated. In most cases, this is handled by later procedures, which * will construct a "freefrag" structure to be added to the workitem queue * when the inode update is complete (or obsolete). The main exception to * this is when an allocation occurs while a pending allocation dependency * (for the same block pointer) remains. This case is handled in the main * allocation dependency setup procedure by immediately freeing the * unreferenced fragments. 
*/ void softdep_setup_allocdirect(ip, off, newblkno, oldblkno, newsize, oldsize, bp) struct inode *ip; /* inode to which block is being added */ ufs_lbn_t off; /* block pointer within inode */ ufs2_daddr_t newblkno; /* disk block number being added */ ufs2_daddr_t oldblkno; /* previous block number, 0 unless frag */ long newsize; /* size of new block */ long oldsize; /* size of new block */ struct buf *bp; /* bp for allocated block */ { struct allocdirect *adp, *oldadp; struct allocdirectlst *adphead; struct freefrag *freefrag; struct inodedep *inodedep; struct pagedep *pagedep; struct jnewblk *jnewblk; struct newblk *newblk; struct mount *mp; ufs_lbn_t lbn; lbn = bp->b_lblkno; mp = ITOVFS(ip); KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_setup_allocdirect called on non-softdep filesystem")); if (oldblkno && oldblkno != newblkno) /* * The usual case is that a smaller fragment that * was just allocated has been replaced with a bigger * fragment or a full-size block. If it is marked as * B_DELWRI, the current contents have not been written * to disk. It is possible that the block was written * earlier, but very uncommon. If the block has never * been written, there is no need to send a BIO_DELETE * for it when it is freed. The gain from avoiding the * TRIMs for the common case of unwritten blocks far * exceeds the cost of the write amplification for the * uncommon case of failing to send a TRIM for a block * that had been written. */ freefrag = newfreefrag(ip, oldblkno, oldsize, lbn, (bp->b_flags & B_DELWRI) != 0 ? NOTRIM_KEY : SINGLETON_KEY); else freefrag = NULL; CTR6(KTR_SUJ, "softdep_setup_allocdirect: ino %d blkno %jd oldblkno %jd " "off %jd newsize %ld oldsize %d", ip->i_number, newblkno, oldblkno, off, newsize, oldsize); ACQUIRE_LOCK(ITOUMP(ip)); if (off >= UFS_NDADDR) { if (lbn > 0) panic("softdep_setup_allocdirect: bad lbn %jd, off %jd", lbn, off); /* allocating an indirect block */ if (oldblkno != 0) panic("softdep_setup_allocdirect: non-zero indir"); } else { if (off != lbn) panic("softdep_setup_allocdirect: lbn %jd != off %jd", lbn, off); /* * Allocating a direct block. * * If we are allocating a directory block, then we must * allocate an associated pagedep to track additions and * deletions. */ if ((ip->i_mode & IFMT) == IFDIR) pagedep_lookup(mp, bp, ip->i_number, off, DEPALLOC, &pagedep); } if (newblk_lookup(mp, newblkno, 0, &newblk) == 0) panic("softdep_setup_allocdirect: lost block"); KASSERT(newblk->nb_list.wk_type == D_NEWBLK, ("softdep_setup_allocdirect: newblk already initialized")); /* * Convert the newblk to an allocdirect. */ WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT); adp = (struct allocdirect *)newblk; newblk->nb_freefrag = freefrag; adp->ad_offset = off; adp->ad_oldblkno = oldblkno; adp->ad_newsize = newsize; adp->ad_oldsize = oldsize; /* * Finish initializing the journal. 
*/ if ((jnewblk = newblk->nb_jnewblk) != NULL) { jnewblk->jn_ino = ip->i_number; jnewblk->jn_lbn = lbn; add_to_journal(&jnewblk->jn_list); } if (freefrag && freefrag->ff_jdep != NULL && freefrag->ff_jdep->wk_type == D_JFREEFRAG) add_to_journal(freefrag->ff_jdep); inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); adp->ad_inodedep = inodedep; WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list); /* * The list of allocdirects must be kept in sorted and ascending * order so that the rollback routines can quickly determine the * first uncommitted block (the size of the file stored on disk * ends at the end of the lowest committed fragment, or if there * are no fragments, at the end of the highest committed block). * Since files generally grow, the typical case is that the new * block is to be added at the end of the list. We speed this * special case by checking against the last allocdirect in the * list before laboriously traversing the list looking for the * insertion point. */ adphead = &inodedep->id_newinoupdt; oldadp = TAILQ_LAST(adphead, allocdirectlst); if (oldadp == NULL || oldadp->ad_offset <= off) { /* insert at end of list */ TAILQ_INSERT_TAIL(adphead, adp, ad_next); if (oldadp != NULL && oldadp->ad_offset == off) allocdirect_merge(adphead, adp, oldadp); FREE_LOCK(ITOUMP(ip)); return; } TAILQ_FOREACH(oldadp, adphead, ad_next) { if (oldadp->ad_offset >= off) break; } if (oldadp == NULL) panic("softdep_setup_allocdirect: lost entry"); /* insert in middle of list */ TAILQ_INSERT_BEFORE(oldadp, adp, ad_next); if (oldadp->ad_offset == off) allocdirect_merge(adphead, adp, oldadp); FREE_LOCK(ITOUMP(ip)); } /* * Merge a newer and older journal record to be stored either in a * newblock or freefrag. This handles aggregating journal records for * fragment allocation into a second record as well as replacing a * journal free with an aborted journal allocation. A segment for the * oldest record will be placed on wkhd if it has been written. If not * the segment for the newer record will suffice. */ static struct worklist * jnewblk_merge(new, old, wkhd) struct worklist *new; struct worklist *old; struct workhead *wkhd; { struct jnewblk *njnewblk; struct jnewblk *jnewblk; /* Handle NULLs to simplify callers. */ if (new == NULL) return (old); if (old == NULL) return (new); /* Replace a jfreefrag with a jnewblk. */ if (new->wk_type == D_JFREEFRAG) { if (WK_JNEWBLK(old)->jn_blkno != WK_JFREEFRAG(new)->fr_blkno) panic("jnewblk_merge: blkno mismatch: %p, %p", old, new); cancel_jfreefrag(WK_JFREEFRAG(new)); return (old); } if (old->wk_type != D_JNEWBLK || new->wk_type != D_JNEWBLK) panic("jnewblk_merge: Bad type: old %d new %d\n", old->wk_type, new->wk_type); /* * Handle merging of two jnewblk records that describe * different sets of fragments in the same block. */ jnewblk = WK_JNEWBLK(old); njnewblk = WK_JNEWBLK(new); if (jnewblk->jn_blkno != njnewblk->jn_blkno) panic("jnewblk_merge: Merging disparate blocks."); /* * The record may be rolled back in the cg. */ if (jnewblk->jn_state & UNDONE) { jnewblk->jn_state &= ~UNDONE; njnewblk->jn_state |= UNDONE; njnewblk->jn_state &= ~ATTACHED; } /* * We modify the newer addref and free the older so that if neither * has been written the most up-to-date copy will be on disk. If * both have been written but rolled back we only temporarily need * one of them to fix the bits when the cg write completes. 
*/ jnewblk->jn_state |= ATTACHED | COMPLETE; njnewblk->jn_oldfrags = jnewblk->jn_oldfrags; cancel_jnewblk(jnewblk, wkhd); WORKLIST_REMOVE(&jnewblk->jn_list); free_jnewblk(jnewblk); return (new); } /* * Replace an old allocdirect dependency with a newer one. * This routine must be called with splbio interrupts blocked. */ static void allocdirect_merge(adphead, newadp, oldadp) struct allocdirectlst *adphead; /* head of list holding allocdirects */ struct allocdirect *newadp; /* allocdirect being added */ struct allocdirect *oldadp; /* existing allocdirect being checked */ { struct worklist *wk; struct freefrag *freefrag; freefrag = NULL; LOCK_OWNED(VFSTOUFS(newadp->ad_list.wk_mp)); if (newadp->ad_oldblkno != oldadp->ad_newblkno || newadp->ad_oldsize != oldadp->ad_newsize || newadp->ad_offset >= UFS_NDADDR) panic("%s %jd != new %jd || old size %ld != new %ld", "allocdirect_merge: old blkno", (intmax_t)newadp->ad_oldblkno, (intmax_t)oldadp->ad_newblkno, newadp->ad_oldsize, oldadp->ad_newsize); newadp->ad_oldblkno = oldadp->ad_oldblkno; newadp->ad_oldsize = oldadp->ad_oldsize; /* * If the old dependency had a fragment to free or had never * previously had a block allocated, then the new dependency * can immediately post its freefrag and adopt the old freefrag. * This action is done by swapping the freefrag dependencies. * The new dependency gains the old one's freefrag, and the * old one gets the new one and then immediately puts it on * the worklist when it is freed by free_newblk. It is * not possible to do this swap when the old dependency had a * non-zero size but no previous fragment to free. This condition * arises when the new block is an extension of the old block. * Here, the first part of the fragment allocated to the new * dependency is part of the block currently claimed on disk by * the old dependency, so cannot legitimately be freed until the * conditions for the new dependency are fulfilled. */ freefrag = newadp->ad_freefrag; if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) { newadp->ad_freefrag = oldadp->ad_freefrag; oldadp->ad_freefrag = freefrag; } /* * If we are tracking a new directory-block allocation, * move it from the old allocdirect to the new allocdirect. */ if ((wk = LIST_FIRST(&oldadp->ad_newdirblk)) != NULL) { WORKLIST_REMOVE(wk); if (!LIST_EMPTY(&oldadp->ad_newdirblk)) panic("allocdirect_merge: extra newdirblk"); WORKLIST_INSERT(&newadp->ad_newdirblk, wk); } TAILQ_REMOVE(adphead, oldadp, ad_next); /* * We need to move any journal dependencies over to the freefrag * that releases this block if it exists. Otherwise we are * extending an existing block and we'll wait until that is * complete to release the journal space and extend the * new journal to cover this old space as well. */ if (freefrag == NULL) { if (oldadp->ad_newblkno != newadp->ad_newblkno) panic("allocdirect_merge: %jd != %jd", oldadp->ad_newblkno, newadp->ad_newblkno); newadp->ad_block.nb_jnewblk = (struct jnewblk *) jnewblk_merge(&newadp->ad_block.nb_jnewblk->jn_list, &oldadp->ad_block.nb_jnewblk->jn_list, &newadp->ad_block.nb_jwork); oldadp->ad_block.nb_jnewblk = NULL; cancel_newblk(&oldadp->ad_block, NULL, &newadp->ad_block.nb_jwork); } else { wk = (struct worklist *) cancel_newblk(&oldadp->ad_block, &freefrag->ff_list, &freefrag->ff_jwork); freefrag->ff_jdep = jnewblk_merge(freefrag->ff_jdep, wk, &freefrag->ff_jwork); } free_newblk(&oldadp->ad_block); } /* * Allocate a jfreefrag structure to journal a single block free. 
*/ static struct jfreefrag * newjfreefrag(freefrag, ip, blkno, size, lbn) struct freefrag *freefrag; struct inode *ip; ufs2_daddr_t blkno; long size; ufs_lbn_t lbn; { struct jfreefrag *jfreefrag; struct fs *fs; fs = ITOFS(ip); jfreefrag = malloc(sizeof(struct jfreefrag), M_JFREEFRAG, M_SOFTDEP_FLAGS); workitem_alloc(&jfreefrag->fr_list, D_JFREEFRAG, ITOVFS(ip)); jfreefrag->fr_jsegdep = newjsegdep(&jfreefrag->fr_list); jfreefrag->fr_state = ATTACHED | DEPCOMPLETE; jfreefrag->fr_ino = ip->i_number; jfreefrag->fr_lbn = lbn; jfreefrag->fr_blkno = blkno; jfreefrag->fr_frags = numfrags(fs, size); jfreefrag->fr_freefrag = freefrag; return (jfreefrag); } /* * Allocate a new freefrag structure. */ static struct freefrag * newfreefrag(ip, blkno, size, lbn, key) struct inode *ip; ufs2_daddr_t blkno; long size; ufs_lbn_t lbn; u_long key; { struct freefrag *freefrag; struct ufsmount *ump; struct fs *fs; CTR4(KTR_SUJ, "newfreefrag: ino %d blkno %jd size %ld lbn %jd", ip->i_number, blkno, size, lbn); ump = ITOUMP(ip); fs = ump->um_fs; if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag) panic("newfreefrag: frag size"); freefrag = malloc(sizeof(struct freefrag), M_FREEFRAG, M_SOFTDEP_FLAGS); workitem_alloc(&freefrag->ff_list, D_FREEFRAG, UFSTOVFS(ump)); freefrag->ff_state = ATTACHED; LIST_INIT(&freefrag->ff_jwork); freefrag->ff_inum = ip->i_number; freefrag->ff_vtype = ITOV(ip)->v_type; freefrag->ff_blkno = blkno; freefrag->ff_fragsize = size; freefrag->ff_key = key; if (MOUNTEDSUJ(UFSTOVFS(ump))) { freefrag->ff_jdep = (struct worklist *) newjfreefrag(freefrag, ip, blkno, size, lbn); } else { freefrag->ff_state |= DEPCOMPLETE; freefrag->ff_jdep = NULL; } return (freefrag); } /* * This workitem de-allocates fragments that were replaced during * file block allocation. */ static void handle_workitem_freefrag(freefrag) struct freefrag *freefrag; { struct ufsmount *ump = VFSTOUFS(freefrag->ff_list.wk_mp); struct workhead wkhd; CTR3(KTR_SUJ, "handle_workitem_freefrag: ino %d blkno %jd size %ld", freefrag->ff_inum, freefrag->ff_blkno, freefrag->ff_fragsize); /* * It would be illegal to add new completion items to the * freefrag after it was schedule to be done so it must be * safe to modify the list head here. */ LIST_INIT(&wkhd); ACQUIRE_LOCK(ump); LIST_SWAP(&freefrag->ff_jwork, &wkhd, worklist, wk_list); /* * If the journal has not been written we must cancel it here. */ if (freefrag->ff_jdep) { if (freefrag->ff_jdep->wk_type != D_JNEWBLK) panic("handle_workitem_freefrag: Unexpected type %d\n", freefrag->ff_jdep->wk_type); cancel_jnewblk(WK_JNEWBLK(freefrag->ff_jdep), &wkhd); } FREE_LOCK(ump); ffs_blkfree(ump, ump->um_fs, ump->um_devvp, freefrag->ff_blkno, freefrag->ff_fragsize, freefrag->ff_inum, freefrag->ff_vtype, &wkhd, freefrag->ff_key); ACQUIRE_LOCK(ump); WORKITEM_FREE(freefrag, D_FREEFRAG); FREE_LOCK(ump); } /* * Set up a dependency structure for an external attributes data block. * This routine follows much of the structure of softdep_setup_allocdirect. * See the description of softdep_setup_allocdirect above for details. 
*/ void softdep_setup_allocext(ip, off, newblkno, oldblkno, newsize, oldsize, bp) struct inode *ip; ufs_lbn_t off; ufs2_daddr_t newblkno; ufs2_daddr_t oldblkno; long newsize; long oldsize; struct buf *bp; { struct allocdirect *adp, *oldadp; struct allocdirectlst *adphead; struct freefrag *freefrag; struct inodedep *inodedep; struct jnewblk *jnewblk; struct newblk *newblk; struct mount *mp; struct ufsmount *ump; ufs_lbn_t lbn; mp = ITOVFS(ip); ump = VFSTOUFS(mp); KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_setup_allocext called on non-softdep filesystem")); KASSERT(off < UFS_NXADDR, ("softdep_setup_allocext: lbn %lld > UFS_NXADDR", (long long)off)); lbn = bp->b_lblkno; if (oldblkno && oldblkno != newblkno) /* * The usual case is that a smaller fragment that * was just allocated has been replaced with a bigger * fragment or a full-size block. If it is marked as * B_DELWRI, the current contents have not been written * to disk. It is possible that the block was written * earlier, but very uncommon. If the block has never * been written, there is no need to send a BIO_DELETE * for it when it is freed. The gain from avoiding the * TRIMs for the common case of unwritten blocks far * exceeds the cost of the write amplification for the * uncommon case of failing to send a TRIM for a block * that had been written. */ freefrag = newfreefrag(ip, oldblkno, oldsize, lbn, (bp->b_flags & B_DELWRI) != 0 ? NOTRIM_KEY : SINGLETON_KEY); else freefrag = NULL; ACQUIRE_LOCK(ump); if (newblk_lookup(mp, newblkno, 0, &newblk) == 0) panic("softdep_setup_allocext: lost block"); KASSERT(newblk->nb_list.wk_type == D_NEWBLK, ("softdep_setup_allocext: newblk already initialized")); /* * Convert the newblk to an allocdirect. */ WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT); adp = (struct allocdirect *)newblk; newblk->nb_freefrag = freefrag; adp->ad_offset = off; adp->ad_oldblkno = oldblkno; adp->ad_newsize = newsize; adp->ad_oldsize = oldsize; adp->ad_state |= EXTDATA; /* * Finish initializing the journal. */ if ((jnewblk = newblk->nb_jnewblk) != NULL) { jnewblk->jn_ino = ip->i_number; jnewblk->jn_lbn = lbn; add_to_journal(&jnewblk->jn_list); } if (freefrag && freefrag->ff_jdep != NULL && freefrag->ff_jdep->wk_type == D_JFREEFRAG) add_to_journal(freefrag->ff_jdep); inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); adp->ad_inodedep = inodedep; WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list); /* * The list of allocdirects must be kept in sorted and ascending * order so that the rollback routines can quickly determine the * first uncommitted block (the size of the file stored on disk * ends at the end of the lowest committed fragment, or if there * are no fragments, at the end of the highest committed block). * Since files generally grow, the typical case is that the new * block is to be added at the end of the list. We speed this * special case by checking against the last allocdirect in the * list before laboriously traversing the list looking for the * insertion point. 
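 */

/*
 * Editor's addition, not part of the original source: a standalone sketch of
 * the sorted-insert-with-fast-append logic described in the comment above for
 * the allocdirect lists.  It checks the tail first so the common "file grows
 * at the end" case stays O(1); merging of equal offsets is omitted.  The adp
 * structure here is a simplified stand-in.
 */
#if 0
#include <sys/queue.h>
#include <stdio.h>

struct adp {
        TAILQ_ENTRY(adp)        ad_next;
        int                     ad_offset;
};
TAILQ_HEAD(adplist, adp);

/* Keep the list sorted by ad_offset, trying the tail before scanning. */
static void
insert_sorted(struct adplist *head, struct adp *adp)
{
        struct adp *oldadp;

        oldadp = TAILQ_LAST(head, adplist);
        if (oldadp == NULL || oldadp->ad_offset <= adp->ad_offset) {
                TAILQ_INSERT_TAIL(head, adp, ad_next);
                return;
        }
        TAILQ_FOREACH(oldadp, head, ad_next)
                if (oldadp->ad_offset >= adp->ad_offset)
                        break;
        TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
}

int
main(void)
{
        struct adplist head = TAILQ_HEAD_INITIALIZER(head);
        struct adp a = { .ad_offset = 1 }, b = { .ad_offset = 5 },
            c = { .ad_offset = 3 }, *p;

        insert_sorted(&head, &a);
        insert_sorted(&head, &b);
        insert_sorted(&head, &c);       /* lands between 1 and 5 */
        TAILQ_FOREACH(p, &head, ad_next)
                printf("%d ", p->ad_offset);
        printf("\n");
        return (0);
}
#endif
/*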
*/ adphead = &inodedep->id_newextupdt; oldadp = TAILQ_LAST(adphead, allocdirectlst); if (oldadp == NULL || oldadp->ad_offset <= off) { /* insert at end of list */ TAILQ_INSERT_TAIL(adphead, adp, ad_next); if (oldadp != NULL && oldadp->ad_offset == off) allocdirect_merge(adphead, adp, oldadp); FREE_LOCK(ump); return; } TAILQ_FOREACH(oldadp, adphead, ad_next) { if (oldadp->ad_offset >= off) break; } if (oldadp == NULL) panic("softdep_setup_allocext: lost entry"); /* insert in middle of list */ TAILQ_INSERT_BEFORE(oldadp, adp, ad_next); if (oldadp->ad_offset == off) allocdirect_merge(adphead, adp, oldadp); FREE_LOCK(ump); } /* * Indirect block allocation dependencies. * * The same dependencies that exist for a direct block also exist when * a new block is allocated and pointed to by an entry in a block of * indirect pointers. The undo/redo states described above are also * used here. Because an indirect block contains many pointers that * may have dependencies, a second copy of the entire in-memory indirect * block is kept. The buffer cache copy is always completely up-to-date. * The second copy, which is used only as a source for disk writes, * contains only the safe pointers (i.e., those that have no remaining * update dependencies). The second copy is freed when all pointers * are safe. The cache is not allowed to replace indirect blocks with * pending update dependencies. If a buffer containing an indirect * block with dependencies is written, these routines will mark it * dirty again. It can only be successfully written once all the * dependencies are removed. The ffs_fsync routine in conjunction with * softdep_sync_metadata work together to get all the dependencies * removed so that a file can be successfully written to disk. Three * procedures are used when setting up indirect block pointer * dependencies. The division is necessary because of the organization * of the "balloc" routine and because of the distinction between file * pages and file metadata blocks. */ /* * Allocate a new allocindir structure. */ static struct allocindir * newallocindir(ip, ptrno, newblkno, oldblkno, lbn) struct inode *ip; /* inode for file being extended */ int ptrno; /* offset of pointer in indirect block */ ufs2_daddr_t newblkno; /* disk block number being added */ ufs2_daddr_t oldblkno; /* previous block number, 0 if none */ ufs_lbn_t lbn; { struct newblk *newblk; struct allocindir *aip; struct freefrag *freefrag; struct jnewblk *jnewblk; if (oldblkno) freefrag = newfreefrag(ip, oldblkno, ITOFS(ip)->fs_bsize, lbn, SINGLETON_KEY); else freefrag = NULL; ACQUIRE_LOCK(ITOUMP(ip)); if (newblk_lookup(ITOVFS(ip), newblkno, 0, &newblk) == 0) panic("new_allocindir: lost block"); KASSERT(newblk->nb_list.wk_type == D_NEWBLK, ("newallocindir: newblk already initialized")); WORKITEM_REASSIGN(newblk, D_ALLOCINDIR); newblk->nb_freefrag = freefrag; aip = (struct allocindir *)newblk; aip->ai_offset = ptrno; aip->ai_oldblkno = oldblkno; aip->ai_lbn = lbn; if ((jnewblk = newblk->nb_jnewblk) != NULL) { jnewblk->jn_ino = ip->i_number; jnewblk->jn_lbn = lbn; add_to_journal(&jnewblk->jn_list); } if (freefrag && freefrag->ff_jdep != NULL && freefrag->ff_jdep->wk_type == D_JFREEFRAG) add_to_journal(freefrag->ff_jdep); return (aip); } /* * Called just before setting an indirect block pointer * to a newly allocated file page. 
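 */

/*
 * Editor's addition, not part of the original source: a toy, standalone model
 * of the "second copy" idea from the indirect-block discussion above.  The
 * copy that goes to disk is built from the fully up-to-date cache copy with
 * any pointer that still has pending dependencies rolled back (to zero here,
 * standing in for its previous value).  All names and the fixed-size arrays
 * are hypothetical simplifications.
 */
#if 0
#include <stdio.h>
#include <string.h>

#define NPTR    4

static long cache_copy[NPTR];   /* always fully up to date */
static long save_copy[NPTR];    /* what would be written to disk */
static int  pending[NPTR];      /* 1 = pointer still has a dependency */

/* Build the disk copy holding only the safe pointers. */
static void
prepare_write(void)
{
        int i;

        memcpy(save_copy, cache_copy, sizeof(save_copy));
        for (i = 0; i < NPTR; i++)
                if (pending[i])
                        save_copy[i] = 0;
}

int
main(void)
{
        int i;

        cache_copy[0] = 100;    /* safe: its dependencies are complete */
        cache_copy[1] = 200;    /* unsafe: dependency still outstanding */
        pending[1] = 1;
        prepare_write();
        for (i = 0; i < NPTR; i++)
                printf("ptr %d: cache %ld, on-disk %ld\n", i,
                    cache_copy[i], save_copy[i]);
        return (0);
}
#endif
/*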
*/ void softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp) struct inode *ip; /* inode for file being extended */ ufs_lbn_t lbn; /* allocated block number within file */ struct buf *bp; /* buffer with indirect blk referencing page */ int ptrno; /* offset of pointer in indirect block */ ufs2_daddr_t newblkno; /* disk block number being added */ ufs2_daddr_t oldblkno; /* previous block number, 0 if none */ struct buf *nbp; /* buffer holding allocated page */ { struct inodedep *inodedep; struct freefrag *freefrag; struct allocindir *aip; struct pagedep *pagedep; struct mount *mp; struct ufsmount *ump; mp = ITOVFS(ip); ump = VFSTOUFS(mp); KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_setup_allocindir_page called on non-softdep filesystem")); KASSERT(lbn == nbp->b_lblkno, ("softdep_setup_allocindir_page: lbn %jd != lblkno %jd", lbn, bp->b_lblkno)); CTR4(KTR_SUJ, "softdep_setup_allocindir_page: ino %d blkno %jd oldblkno %jd " "lbn %jd", ip->i_number, newblkno, oldblkno, lbn); ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_page"); aip = newallocindir(ip, ptrno, newblkno, oldblkno, lbn); (void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); /* * If we are allocating a directory page, then we must * allocate an associated pagedep to track additions and * deletions. */ if ((ip->i_mode & IFMT) == IFDIR) pagedep_lookup(mp, nbp, ip->i_number, lbn, DEPALLOC, &pagedep); WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list); freefrag = setup_allocindir_phase2(bp, ip, inodedep, aip, lbn); FREE_LOCK(ump); if (freefrag) handle_workitem_freefrag(freefrag); } /* * Called just before setting an indirect block pointer to a * newly allocated indirect block. */ void softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno) struct buf *nbp; /* newly allocated indirect block */ struct inode *ip; /* inode for file being extended */ struct buf *bp; /* indirect block referencing allocated block */ int ptrno; /* offset of pointer in indirect block */ ufs2_daddr_t newblkno; /* disk block number being added */ { struct inodedep *inodedep; struct allocindir *aip; struct ufsmount *ump; ufs_lbn_t lbn; ump = ITOUMP(ip); KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, ("softdep_setup_allocindir_meta called on non-softdep filesystem")); CTR3(KTR_SUJ, "softdep_setup_allocindir_meta: ino %d blkno %jd ptrno %d", ip->i_number, newblkno, ptrno); lbn = nbp->b_lblkno; ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_meta"); aip = newallocindir(ip, ptrno, newblkno, 0, lbn); inodedep_lookup(UFSTOVFS(ump), ip->i_number, DEPALLOC, &inodedep); WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list); if (setup_allocindir_phase2(bp, ip, inodedep, aip, lbn)) panic("softdep_setup_allocindir_meta: Block already existed"); FREE_LOCK(ump); } static void indirdep_complete(indirdep) struct indirdep *indirdep; { struct allocindir *aip; LIST_REMOVE(indirdep, ir_next); indirdep->ir_state |= DEPCOMPLETE; while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL) { LIST_REMOVE(aip, ai_next); free_newblk(&aip->ai_block); } /* * If this indirdep is not attached to a buf it was simply waiting * on completion to clear completehd. free_indirdep() asserts * that nothing is dangling. 
*/ if ((indirdep->ir_state & ONWORKLIST) == 0) free_indirdep(indirdep); } static struct indirdep * indirdep_lookup(mp, ip, bp) struct mount *mp; struct inode *ip; struct buf *bp; { struct indirdep *indirdep, *newindirdep; struct newblk *newblk; struct ufsmount *ump; struct worklist *wk; struct fs *fs; ufs2_daddr_t blkno; ump = VFSTOUFS(mp); LOCK_OWNED(ump); indirdep = NULL; newindirdep = NULL; fs = ump->um_fs; for (;;) { LIST_FOREACH(wk, &bp->b_dep, wk_list) { if (wk->wk_type != D_INDIRDEP) continue; indirdep = WK_INDIRDEP(wk); break; } /* Found on the buffer worklist, no new structure to free. */ if (indirdep != NULL && newindirdep == NULL) return (indirdep); if (indirdep != NULL && newindirdep != NULL) panic("indirdep_lookup: simultaneous create"); /* None found on the buffer and a new structure is ready. */ if (indirdep == NULL && newindirdep != NULL) break; /* None found and no new structure available. */ FREE_LOCK(ump); newindirdep = malloc(sizeof(struct indirdep), M_INDIRDEP, M_SOFTDEP_FLAGS); workitem_alloc(&newindirdep->ir_list, D_INDIRDEP, mp); newindirdep->ir_state = ATTACHED; if (I_IS_UFS1(ip)) newindirdep->ir_state |= UFS1FMT; TAILQ_INIT(&newindirdep->ir_trunc); newindirdep->ir_saveddata = NULL; LIST_INIT(&newindirdep->ir_deplisthd); LIST_INIT(&newindirdep->ir_donehd); LIST_INIT(&newindirdep->ir_writehd); LIST_INIT(&newindirdep->ir_completehd); if (bp->b_blkno == bp->b_lblkno) { ufs_bmaparray(bp->b_vp, bp->b_lblkno, &blkno, bp, NULL, NULL); bp->b_blkno = blkno; } newindirdep->ir_freeblks = NULL; newindirdep->ir_savebp = getblk(ump->um_devvp, bp->b_blkno, bp->b_bcount, 0, 0, 0); newindirdep->ir_bp = bp; BUF_KERNPROC(newindirdep->ir_savebp); bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount); ACQUIRE_LOCK(ump); } indirdep = newindirdep; WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list); /* * If the block is not yet allocated we don't set DEPCOMPLETE so * that we don't free dependencies until the pointers are valid. * This could search b_dep for D_ALLOCDIRECT/D_ALLOCINDIR rather * than using the hash. */ if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk)) LIST_INSERT_HEAD(&newblk->nb_indirdeps, indirdep, ir_next); else indirdep->ir_state |= DEPCOMPLETE; return (indirdep); } /* * Called to finish the allocation of the "aip" allocated * by one of the two routines above. */ static struct freefrag * setup_allocindir_phase2(bp, ip, inodedep, aip, lbn) struct buf *bp; /* in-memory copy of the indirect block */ struct inode *ip; /* inode for file being extended */ struct inodedep *inodedep; /* Inodedep for ip */ struct allocindir *aip; /* allocindir allocated by the above routines */ ufs_lbn_t lbn; /* Logical block number for this block. */ { struct fs *fs; struct indirdep *indirdep; struct allocindir *oldaip; struct freefrag *freefrag; struct mount *mp; struct ufsmount *ump; mp = ITOVFS(ip); ump = VFSTOUFS(mp); LOCK_OWNED(ump); fs = ump->um_fs; if (bp->b_lblkno >= 0) panic("setup_allocindir_phase2: not indir blk"); KASSERT(aip->ai_offset >= 0 && aip->ai_offset < NINDIR(fs), ("setup_allocindir_phase2: Bad offset %d", aip->ai_offset)); indirdep = indirdep_lookup(mp, ip, bp); KASSERT(indirdep->ir_savebp != NULL, ("setup_allocindir_phase2 NULL ir_savebp")); aip->ai_indirdep = indirdep; /* * Check for an unwritten dependency for this indirect offset. If * there is, merge the old dependency into the new one. This happens * as a result of reallocblk only. 
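 * The old allocindir may live on either ir_deplisthd or ir_donehd, so
 * both lists are searched below for a matching offset.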
*/ freefrag = NULL; if (aip->ai_oldblkno != 0) { LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) { if (oldaip->ai_offset == aip->ai_offset) { freefrag = allocindir_merge(aip, oldaip); goto done; } } LIST_FOREACH(oldaip, &indirdep->ir_donehd, ai_next) { if (oldaip->ai_offset == aip->ai_offset) { freefrag = allocindir_merge(aip, oldaip); goto done; } } } done: LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next); return (freefrag); } /* * Merge two allocindirs which refer to the same block. Move newblock * dependencies and setup the freefrags appropriately. */ static struct freefrag * allocindir_merge(aip, oldaip) struct allocindir *aip; struct allocindir *oldaip; { struct freefrag *freefrag; struct worklist *wk; if (oldaip->ai_newblkno != aip->ai_oldblkno) panic("allocindir_merge: blkno"); aip->ai_oldblkno = oldaip->ai_oldblkno; freefrag = aip->ai_freefrag; aip->ai_freefrag = oldaip->ai_freefrag; oldaip->ai_freefrag = NULL; KASSERT(freefrag != NULL, ("setup_allocindir_phase2: No freefrag")); /* * If we are tracking a new directory-block allocation, * move it from the old allocindir to the new allocindir. */ if ((wk = LIST_FIRST(&oldaip->ai_newdirblk)) != NULL) { WORKLIST_REMOVE(wk); if (!LIST_EMPTY(&oldaip->ai_newdirblk)) panic("allocindir_merge: extra newdirblk"); WORKLIST_INSERT(&aip->ai_newdirblk, wk); } /* * We can skip journaling for this freefrag and just complete * any pending journal work for the allocindir that is being * removed after the freefrag completes. */ if (freefrag->ff_jdep) cancel_jfreefrag(WK_JFREEFRAG(freefrag->ff_jdep)); LIST_REMOVE(oldaip, ai_next); freefrag->ff_jdep = (struct worklist *)cancel_newblk(&oldaip->ai_block, &freefrag->ff_list, &freefrag->ff_jwork); free_newblk(&oldaip->ai_block); return (freefrag); } static inline void setup_freedirect(freeblks, ip, i, needj) struct freeblks *freeblks; struct inode *ip; int i; int needj; { struct ufsmount *ump; ufs2_daddr_t blkno; int frags; blkno = DIP(ip, i_db[i]); if (blkno == 0) return; DIP_SET(ip, i_db[i], 0); ump = ITOUMP(ip); frags = sblksize(ump->um_fs, ip->i_size, i); frags = numfrags(ump->um_fs, frags); newfreework(ump, freeblks, NULL, i, blkno, frags, 0, needj); } static inline void setup_freeext(freeblks, ip, i, needj) struct freeblks *freeblks; struct inode *ip; int i; int needj; { struct ufsmount *ump; ufs2_daddr_t blkno; int frags; blkno = ip->i_din2->di_extb[i]; if (blkno == 0) return; ip->i_din2->di_extb[i] = 0; ump = ITOUMP(ip); frags = sblksize(ump->um_fs, ip->i_din2->di_extsize, i); frags = numfrags(ump->um_fs, frags); newfreework(ump, freeblks, NULL, -1 - i, blkno, frags, 0, needj); } static inline void setup_freeindir(freeblks, ip, i, lbn, needj) struct freeblks *freeblks; struct inode *ip; int i; ufs_lbn_t lbn; int needj; { struct ufsmount *ump; ufs2_daddr_t blkno; blkno = DIP(ip, i_ib[i]); if (blkno == 0) return; DIP_SET(ip, i_ib[i], 0); ump = ITOUMP(ip); newfreework(ump, freeblks, NULL, lbn, blkno, ump->um_fs->fs_frag, 0, needj); } static inline struct freeblks * newfreeblks(mp, ip) struct mount *mp; struct inode *ip; { struct freeblks *freeblks; freeblks = malloc(sizeof(struct freeblks), M_FREEBLKS, M_SOFTDEP_FLAGS|M_ZERO); workitem_alloc(&freeblks->fb_list, D_FREEBLKS, mp); LIST_INIT(&freeblks->fb_jblkdephd); LIST_INIT(&freeblks->fb_jwork); freeblks->fb_ref = 0; freeblks->fb_cgwait = 0; freeblks->fb_state = ATTACHED; freeblks->fb_uid = ip->i_uid; freeblks->fb_inum = ip->i_number; freeblks->fb_vtype = ITOV(ip)->v_type; freeblks->fb_modrev = DIP(ip, i_modrev); freeblks->fb_devvp = 
ITODEVVP(ip); freeblks->fb_chkcnt = 0; freeblks->fb_len = 0; return (freeblks); } static void trunc_indirdep(indirdep, freeblks, bp, off) struct indirdep *indirdep; struct freeblks *freeblks; struct buf *bp; int off; { struct allocindir *aip, *aipn; /* * The first set of allocindirs won't be in savedbp. */ LIST_FOREACH_SAFE(aip, &indirdep->ir_deplisthd, ai_next, aipn) if (aip->ai_offset > off) cancel_allocindir(aip, bp, freeblks, 1); LIST_FOREACH_SAFE(aip, &indirdep->ir_donehd, ai_next, aipn) if (aip->ai_offset > off) cancel_allocindir(aip, bp, freeblks, 1); /* * These will exist in savedbp. */ LIST_FOREACH_SAFE(aip, &indirdep->ir_writehd, ai_next, aipn) if (aip->ai_offset > off) cancel_allocindir(aip, NULL, freeblks, 0); LIST_FOREACH_SAFE(aip, &indirdep->ir_completehd, ai_next, aipn) if (aip->ai_offset > off) cancel_allocindir(aip, NULL, freeblks, 0); } /* * Follow the chain of indirects down to lastlbn creating a freework * structure for each. This will be used to start indir_trunc() at * the right offset and create the journal records for the parrtial * truncation. A second step will handle the truncated dependencies. */ static int setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno) struct freeblks *freeblks; struct inode *ip; ufs_lbn_t lbn; ufs_lbn_t lastlbn; ufs2_daddr_t blkno; { struct indirdep *indirdep; struct indirdep *indirn; struct freework *freework; struct newblk *newblk; struct mount *mp; struct ufsmount *ump; struct buf *bp; uint8_t *start; uint8_t *end; ufs_lbn_t lbnadd; int level; int error; int off; freework = NULL; if (blkno == 0) return (0); mp = freeblks->fb_list.wk_mp; ump = VFSTOUFS(mp); bp = getblk(ITOV(ip), lbn, mp->mnt_stat.f_iosize, 0, 0, 0); if ((bp->b_flags & B_CACHE) == 0) { bp->b_blkno = blkptrtodb(VFSTOUFS(mp), blkno); bp->b_iocmd = BIO_READ; bp->b_flags &= ~B_INVAL; bp->b_ioflags &= ~BIO_ERROR; vfs_busy_pages(bp, 0); bp->b_iooffset = dbtob(bp->b_blkno); bstrategy(bp); #ifdef RACCT if (racct_enable) { PROC_LOCK(curproc); racct_add_buf(curproc, bp, 0); PROC_UNLOCK(curproc); } #endif /* RACCT */ curthread->td_ru.ru_inblock++; error = bufwait(bp); if (error) { brelse(bp); return (error); } } level = lbn_level(lbn); lbnadd = lbn_offset(ump->um_fs, level); /* * Compute the offset of the last block we want to keep. Store * in the freework the first block we want to completely free. */ off = (lastlbn - -(lbn + level)) / lbnadd; if (off + 1 == NINDIR(ump->um_fs)) goto nowork; freework = newfreework(ump, freeblks, NULL, lbn, blkno, 0, off + 1, 0); /* * Link the freework into the indirdep. This will prevent any new * allocations from proceeding until we are finished with the * truncate and the block is written. */ ACQUIRE_LOCK(ump); indirdep = indirdep_lookup(mp, ip, bp); if (indirdep->ir_freeblks) panic("setup_trunc_indir: indirdep already truncated."); TAILQ_INSERT_TAIL(&indirdep->ir_trunc, freework, fw_next); freework->fw_indir = indirdep; /* * Cancel any allocindirs that will not make it to disk. * We have to do this for all copies of the indirdep that * live on this newblk. */ if ((indirdep->ir_state & DEPCOMPLETE) == 0) { if (newblk_lookup(mp, dbtofsb(ump->um_fs, bp->b_blkno), 0, &newblk) == 0) panic("setup_trunc_indir: lost block"); LIST_FOREACH(indirn, &newblk->nb_indirdeps, ir_next) trunc_indirdep(indirn, freeblks, bp, off); } else trunc_indirdep(indirdep, freeblks, bp, off); FREE_LOCK(ump); /* * Creation is protected by the buf lock. 
The saveddata is only * needed if a full truncation follows a partial truncation but it * is difficult to allocate in that case so we fetch it anyway. */ if (indirdep->ir_saveddata == NULL) indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP, M_SOFTDEP_FLAGS); nowork: /* Fetch the blkno of the child and the zero start offset. */ if (I_IS_UFS1(ip)) { blkno = ((ufs1_daddr_t *)bp->b_data)[off]; start = (uint8_t *)&((ufs1_daddr_t *)bp->b_data)[off+1]; } else { blkno = ((ufs2_daddr_t *)bp->b_data)[off]; start = (uint8_t *)&((ufs2_daddr_t *)bp->b_data)[off+1]; } if (freework) { /* Zero the truncated pointers. */ end = bp->b_data + bp->b_bcount; bzero(start, end - start); bdwrite(bp); } else bqrelse(bp); if (level == 0) return (0); lbn++; /* adjust level */ lbn -= (off * lbnadd); return setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno); } /* * Complete the partial truncation of an indirect block setup by * setup_trunc_indir(). This zeros the truncated pointers in the saved * copy and writes them to disk before the freeblks is allowed to complete. */ static void complete_trunc_indir(freework) struct freework *freework; { struct freework *fwn; struct indirdep *indirdep; struct ufsmount *ump; struct buf *bp; uintptr_t start; int count; ump = VFSTOUFS(freework->fw_list.wk_mp); LOCK_OWNED(ump); indirdep = freework->fw_indir; for (;;) { bp = indirdep->ir_bp; /* See if the block was discarded. */ if (bp == NULL) break; /* Inline part of getdirtybuf(). We dont want bremfree. */ if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) break; if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, LOCK_PTR(ump)) == 0) BUF_UNLOCK(bp); ACQUIRE_LOCK(ump); } freework->fw_state |= DEPCOMPLETE; TAILQ_REMOVE(&indirdep->ir_trunc, freework, fw_next); /* * Zero the pointers in the saved copy. */ if (indirdep->ir_state & UFS1FMT) start = sizeof(ufs1_daddr_t); else start = sizeof(ufs2_daddr_t); start *= freework->fw_start; count = indirdep->ir_savebp->b_bcount - start; start += (uintptr_t)indirdep->ir_savebp->b_data; bzero((char *)start, count); /* * We need to start the next truncation in the list if it has not * been started yet. */ fwn = TAILQ_FIRST(&indirdep->ir_trunc); if (fwn != NULL) { if (fwn->fw_freeblks == indirdep->ir_freeblks) TAILQ_REMOVE(&indirdep->ir_trunc, fwn, fw_next); if ((fwn->fw_state & ONWORKLIST) == 0) freework_enqueue(fwn); } /* * If bp is NULL the block was fully truncated, restore * the saved block list otherwise free it if it is no * longer needed. */ if (TAILQ_EMPTY(&indirdep->ir_trunc)) { if (bp == NULL) bcopy(indirdep->ir_saveddata, indirdep->ir_savebp->b_data, indirdep->ir_savebp->b_bcount); free(indirdep->ir_saveddata, M_INDIRDEP); indirdep->ir_saveddata = NULL; } /* * When bp is NULL there is a full truncation pending. We * must wait for this full truncation to be journaled before * we can release this freework because the disk pointers will * never be written as zero. */ if (bp == NULL) { if (LIST_EMPTY(&indirdep->ir_freeblks->fb_jblkdephd)) handle_written_freework(freework); else WORKLIST_INSERT(&indirdep->ir_freeblks->fb_freeworkhd, &freework->fw_list); } else { /* Complete when the real copy is written. */ WORKLIST_INSERT(&bp->b_dep, &freework->fw_list); BUF_UNLOCK(bp); } } /* * Calculate the number of blocks we are going to release where datablocks * is the current total and length is the new file size. 
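 * The count is returned in DEV_BSIZE units (note the fsbtodb() conversion)
 * and is clamped for sparse files so that we never claim to release more
 * blocks than the inode currently references.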
*/ static ufs2_daddr_t blkcount(fs, datablocks, length) struct fs *fs; ufs2_daddr_t datablocks; off_t length; { off_t totblks, numblks; totblks = 0; numblks = howmany(length, fs->fs_bsize); if (numblks <= UFS_NDADDR) { totblks = howmany(length, fs->fs_fsize); goto out; } totblks = blkstofrags(fs, numblks); numblks -= UFS_NDADDR; /* * Count all single, then double, then triple indirects required. * Subtracting one indirects worth of blocks for each pass * acknowledges one of each pointed to by the inode. */ for (;;) { totblks += blkstofrags(fs, howmany(numblks, NINDIR(fs))); numblks -= NINDIR(fs); if (numblks <= 0) break; numblks = howmany(numblks, NINDIR(fs)); } out: totblks = fsbtodb(fs, totblks); /* * Handle sparse files. We can't reclaim more blocks than the inode * references. We will correct it later in handle_complete_freeblks() * when we know the real count. */ if (totblks > datablocks) return (0); return (datablocks - totblks); } /* * Handle freeblocks for journaled softupdate filesystems. * * Contrary to normal softupdates, we must preserve the block pointers in * indirects until their subordinates are free. This is to avoid journaling * every block that is freed which may consume more space than the journal * itself. The recovery program will see the free block journals at the * base of the truncated area and traverse them to reclaim space. The * pointers in the inode may be cleared immediately after the journal * records are written because each direct and indirect pointer in the * inode is recorded in a journal. This permits full truncation to proceed * asynchronously. The write order is journal -> inode -> cgs -> indirects. * * The algorithm is as follows: * 1) Traverse the in-memory state and create journal entries to release * the relevant blocks and full indirect trees. * 2) Traverse the indirect block chain adding partial truncation freework * records to indirects in the path to lastlbn. The freework will * prevent new allocation dependencies from being satisfied in this * indirect until the truncation completes. * 3) Read and lock the inode block, performing an update with the new size * and pointers. This prevents truncated data from becoming valid on * disk through step 4. * 4) Reap unsatisfied dependencies that are beyond the truncated area, * eliminate journal work for those records that do not require it. * 5) Schedule the journal records to be written followed by the inode block. * 6) Allocate any necessary frags for the end of file. * 7) Zero any partially truncated blocks. * * From this truncation proceeds asynchronously using the freework and * indir_trunc machinery. The file will not be extended again into a * partially truncated indirect block until all work is completed but * the normal dependency mechanism ensures that it is rolled back/forward * as appropriate. Further truncation may occur without delay and is * serialized in indir_trunc(). 
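 * Note that lastlbn below is computed as the last logical block to be
 * kept; it is -1 when the file is being truncated to length zero.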
*/ void softdep_journal_freeblocks(ip, cred, length, flags) struct inode *ip; /* The inode whose length is to be reduced */ struct ucred *cred; off_t length; /* The new length for the file */ int flags; /* IO_EXT and/or IO_NORMAL */ { struct freeblks *freeblks, *fbn; struct worklist *wk, *wkn; struct inodedep *inodedep; struct jblkdep *jblkdep; struct allocdirect *adp, *adpn; struct ufsmount *ump; struct fs *fs; struct buf *bp; struct vnode *vp; struct mount *mp; ufs2_daddr_t extblocks, datablocks; ufs_lbn_t tmpval, lbn, lastlbn; int frags, lastoff, iboff, allocblock, needj, error, i; ump = ITOUMP(ip); mp = UFSTOVFS(ump); fs = ump->um_fs; KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_journal_freeblocks called on non-softdep filesystem")); vp = ITOV(ip); needj = 1; iboff = -1; allocblock = 0; extblocks = 0; datablocks = 0; frags = 0; freeblks = newfreeblks(mp, ip); ACQUIRE_LOCK(ump); /* * If we're truncating a removed file that will never be written * we don't need to journal the block frees. The canceled journals * for the allocations will suffice. */ inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); if ((inodedep->id_state & (UNLINKED | DEPCOMPLETE)) == UNLINKED && length == 0) needj = 0; CTR3(KTR_SUJ, "softdep_journal_freeblks: ip %d length %ld needj %d", ip->i_number, length, needj); FREE_LOCK(ump); /* * Calculate the lbn that we are truncating to. This results in -1 * if we're truncating the 0 bytes. So it is the last lbn we want * to keep, not the first lbn we want to truncate. */ lastlbn = lblkno(fs, length + fs->fs_bsize - 1) - 1; lastoff = blkoff(fs, length); /* * Compute frags we are keeping in lastlbn. 0 means all. */ if (lastlbn >= 0 && lastlbn < UFS_NDADDR) { frags = fragroundup(fs, lastoff); /* adp offset of last valid allocdirect. */ iboff = lastlbn; } else if (lastlbn > 0) iboff = UFS_NDADDR; if (fs->fs_magic == FS_UFS2_MAGIC) extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize)); /* * Handle normal data blocks and indirects. This section saves * values used after the inode update to complete frag and indirect * truncation. */ if ((flags & IO_NORMAL) != 0) { /* * Handle truncation of whole direct and indirect blocks. */ for (i = iboff + 1; i < UFS_NDADDR; i++) setup_freedirect(freeblks, ip, i, needj); for (i = 0, tmpval = NINDIR(fs), lbn = UFS_NDADDR; i < UFS_NIADDR; i++, lbn += tmpval, tmpval *= NINDIR(fs)) { /* Release a whole indirect tree. */ if (lbn > lastlbn) { setup_freeindir(freeblks, ip, i, -lbn -i, needj); continue; } iboff = i + UFS_NDADDR; /* * Traverse partially truncated indirect tree. */ if (lbn <= lastlbn && lbn + tmpval - 1 > lastlbn) setup_trunc_indir(freeblks, ip, -lbn - i, lastlbn, DIP(ip, i_ib[i])); } /* * Handle partial truncation to a frag boundary. */ if (frags) { ufs2_daddr_t blkno; long oldfrags; oldfrags = blksize(fs, ip, lastlbn); blkno = DIP(ip, i_db[lastlbn]); if (blkno && oldfrags != frags) { oldfrags -= frags; oldfrags = numfrags(fs, oldfrags); blkno += numfrags(fs, frags); newfreework(ump, freeblks, NULL, lastlbn, blkno, oldfrags, 0, needj); if (needj) adjust_newfreework(freeblks, numfrags(fs, frags)); } else if (blkno == 0) allocblock = 1; } /* * Add a journal record for partial truncate if we are * handling indirect blocks. Non-indirects need no extra * journaling. 
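 * newjtrunc() queues a journal record carrying the new length, and
 * IN_TRUNCATED arranges for softdep_journal_fsync() to emit a jfsync
 * record later.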
*/ if (length != 0 && lastlbn >= UFS_NDADDR) { ip->i_flag |= IN_TRUNCATED; newjtrunc(freeblks, length, 0); } ip->i_size = length; DIP_SET(ip, i_size, ip->i_size); datablocks = DIP(ip, i_blocks) - extblocks; if (length != 0) datablocks = blkcount(fs, datablocks, length); freeblks->fb_len = length; } if ((flags & IO_EXT) != 0) { for (i = 0; i < UFS_NXADDR; i++) setup_freeext(freeblks, ip, i, needj); ip->i_din2->di_extsize = 0; datablocks += extblocks; } #ifdef QUOTA /* Reference the quotas in case the block count is wrong in the end. */ quotaref(vp, freeblks->fb_quota); (void) chkdq(ip, -datablocks, NOCRED, 0); #endif freeblks->fb_chkcnt = -datablocks; UFS_LOCK(ump); fs->fs_pendingblocks += datablocks; UFS_UNLOCK(ump); DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks); /* * Handle truncation of incomplete alloc direct dependencies. We * hold the inode block locked to prevent incomplete dependencies * from reaching the disk while we are eliminating those that * have been truncated. This is a partially inlined ffs_update(). */ ufs_itimes(vp); ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED); error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int)fs->fs_bsize, cred, &bp); if (error) { brelse(bp); softdep_error("softdep_journal_freeblocks", error); return; } if (bp->b_bufsize == fs->fs_bsize) bp->b_flags |= B_CLUSTEROK; softdep_update_inodeblock(ip, bp, 0); - if (ump->um_fstype == UFS1) + if (ump->um_fstype == UFS1) { *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1; - else + } else { *((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2; + } ACQUIRE_LOCK(ump); (void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); if ((inodedep->id_state & IOSTARTED) != 0) panic("softdep_setup_freeblocks: inode busy"); /* * Add the freeblks structure to the list of operations that * must await the zero'ed inode being written to disk. If we * still have a bitmap dependency (needj), then the inode * has never been written to disk, so we can process the * freeblks below once we have deleted the dependencies. */ if (needj) WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list); else freeblks->fb_state |= COMPLETE; if ((flags & IO_NORMAL) != 0) { TAILQ_FOREACH_SAFE(adp, &inodedep->id_inoupdt, ad_next, adpn) { if (adp->ad_offset > iboff) cancel_allocdirect(&inodedep->id_inoupdt, adp, freeblks); /* * Truncate the allocdirect. We could eliminate * or modify journal records as well. */ else if (adp->ad_offset == iboff && frags) adp->ad_newsize = frags; } } if ((flags & IO_EXT) != 0) while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL) cancel_allocdirect(&inodedep->id_extupdt, adp, freeblks); /* * Scan the bufwait list for newblock dependencies that will never * make it to disk. */ LIST_FOREACH_SAFE(wk, &inodedep->id_bufwait, wk_list, wkn) { if (wk->wk_type != D_ALLOCDIRECT) continue; adp = WK_ALLOCDIRECT(wk); if (((flags & IO_NORMAL) != 0 && (adp->ad_offset > iboff)) || ((flags & IO_EXT) != 0 && (adp->ad_state & EXTDATA))) { cancel_jfreeblk(freeblks, adp->ad_newblkno); cancel_newblk(WK_NEWBLK(wk), NULL, &freeblks->fb_jwork); WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk); } } /* * Add journal work. */ LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps) add_to_journal(&jblkdep->jb_list); FREE_LOCK(ump); bdwrite(bp); /* * Truncate dependency structures beyond length. 
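 * trunc_dependencies() scans the vnode's dirty and clean buffer lists and
 * cancels any dependencies that lie beyond the new length.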
*/ trunc_dependencies(ip, freeblks, lastlbn, frags, flags); /* * This is only set when we need to allocate a fragment because * none existed at the end of a frag-sized file. It handles only * allocating a new, zero filled block. */ if (allocblock) { ip->i_size = length - lastoff; DIP_SET(ip, i_size, ip->i_size); error = UFS_BALLOC(vp, length - 1, 1, cred, BA_CLRBUF, &bp); if (error != 0) { softdep_error("softdep_journal_freeblks", error); return; } ip->i_size = length; DIP_SET(ip, i_size, length); ip->i_flag |= IN_CHANGE | IN_UPDATE; allocbuf(bp, frags); ffs_update(vp, 0); bawrite(bp); } else if (lastoff != 0 && vp->v_type != VDIR) { int size; /* * Zero the end of a truncated frag or block. */ size = sblksize(fs, length, lastlbn); error = bread(vp, lastlbn, size, cred, &bp); if (error) { softdep_error("softdep_journal_freeblks", error); return; } bzero((char *)bp->b_data + lastoff, size - lastoff); bawrite(bp); } ACQUIRE_LOCK(ump); inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); TAILQ_INSERT_TAIL(&inodedep->id_freeblklst, freeblks, fb_next); freeblks->fb_state |= DEPCOMPLETE | ONDEPLIST; /* * We zero earlier truncations so they don't erroneously * update i_blocks. */ if (freeblks->fb_len == 0 && (flags & IO_NORMAL) != 0) TAILQ_FOREACH(fbn, &inodedep->id_freeblklst, fb_next) fbn->fb_len = 0; if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE && LIST_EMPTY(&freeblks->fb_jblkdephd)) freeblks->fb_state |= INPROGRESS; else freeblks = NULL; FREE_LOCK(ump); if (freeblks) handle_workitem_freeblocks(freeblks, 0); trunc_pages(ip, length, extblocks, flags); } /* * Flush a JOP_SYNC to the journal. */ void softdep_journal_fsync(ip) struct inode *ip; { struct jfsync *jfsync; struct ufsmount *ump; ump = ITOUMP(ip); KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, ("softdep_journal_fsync called on non-softdep filesystem")); if ((ip->i_flag & IN_TRUNCATED) == 0) return; ip->i_flag &= ~IN_TRUNCATED; jfsync = malloc(sizeof(*jfsync), M_JFSYNC, M_SOFTDEP_FLAGS | M_ZERO); workitem_alloc(&jfsync->jfs_list, D_JFSYNC, UFSTOVFS(ump)); jfsync->jfs_size = ip->i_size; jfsync->jfs_ino = ip->i_number; ACQUIRE_LOCK(ump); add_to_journal(&jfsync->jfs_list); jwait(&jfsync->jfs_list, MNT_WAIT); FREE_LOCK(ump); } /* * Block de-allocation dependencies. * * When blocks are de-allocated, the on-disk pointers must be nullified before * the blocks are made available for use by other files. (The true * requirement is that old pointers must be nullified before new on-disk * pointers are set. We chose this slightly more stringent requirement to * reduce complexity.) Our implementation handles this dependency by updating * the inode (or indirect block) appropriately but delaying the actual block * de-allocation (i.e., freemap and free space count manipulation) until * after the updated versions reach stable storage. After the disk is * updated, the blocks can be safely de-allocated whenever it is convenient. * This implementation handles only the common case of reducing a file's * length to zero. Other cases are handled by the conventional synchronous * write approach. * * The ffs implementation with which we worked double-checks * the state of the block pointers and file size as it reduces * a file's length. Some of this code is replicated here in our * soft updates implementation. The freeblks->fb_chkcnt field is * used to transfer a part of this information to the procedure * that eventually de-allocates the blocks. 
* * This routine should be called from the routine that shortens * a file's length, before the inode's size or block pointers * are modified. It will save the block pointer information for * later release and zero the inode so that the calling routine * can release it. */ void softdep_setup_freeblocks(ip, length, flags) struct inode *ip; /* The inode whose length is to be reduced */ off_t length; /* The new length for the file */ int flags; /* IO_EXT and/or IO_NORMAL */ { struct ufs1_dinode *dp1; struct ufs2_dinode *dp2; struct freeblks *freeblks; struct inodedep *inodedep; struct allocdirect *adp; struct ufsmount *ump; struct buf *bp; struct fs *fs; ufs2_daddr_t extblocks, datablocks; struct mount *mp; int i, delay, error; ufs_lbn_t tmpval; ufs_lbn_t lbn; ump = ITOUMP(ip); mp = UFSTOVFS(ump); KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_setup_freeblocks called on non-softdep filesystem")); CTR2(KTR_SUJ, "softdep_setup_freeblks: ip %d length %ld", ip->i_number, length); KASSERT(length == 0, ("softdep_setup_freeblocks: non-zero length")); fs = ump->um_fs; if ((error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int)fs->fs_bsize, NOCRED, &bp)) != 0) { brelse(bp); softdep_error("softdep_setup_freeblocks", error); return; } freeblks = newfreeblks(mp, ip); extblocks = 0; datablocks = 0; if (fs->fs_magic == FS_UFS2_MAGIC) extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize)); if ((flags & IO_NORMAL) != 0) { for (i = 0; i < UFS_NDADDR; i++) setup_freedirect(freeblks, ip, i, 0); for (i = 0, tmpval = NINDIR(fs), lbn = UFS_NDADDR; i < UFS_NIADDR; i++, lbn += tmpval, tmpval *= NINDIR(fs)) setup_freeindir(freeblks, ip, i, -lbn -i, 0); ip->i_size = 0; DIP_SET(ip, i_size, 0); datablocks = DIP(ip, i_blocks) - extblocks; } if ((flags & IO_EXT) != 0) { for (i = 0; i < UFS_NXADDR; i++) setup_freeext(freeblks, ip, i, 0); ip->i_din2->di_extsize = 0; datablocks += extblocks; } #ifdef QUOTA /* Reference the quotas in case the block count is wrong in the end. */ quotaref(ITOV(ip), freeblks->fb_quota); (void) chkdq(ip, -datablocks, NOCRED, 0); #endif freeblks->fb_chkcnt = -datablocks; UFS_LOCK(ump); fs->fs_pendingblocks += datablocks; UFS_UNLOCK(ump); DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks); /* * Push the zero'ed inode to its disk buffer so that we are free * to delete its dependencies below. Once the dependencies are gone * the buffer can be safely released. */ if (ump->um_fstype == UFS1) { dp1 = ((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number)); ip->i_din1->di_freelink = dp1->di_freelink; *dp1 = *ip->i_din1; } else { dp2 = ((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number)); ip->i_din2->di_freelink = dp2->di_freelink; *dp2 = *ip->i_din2; } /* * Find and eliminate any inode dependencies. */ ACQUIRE_LOCK(ump); (void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); if ((inodedep->id_state & IOSTARTED) != 0) panic("softdep_setup_freeblocks: inode busy"); /* * Add the freeblks structure to the list of operations that * must await the zero'ed inode being written to disk. If we * still have a bitmap dependency (delay == 0), then the inode * has never been written to disk, so we can process the * freeblks below once we have deleted the dependencies. 
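 * delay is taken from the DEPCOMPLETE bit: when it is set the bitmap has
 * already been written and the freeblks must wait on the inode buffer,
 * otherwise the freeblks may be processed as soon as the dependencies
 * have been deleted.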
*/ delay = (inodedep->id_state & DEPCOMPLETE); if (delay) WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list); else freeblks->fb_state |= COMPLETE; /* * Because the file length has been truncated to zero, any * pending block allocation dependency structures associated * with this inode are obsolete and can simply be de-allocated. * We must first merge the two dependency lists to get rid of * any duplicate freefrag structures, then purge the merged list. * If we still have a bitmap dependency, then the inode has never * been written to disk, so we can free any fragments without delay. */ if (flags & IO_NORMAL) { merge_inode_lists(&inodedep->id_newinoupdt, &inodedep->id_inoupdt); while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL) cancel_allocdirect(&inodedep->id_inoupdt, adp, freeblks); } if (flags & IO_EXT) { merge_inode_lists(&inodedep->id_newextupdt, &inodedep->id_extupdt); while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL) cancel_allocdirect(&inodedep->id_extupdt, adp, freeblks); } FREE_LOCK(ump); bdwrite(bp); trunc_dependencies(ip, freeblks, -1, 0, flags); ACQUIRE_LOCK(ump); if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) (void) free_inodedep(inodedep); freeblks->fb_state |= DEPCOMPLETE; /* * If the inode with zeroed block pointers is now on disk * we can start freeing blocks. */ if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE) freeblks->fb_state |= INPROGRESS; else freeblks = NULL; FREE_LOCK(ump); if (freeblks) handle_workitem_freeblocks(freeblks, 0); trunc_pages(ip, length, extblocks, flags); } /* * Eliminate pages from the page cache that back parts of this inode and * adjust the vnode pager's idea of our size. This prevents stale data * from hanging around in the page cache. */ static void trunc_pages(ip, length, extblocks, flags) struct inode *ip; off_t length; ufs2_daddr_t extblocks; int flags; { struct vnode *vp; struct fs *fs; ufs_lbn_t lbn; off_t end, extend; vp = ITOV(ip); fs = ITOFS(ip); extend = OFF_TO_IDX(lblktosize(fs, -extblocks)); if ((flags & IO_EXT) != 0) vn_pages_remove(vp, extend, 0); if ((flags & IO_NORMAL) == 0) return; BO_LOCK(&vp->v_bufobj); drain_output(vp); BO_UNLOCK(&vp->v_bufobj); /* * The vnode pager eliminates file pages we eliminate indirects * below. */ vnode_pager_setsize(vp, length); /* * Calculate the end based on the last indirect we want to keep. If * the block extends into indirects we can just use the negative of * its lbn. Doubles and triples exist at lower numbers so we must * be careful not to remove those, if they exist. double and triple * indirect lbns do not overlap with others so it is not important * to verify how many levels are required. */ lbn = lblkno(fs, length); if (lbn >= UFS_NDADDR) { /* Calculate the virtual lbn of the triple indirect. */ lbn = -lbn - (UFS_NIADDR - 1); end = OFF_TO_IDX(lblktosize(fs, lbn)); } else end = extend; vn_pages_remove(vp, OFF_TO_IDX(OFF_MAX), end); } /* * See if the buf bp is in the range eliminated by truncation. */ static int trunc_check_buf(bp, blkoffp, lastlbn, lastoff, flags) struct buf *bp; int *blkoffp; ufs_lbn_t lastlbn; int lastoff; int flags; { ufs_lbn_t lbn; *blkoffp = 0; /* Only match ext/normal blocks as appropriate. */ if (((flags & IO_EXT) == 0 && (bp->b_xflags & BX_ALTDATA)) || ((flags & IO_NORMAL) == 0 && (bp->b_xflags & BX_ALTDATA) == 0)) return (0); /* ALTDATA is always a full truncation. */ if ((bp->b_xflags & BX_ALTDATA) != 0) return (1); /* -1 is full truncation. 
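 * (trunc_dependencies() is called with lastlbn == -1 when the entire file
 * is being released, as in softdep_setup_freeblocks() above.)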
*/ if (lastlbn == -1) return (1); /* * If this is a partial truncate we only want those * blocks and indirect blocks that cover the range * we're after. */ lbn = bp->b_lblkno; if (lbn < 0) lbn = -(lbn + lbn_level(lbn)); if (lbn < lastlbn) return (0); /* Here we only truncate lblkno if it's partial. */ if (lbn == lastlbn) { if (lastoff == 0) return (0); *blkoffp = lastoff; } return (1); } /* * Eliminate any dependencies that exist in memory beyond lblkno:off */ static void trunc_dependencies(ip, freeblks, lastlbn, lastoff, flags) struct inode *ip; struct freeblks *freeblks; ufs_lbn_t lastlbn; int lastoff; int flags; { struct bufobj *bo; struct vnode *vp; struct buf *bp; int blkoff; /* * We must wait for any I/O in progress to finish so that * all potential buffers on the dirty list will be visible. * Once they are all there, walk the list and get rid of * any dependencies. */ vp = ITOV(ip); bo = &vp->v_bufobj; BO_LOCK(bo); drain_output(vp); TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) bp->b_vflags &= ~BV_SCANNED; restart: TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) { if (bp->b_vflags & BV_SCANNED) continue; if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) { bp->b_vflags |= BV_SCANNED; continue; } KASSERT(bp->b_bufobj == bo, ("Wrong object in buffer")); if ((bp = getdirtybuf(bp, BO_LOCKPTR(bo), MNT_WAIT)) == NULL) goto restart; BO_UNLOCK(bo); if (deallocate_dependencies(bp, freeblks, blkoff)) bqrelse(bp); else brelse(bp); BO_LOCK(bo); goto restart; } /* * Now do the work of vtruncbuf while also matching indirect blocks. */ TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs) bp->b_vflags &= ~BV_SCANNED; cleanrestart: TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs) { if (bp->b_vflags & BV_SCANNED) continue; if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) { bp->b_vflags |= BV_SCANNED; continue; } if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo)) == ENOLCK) { BO_LOCK(bo); goto cleanrestart; } bp->b_vflags |= BV_SCANNED; bremfree(bp); if (blkoff != 0) { allocbuf(bp, blkoff); bqrelse(bp); } else { bp->b_flags |= B_INVAL | B_NOCACHE | B_RELBUF; brelse(bp); } BO_LOCK(bo); goto cleanrestart; } drain_output(vp); BO_UNLOCK(bo); } static int cancel_pagedep(pagedep, freeblks, blkoff) struct pagedep *pagedep; struct freeblks *freeblks; int blkoff; { struct jremref *jremref; struct jmvref *jmvref; struct dirrem *dirrem, *tmp; int i; /* * Copy any directory remove dependencies to the list * to be processed after the freeblks proceeds. If * directory entry never made it to disk they * can be dumped directly onto the work list. */ LIST_FOREACH_SAFE(dirrem, &pagedep->pd_dirremhd, dm_next, tmp) { /* Skip this directory removal if it is intended to remain. */ if (dirrem->dm_offset < blkoff) continue; /* * If there are any dirrems we wait for the journal write * to complete and then restart the buf scan as the lock * has been dropped. */ while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL) { jwait(&jremref->jr_list, MNT_WAIT); return (ERESTART); } LIST_REMOVE(dirrem, dm_next); dirrem->dm_dirinum = pagedep->pd_ino; WORKLIST_INSERT(&freeblks->fb_freeworkhd, &dirrem->dm_list); } while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL) { jwait(&jmvref->jm_list, MNT_WAIT); return (ERESTART); } /* * When we're partially truncating a pagedep we just want to flush * journal entries and return. There can not be any adds in the * truncated portion of the directory and newblk must remain if * part of the block remains. 
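 * The checks below verify that invariant and panic if a diradd is found
 * beyond the truncation offset.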
*/ if (blkoff != 0) { struct diradd *dap; LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) if (dap->da_offset > blkoff) panic("cancel_pagedep: diradd %p off %d > %d", dap, dap->da_offset, blkoff); for (i = 0; i < DAHASHSZ; i++) LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) if (dap->da_offset > blkoff) panic("cancel_pagedep: diradd %p off %d > %d", dap, dap->da_offset, blkoff); return (0); } /* * There should be no directory add dependencies present * as the directory could not be truncated until all * children were removed. */ KASSERT(LIST_FIRST(&pagedep->pd_pendinghd) == NULL, ("deallocate_dependencies: pendinghd != NULL")); for (i = 0; i < DAHASHSZ; i++) KASSERT(LIST_FIRST(&pagedep->pd_diraddhd[i]) == NULL, ("deallocate_dependencies: diraddhd != NULL")); if ((pagedep->pd_state & NEWBLOCK) != 0) free_newdirblk(pagedep->pd_newdirblk); if (free_pagedep(pagedep) == 0) panic("Failed to free pagedep %p", pagedep); return (0); } /* * Reclaim any dependency structures from a buffer that is about to * be reallocated to a new vnode. The buffer must be locked, thus, * no I/O completion operations can occur while we are manipulating * its associated dependencies. The mutex is held so that other I/O's * associated with related dependencies do not occur. */ static int deallocate_dependencies(bp, freeblks, off) struct buf *bp; struct freeblks *freeblks; int off; { struct indirdep *indirdep; struct pagedep *pagedep; struct worklist *wk, *wkn; struct ufsmount *ump; ump = softdep_bp_to_mp(bp); if (ump == NULL) goto done; ACQUIRE_LOCK(ump); LIST_FOREACH_SAFE(wk, &bp->b_dep, wk_list, wkn) { switch (wk->wk_type) { case D_INDIRDEP: indirdep = WK_INDIRDEP(wk); if (bp->b_lblkno >= 0 || bp->b_blkno != indirdep->ir_savebp->b_lblkno) panic("deallocate_dependencies: not indir"); cancel_indirdep(indirdep, bp, freeblks); continue; case D_PAGEDEP: pagedep = WK_PAGEDEP(wk); if (cancel_pagedep(pagedep, freeblks, off)) { FREE_LOCK(ump); return (ERESTART); } continue; case D_ALLOCINDIR: /* * Simply remove the allocindir, we'll find it via * the indirdep where we can clear pointers if * needed. */ WORKLIST_REMOVE(wk); continue; case D_FREEWORK: /* * A truncation is waiting for the zero'd pointers * to be written. It can be freed when the freeblks * is journaled. */ WORKLIST_REMOVE(wk); wk->wk_state |= ONDEPLIST; WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk); break; case D_ALLOCDIRECT: if (off != 0) continue; /* FALLTHROUGH */ default: panic("deallocate_dependencies: Unexpected type %s", TYPENAME(wk->wk_type)); /* NOTREACHED */ } } FREE_LOCK(ump); done: /* * Don't throw away this buf, we were partially truncating and * some deps may always remain. */ if (off) { allocbuf(bp, off); bp->b_vflags |= BV_SCANNED; return (EBUSY); } bp->b_flags |= B_INVAL | B_NOCACHE; return (0); } /* * An allocdirect is being canceled due to a truncate. We must make sure * the journal entry is released in concert with the blkfree that releases * the storage. Completed journal entries must not be released until the * space is no longer pointed to by the inode or in the bitmap. */ static void cancel_allocdirect(adphead, adp, freeblks) struct allocdirectlst *adphead; struct allocdirect *adp; struct freeblks *freeblks; { struct freework *freework; struct newblk *newblk; struct worklist *wk; TAILQ_REMOVE(adphead, adp, ad_next); newblk = (struct newblk *)adp; freework = NULL; /* * Find the correct freework structure. 
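 * It is located on the fb_freeworkhd list by matching the physical block
 * number (fw_blkno == nb_newblkno).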
*/ LIST_FOREACH(wk, &freeblks->fb_freeworkhd, wk_list) { if (wk->wk_type != D_FREEWORK) continue; freework = WK_FREEWORK(wk); if (freework->fw_blkno == newblk->nb_newblkno) break; } if (freework == NULL) panic("cancel_allocdirect: Freework not found"); /* * If a newblk exists at all we still have the journal entry that * initiated the allocation so we do not need to journal the free. */ cancel_jfreeblk(freeblks, freework->fw_blkno); /* * If the journal hasn't been written the jnewblk must be passed * to the call to ffs_blkfree that reclaims the space. We accomplish * this by linking the journal dependency into the freework to be * freed when freework_freeblock() is called. If the journal has * been written we can simply reclaim the journal space when the * freeblks work is complete. */ freework->fw_jnewblk = cancel_newblk(newblk, &freework->fw_list, &freeblks->fb_jwork); WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list); } /* * Cancel a new block allocation. May be an indirect or direct block. We * remove it from various lists and return any journal record that needs to * be resolved by the caller. * * A special consideration is made for indirects which were never pointed * at on disk and will never be found once this block is released. */ static struct jnewblk * cancel_newblk(newblk, wk, wkhd) struct newblk *newblk; struct worklist *wk; struct workhead *wkhd; { struct jnewblk *jnewblk; CTR1(KTR_SUJ, "cancel_newblk: blkno %jd", newblk->nb_newblkno); newblk->nb_state |= GOINGAWAY; /* * Previously we traversed the completedhd on each indirdep * attached to this newblk to cancel them and gather journal * work. Since we need only the oldest journal segment and * the lowest point on the tree will always have the oldest * journal segment we are free to release the segments * of any subordinates and may leave the indirdep list to * indirdep_complete() when this newblk is freed. */ if (newblk->nb_state & ONDEPLIST) { newblk->nb_state &= ~ONDEPLIST; LIST_REMOVE(newblk, nb_deps); } if (newblk->nb_state & ONWORKLIST) WORKLIST_REMOVE(&newblk->nb_list); /* * If the journal entry hasn't been written we save a pointer to * the dependency that frees it until it is written or the * superseding operation completes. */ jnewblk = newblk->nb_jnewblk; if (jnewblk != NULL && wk != NULL) { newblk->nb_jnewblk = NULL; jnewblk->jn_dep = wk; } if (!LIST_EMPTY(&newblk->nb_jwork)) jwork_move(wkhd, &newblk->nb_jwork); /* * When truncating we must free the newdirblk early to remove * the pagedep from the hash before returning. */ if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL) free_newdirblk(WK_NEWDIRBLK(wk)); if (!LIST_EMPTY(&newblk->nb_newdirblk)) panic("cancel_newblk: extra newdirblk"); return (jnewblk); } /* * Schedule the freefrag associated with a newblk to be released once * the pointers are written and the previous block is no longer needed. */ static void newblk_freefrag(newblk) struct newblk *newblk; { struct freefrag *freefrag; if (newblk->nb_freefrag == NULL) return; freefrag = newblk->nb_freefrag; newblk->nb_freefrag = NULL; freefrag->ff_state |= COMPLETE; if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE) add_to_worklist(&freefrag->ff_list, 0); } /* * Free a newblk. Generate a new freefrag work request if appropriate. * This must be called after the inode pointer and any direct block pointers * are valid or fully removed via truncate or frag extension. 
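 * The per-filesystem softdep lock must be held by the caller, as asserted
 * by LOCK_OWNED() below.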
*/ static void free_newblk(newblk) struct newblk *newblk; { struct indirdep *indirdep; struct worklist *wk; KASSERT(newblk->nb_jnewblk == NULL, ("free_newblk: jnewblk %p still attached", newblk->nb_jnewblk)); KASSERT(newblk->nb_list.wk_type != D_NEWBLK, ("free_newblk: unclaimed newblk")); LOCK_OWNED(VFSTOUFS(newblk->nb_list.wk_mp)); newblk_freefrag(newblk); if (newblk->nb_state & ONDEPLIST) LIST_REMOVE(newblk, nb_deps); if (newblk->nb_state & ONWORKLIST) WORKLIST_REMOVE(&newblk->nb_list); LIST_REMOVE(newblk, nb_hash); if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL) free_newdirblk(WK_NEWDIRBLK(wk)); if (!LIST_EMPTY(&newblk->nb_newdirblk)) panic("free_newblk: extra newdirblk"); while ((indirdep = LIST_FIRST(&newblk->nb_indirdeps)) != NULL) indirdep_complete(indirdep); handle_jwork(&newblk->nb_jwork); WORKITEM_FREE(newblk, D_NEWBLK); } /* * Free a newdirblk. Clear the NEWBLOCK flag on its associated pagedep. * This routine must be called with splbio interrupts blocked. */ static void free_newdirblk(newdirblk) struct newdirblk *newdirblk; { struct pagedep *pagedep; struct diradd *dap; struct worklist *wk; LOCK_OWNED(VFSTOUFS(newdirblk->db_list.wk_mp)); WORKLIST_REMOVE(&newdirblk->db_list); /* * If the pagedep is still linked onto the directory buffer * dependency chain, then some of the entries on the * pd_pendinghd list may not be committed to disk yet. In * this case, we will simply clear the NEWBLOCK flag and * let the pd_pendinghd list be processed when the pagedep * is next written. If the pagedep is no longer on the buffer * dependency chain, then all the entries on the pd_pending * list are committed to disk and we can free them here. */ pagedep = newdirblk->db_pagedep; pagedep->pd_state &= ~NEWBLOCK; if ((pagedep->pd_state & ONWORKLIST) == 0) { while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) free_diradd(dap, NULL); /* * If no dependencies remain, the pagedep will be freed. */ free_pagedep(pagedep); } /* Should only ever be one item in the list. */ while ((wk = LIST_FIRST(&newdirblk->db_mkdir)) != NULL) { WORKLIST_REMOVE(wk); handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY); } WORKITEM_FREE(newdirblk, D_NEWDIRBLK); } /* * Prepare an inode to be freed. The actual free operation is not * done until the zero'ed inode has been written to disk. */ void softdep_freefile(pvp, ino, mode) struct vnode *pvp; ino_t ino; int mode; { struct inode *ip = VTOI(pvp); struct inodedep *inodedep; struct freefile *freefile; struct freeblks *freeblks; struct ufsmount *ump; ump = ITOUMP(ip); KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, ("softdep_freefile called on non-softdep filesystem")); /* * This sets up the inode de-allocation dependency. */ freefile = malloc(sizeof(struct freefile), M_FREEFILE, M_SOFTDEP_FLAGS); workitem_alloc(&freefile->fx_list, D_FREEFILE, pvp->v_mount); freefile->fx_mode = mode; freefile->fx_oldinum = ino; freefile->fx_devvp = ump->um_devvp; LIST_INIT(&freefile->fx_jwork); UFS_LOCK(ump); ump->um_fs->fs_pendinginodes += 1; UFS_UNLOCK(ump); /* * If the inodedep does not exist, then the zero'ed inode has * been written to disk. If the allocated inode has never been * written to disk, then the on-disk inode is zero'ed. In either * case we can free the file immediately. If the journal was * canceled before being written the inode will never make it to * disk and we must send the canceled journal entrys to * ffs_freefile() to be cleared in conjunction with the bitmap. * Any blocks waiting on the inode to write can be safely freed * here as it will never been written. 
*/ ACQUIRE_LOCK(ump); inodedep_lookup(pvp->v_mount, ino, 0, &inodedep); if (inodedep) { /* * Clear out freeblks that no longer need to reference * this inode. */ while ((freeblks = TAILQ_FIRST(&inodedep->id_freeblklst)) != NULL) { TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks, fb_next); freeblks->fb_state &= ~ONDEPLIST; } /* * Remove this inode from the unlinked list. */ if (inodedep->id_state & UNLINKED) { /* * Save the journal work to be freed with the bitmap * before we clear UNLINKED. Otherwise it can be lost * if the inode block is written. */ handle_bufwait(inodedep, &freefile->fx_jwork); clear_unlinked_inodedep(inodedep); /* * Re-acquire inodedep as we've dropped the * per-filesystem lock in clear_unlinked_inodedep(). */ inodedep_lookup(pvp->v_mount, ino, 0, &inodedep); } } if (inodedep == NULL || check_inode_unwritten(inodedep)) { FREE_LOCK(ump); handle_workitem_freefile(freefile); return; } if ((inodedep->id_state & DEPCOMPLETE) == 0) inodedep->id_state |= GOINGAWAY; WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list); FREE_LOCK(ump); if (ip->i_number == ino) ip->i_flag |= IN_MODIFIED; } /* * Check to see if an inode has never been written to disk. If * so free the inodedep and return success, otherwise return failure. * This routine must be called with splbio interrupts blocked. * * If we still have a bitmap dependency, then the inode has never * been written to disk. Drop the dependency as it is no longer * necessary since the inode is being deallocated. We set the * ALLCOMPLETE flags since the bitmap now properly shows that the * inode is not allocated. Even if the inode is actively being * written, it has been rolled back to its zero'ed state, so we * are ensured that a zero inode is what is on the disk. For short * lived files, this change will usually result in removing all the * dependencies from the inode so that it can be freed immediately. */ static int check_inode_unwritten(inodedep) struct inodedep *inodedep; { LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp)); if ((inodedep->id_state & (DEPCOMPLETE | UNLINKED)) != 0 || !LIST_EMPTY(&inodedep->id_dirremhd) || !LIST_EMPTY(&inodedep->id_pendinghd) || !LIST_EMPTY(&inodedep->id_bufwait) || !LIST_EMPTY(&inodedep->id_inowait) || !TAILQ_EMPTY(&inodedep->id_inoreflst) || !TAILQ_EMPTY(&inodedep->id_inoupdt) || !TAILQ_EMPTY(&inodedep->id_newinoupdt) || !TAILQ_EMPTY(&inodedep->id_extupdt) || !TAILQ_EMPTY(&inodedep->id_newextupdt) || !TAILQ_EMPTY(&inodedep->id_freeblklst) || inodedep->id_mkdiradd != NULL || inodedep->id_nlinkdelta != 0) return (0); /* * Another process might be in initiate_write_inodeblock_ufs[12] * trying to allocate memory without holding "Softdep Lock". 
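 * In that case IOSTARTED is set while id_savedino1 is still NULL, so the
 * check below refuses to free the inodedep until the write completes.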
*/ if ((inodedep->id_state & IOSTARTED) != 0 && inodedep->id_savedino1 == NULL) return (0); if (inodedep->id_state & ONDEPLIST) LIST_REMOVE(inodedep, id_deps); inodedep->id_state &= ~ONDEPLIST; inodedep->id_state |= ALLCOMPLETE; inodedep->id_bmsafemap = NULL; if (inodedep->id_state & ONWORKLIST) WORKLIST_REMOVE(&inodedep->id_list); if (inodedep->id_savedino1 != NULL) { free(inodedep->id_savedino1, M_SAVEDINO); inodedep->id_savedino1 = NULL; } if (free_inodedep(inodedep) == 0) panic("check_inode_unwritten: busy inode"); return (1); } static int check_inodedep_free(inodedep) struct inodedep *inodedep; { LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp)); if ((inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE || !LIST_EMPTY(&inodedep->id_dirremhd) || !LIST_EMPTY(&inodedep->id_pendinghd) || !LIST_EMPTY(&inodedep->id_bufwait) || !LIST_EMPTY(&inodedep->id_inowait) || !TAILQ_EMPTY(&inodedep->id_inoreflst) || !TAILQ_EMPTY(&inodedep->id_inoupdt) || !TAILQ_EMPTY(&inodedep->id_newinoupdt) || !TAILQ_EMPTY(&inodedep->id_extupdt) || !TAILQ_EMPTY(&inodedep->id_newextupdt) || !TAILQ_EMPTY(&inodedep->id_freeblklst) || inodedep->id_mkdiradd != NULL || inodedep->id_nlinkdelta != 0 || inodedep->id_savedino1 != NULL) return (0); return (1); } /* * Try to free an inodedep structure. Return 1 if it could be freed. */ static int free_inodedep(inodedep) struct inodedep *inodedep; { LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp)); if ((inodedep->id_state & (ONWORKLIST | UNLINKED)) != 0 || !check_inodedep_free(inodedep)) return (0); if (inodedep->id_state & ONDEPLIST) LIST_REMOVE(inodedep, id_deps); LIST_REMOVE(inodedep, id_hash); WORKITEM_FREE(inodedep, D_INODEDEP); return (1); } /* * Free the block referenced by a freework structure. The parent freeblks * structure is released and completed when the final cg bitmap reaches * the disk. This routine may be freeing a jnewblk which never made it to * disk in which case we do not have to wait as the operation is undone * in memory immediately. */ static void freework_freeblock(freework, key) struct freework *freework; u_long key; { struct freeblks *freeblks; struct jnewblk *jnewblk; struct ufsmount *ump; struct workhead wkhd; struct fs *fs; int bsize; int needj; ump = VFSTOUFS(freework->fw_list.wk_mp); LOCK_OWNED(ump); /* * Handle partial truncate separately. */ if (freework->fw_indir) { complete_trunc_indir(freework); return; } freeblks = freework->fw_freeblks; fs = ump->um_fs; needj = MOUNTEDSUJ(freeblks->fb_list.wk_mp) != 0; bsize = lfragtosize(fs, freework->fw_frags); LIST_INIT(&wkhd); /* * DEPCOMPLETE is cleared in indirblk_insert() if the block lives * on the indirblk hashtable and prevents premature freeing. */ freework->fw_state |= DEPCOMPLETE; /* * SUJ needs to wait for the segment referencing freed indirect * blocks to expire so that we know the checker will not confuse * a re-allocated indirect block with its old contents. */ if (needj && freework->fw_lbn <= -UFS_NDADDR) indirblk_insert(freework); /* * If we are canceling an existing jnewblk pass it to the free * routine, otherwise pass the freeblk which will ultimately * release the freeblks. If we're not journaling, we can just * free the freeblks immediately. 
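 * cancel_jnewblk() places the canceled journal record on wkhd for
 * ffs_blkfree(); otherwise the freework itself is queued there and marked
 * DELAYEDFREE so that its completion waits for the cylinder group buffer
 * write.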
*/ jnewblk = freework->fw_jnewblk; if (jnewblk != NULL) { cancel_jnewblk(jnewblk, &wkhd); needj = 0; } else if (needj) { freework->fw_state |= DELAYEDFREE; freeblks->fb_cgwait++; WORKLIST_INSERT(&wkhd, &freework->fw_list); } FREE_LOCK(ump); freeblks_free(ump, freeblks, btodb(bsize)); CTR4(KTR_SUJ, "freework_freeblock: ino %jd blkno %jd lbn %jd size %d", freeblks->fb_inum, freework->fw_blkno, freework->fw_lbn, bsize); ffs_blkfree(ump, fs, freeblks->fb_devvp, freework->fw_blkno, bsize, freeblks->fb_inum, freeblks->fb_vtype, &wkhd, key); ACQUIRE_LOCK(ump); /* * The jnewblk will be discarded and the bits in the map never * made it to disk. We can immediately free the freeblk. */ if (needj == 0) handle_written_freework(freework); } /* * We enqueue freework items that need processing back on the freeblks and * add the freeblks to the worklist. This makes it easier to find all work * required to flush a truncation in process_truncates(). */ static void freework_enqueue(freework) struct freework *freework; { struct freeblks *freeblks; freeblks = freework->fw_freeblks; if ((freework->fw_state & INPROGRESS) == 0) WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list); if ((freeblks->fb_state & (ONWORKLIST | INPROGRESS | ALLCOMPLETE)) == ALLCOMPLETE && LIST_EMPTY(&freeblks->fb_jblkdephd)) add_to_worklist(&freeblks->fb_list, WK_NODELAY); } /* * Start, continue, or finish the process of freeing an indirect block tree. * The free operation may be paused at any point with fw_off containing the * offset to restart from. This enables us to implement some flow control * for large truncates which may fan out and generate a huge number of * dependencies. */ static void handle_workitem_indirblk(freework) struct freework *freework; { struct freeblks *freeblks; struct ufsmount *ump; struct fs *fs; freeblks = freework->fw_freeblks; ump = VFSTOUFS(freeblks->fb_list.wk_mp); fs = ump->um_fs; if (freework->fw_state & DEPCOMPLETE) { handle_written_freework(freework); return; } if (freework->fw_off == NINDIR(fs)) { freework_freeblock(freework, SINGLETON_KEY); return; } freework->fw_state |= INPROGRESS; FREE_LOCK(ump); indir_trunc(freework, fsbtodb(fs, freework->fw_blkno), freework->fw_lbn); ACQUIRE_LOCK(ump); } /* * Called when a freework structure attached to a cg buf is written. The * ref on either the parent or the freeblks structure is released and * the freeblks is added back to the worklist if there is more work to do. */ static void handle_written_freework(freework) struct freework *freework; { struct freeblks *freeblks; struct freework *parent; freeblks = freework->fw_freeblks; parent = freework->fw_parent; if (freework->fw_state & DELAYEDFREE) freeblks->fb_cgwait--; freework->fw_state |= COMPLETE; if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE) WORKITEM_FREE(freework, D_FREEWORK); if (parent) { if (--parent->fw_ref == 0) freework_enqueue(parent); return; } if (--freeblks->fb_ref != 0) return; if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST | INPROGRESS)) == ALLCOMPLETE && LIST_EMPTY(&freeblks->fb_jblkdephd)) add_to_worklist(&freeblks->fb_list, WK_NODELAY); } /* * This workitem routine performs the block de-allocation. * The workitem is added to the pending list after the updated * inode block has been written to disk. As mentioned above, * checks regarding the number of blocks de-allocated (compared * to the number of blocks allocated for the file) are also * performed in this function. 
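 * The fb_freeworkhd list drained below may carry D_DIRREM, D_ALLOCDIRECT,
 * D_ALLOCINDIR and D_FREEWORK items.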
*/ static int handle_workitem_freeblocks(freeblks, flags) struct freeblks *freeblks; int flags; { struct freework *freework; struct newblk *newblk; struct allocindir *aip; struct ufsmount *ump; struct worklist *wk; u_long key; KASSERT(LIST_EMPTY(&freeblks->fb_jblkdephd), ("handle_workitem_freeblocks: Journal entries not written.")); ump = VFSTOUFS(freeblks->fb_list.wk_mp); key = ffs_blkrelease_start(ump, freeblks->fb_devvp, freeblks->fb_inum); ACQUIRE_LOCK(ump); while ((wk = LIST_FIRST(&freeblks->fb_freeworkhd)) != NULL) { WORKLIST_REMOVE(wk); switch (wk->wk_type) { case D_DIRREM: wk->wk_state |= COMPLETE; add_to_worklist(wk, 0); continue; case D_ALLOCDIRECT: free_newblk(WK_NEWBLK(wk)); continue; case D_ALLOCINDIR: aip = WK_ALLOCINDIR(wk); freework = NULL; if (aip->ai_state & DELAYEDFREE) { FREE_LOCK(ump); freework = newfreework(ump, freeblks, NULL, aip->ai_lbn, aip->ai_newblkno, ump->um_fs->fs_frag, 0, 0); ACQUIRE_LOCK(ump); } newblk = WK_NEWBLK(wk); if (newblk->nb_jnewblk) { freework->fw_jnewblk = newblk->nb_jnewblk; newblk->nb_jnewblk->jn_dep = &freework->fw_list; newblk->nb_jnewblk = NULL; } free_newblk(newblk); continue; case D_FREEWORK: freework = WK_FREEWORK(wk); if (freework->fw_lbn <= -UFS_NDADDR) handle_workitem_indirblk(freework); else freework_freeblock(freework, key); continue; default: panic("handle_workitem_freeblocks: Unknown type %s", TYPENAME(wk->wk_type)); } } if (freeblks->fb_ref != 0) { freeblks->fb_state &= ~INPROGRESS; wake_worklist(&freeblks->fb_list); freeblks = NULL; } FREE_LOCK(ump); ffs_blkrelease_finish(ump, key); if (freeblks) return handle_complete_freeblocks(freeblks, flags); return (0); } /* * Handle completion of block free via truncate. This allows fs_pending * to track the actual free block count more closely than if we only updated * it at the end. We must be careful to handle cases where the block count * on free was incorrect. */ static void freeblks_free(ump, freeblks, blocks) struct ufsmount *ump; struct freeblks *freeblks; int blocks; { struct fs *fs; ufs2_daddr_t remain; UFS_LOCK(ump); remain = -freeblks->fb_chkcnt; freeblks->fb_chkcnt += blocks; if (remain > 0) { if (remain < blocks) blocks = remain; fs = ump->um_fs; fs->fs_pendingblocks -= blocks; } UFS_UNLOCK(ump); } /* * Once all of the freework workitems are complete we can retire the * freeblocks dependency and any journal work awaiting completion. This * can not be called until all other dependencies are stable on disk. */ static int handle_complete_freeblocks(freeblks, flags) struct freeblks *freeblks; int flags; { struct inodedep *inodedep; struct inode *ip; struct vnode *vp; struct fs *fs; struct ufsmount *ump; ufs2_daddr_t spare; ump = VFSTOUFS(freeblks->fb_list.wk_mp); fs = ump->um_fs; flags = LK_EXCLUSIVE | flags; spare = freeblks->fb_chkcnt; /* * If we did not release the expected number of blocks we may have * to adjust the inode block count here. Only do so if it wasn't * a truncation to zero and the modrev still matches. */ if (spare && freeblks->fb_len != 0) { if (ffs_vgetf(freeblks->fb_list.wk_mp, freeblks->fb_inum, flags, &vp, FFSV_FORCEINSMQ) != 0) return (EBUSY); ip = VTOI(vp); if (DIP(ip, i_modrev) == freeblks->fb_modrev) { DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - spare); ip->i_flag |= IN_CHANGE; /* * We must wait so this happens before the * journal is reclaimed. */ ffs_update(vp, 1); } vput(vp); } if (spare < 0) { UFS_LOCK(ump); fs->fs_pendingblocks += spare; UFS_UNLOCK(ump); } #ifdef QUOTA /* Handle spare. 
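 * spare is the difference between the block count we expected to free and
 * the count actually freed; the quota is adjusted by the same amount so
 * that it stays consistent with the inode update above.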
*/ if (spare) quotaadj(freeblks->fb_quota, ump, -spare); quotarele(freeblks->fb_quota); #endif ACQUIRE_LOCK(ump); if (freeblks->fb_state & ONDEPLIST) { inodedep_lookup(freeblks->fb_list.wk_mp, freeblks->fb_inum, 0, &inodedep); TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks, fb_next); freeblks->fb_state &= ~ONDEPLIST; if (TAILQ_EMPTY(&inodedep->id_freeblklst)) free_inodedep(inodedep); } /* * All of the freeblock deps must be complete prior to this call * so it's now safe to complete earlier outstanding journal entries. */ handle_jwork(&freeblks->fb_jwork); WORKITEM_FREE(freeblks, D_FREEBLKS); FREE_LOCK(ump); return (0); } /* * Release blocks associated with the freeblks and stored in the indirect * block dbn. If level is greater than SINGLE, the block is an indirect block * and recursive calls to indirtrunc must be used to cleanse other indirect * blocks. * * This handles partial and complete truncation of blocks. Partial is noted * with goingaway == 0. In this case the freework is completed after the * zero'd indirects are written to disk. For full truncation the freework * is completed after the block is freed. */ static void indir_trunc(freework, dbn, lbn) struct freework *freework; ufs2_daddr_t dbn; ufs_lbn_t lbn; { struct freework *nfreework; struct workhead wkhd; struct freeblks *freeblks; struct buf *bp; struct fs *fs; struct indirdep *indirdep; struct ufsmount *ump; ufs1_daddr_t *bap1; ufs2_daddr_t nb, nnb, *bap2; ufs_lbn_t lbnadd, nlbn; u_long key; int nblocks, ufs1fmt, freedblocks; int goingaway, freedeps, needj, level, cnt, i; freeblks = freework->fw_freeblks; ump = VFSTOUFS(freeblks->fb_list.wk_mp); fs = ump->um_fs; /* * Get buffer of block pointers to be freed. There are three cases: * * 1) Partial truncate caches the indirdep pointer in the freework * which provides us a back copy to the save bp which holds the * pointers we want to clear. When this completes the zero * pointers are written to the real copy. * 2) The indirect is being completely truncated, cancel_indirdep() * eliminated the real copy and placed the indirdep on the saved * copy. The indirdep and buf are discarded when this completes. * 3) The indirect was not in memory, we read a copy off of the disk * using the devvp and drop and invalidate the buffer when we're * done. */ goingaway = 1; indirdep = NULL; if (freework->fw_indir != NULL) { goingaway = 0; indirdep = freework->fw_indir; bp = indirdep->ir_savebp; if (bp == NULL || bp->b_blkno != dbn) panic("indir_trunc: Bad saved buf %p blkno %jd", bp, (intmax_t)dbn); } else if ((bp = incore(&freeblks->fb_devvp->v_bufobj, dbn)) != NULL) { /* * The lock prevents the buf dep list from changing and * indirects on devvp should only ever have one dependency. */ indirdep = WK_INDIRDEP(LIST_FIRST(&bp->b_dep)); if (indirdep == NULL || (indirdep->ir_state & GOINGAWAY) == 0) panic("indir_trunc: Bad indirdep %p from buf %p", indirdep, bp); } else if (bread(freeblks->fb_devvp, dbn, (int)fs->fs_bsize, NOCRED, &bp) != 0) { brelse(bp); return; } ACQUIRE_LOCK(ump); /* Protects against a race with complete_trunc_indir(). */ freework->fw_state &= ~INPROGRESS; /* * If we have an indirdep we need to enforce the truncation order * and discard it when it is complete. */ if (indirdep) { if (freework != TAILQ_FIRST(&indirdep->ir_trunc) && !TAILQ_EMPTY(&indirdep->ir_trunc)) { /* * Add the complete truncate to the list on the * indirdep to enforce in-order processing. 
*/ if (freework->fw_indir == NULL) TAILQ_INSERT_TAIL(&indirdep->ir_trunc, freework, fw_next); FREE_LOCK(ump); return; } /* * If we're goingaway, free the indirdep. Otherwise it will * linger until the write completes. */ if (goingaway) free_indirdep(indirdep); } FREE_LOCK(ump); /* Initialize pointers depending on block size. */ if (ump->um_fstype == UFS1) { bap1 = (ufs1_daddr_t *)bp->b_data; nb = bap1[freework->fw_off]; ufs1fmt = 1; bap2 = NULL; } else { bap2 = (ufs2_daddr_t *)bp->b_data; nb = bap2[freework->fw_off]; ufs1fmt = 0; bap1 = NULL; } level = lbn_level(lbn); needj = MOUNTEDSUJ(UFSTOVFS(ump)) != 0; lbnadd = lbn_offset(fs, level); nblocks = btodb(fs->fs_bsize); nfreework = freework; freedeps = 0; cnt = 0; /* * Reclaim blocks. Traverses into nested indirect levels and * arranges for the current level to be freed when subordinates * are free when journaling. */ key = ffs_blkrelease_start(ump, freeblks->fb_devvp, freeblks->fb_inum); for (i = freework->fw_off; i < NINDIR(fs); i++, nb = nnb) { if (i != NINDIR(fs) - 1) { if (ufs1fmt) nnb = bap1[i+1]; else nnb = bap2[i+1]; } else nnb = 0; if (nb == 0) continue; cnt++; if (level != 0) { nlbn = (lbn + 1) - (i * lbnadd); if (needj != 0) { nfreework = newfreework(ump, freeblks, freework, nlbn, nb, fs->fs_frag, 0, 0); freedeps++; } indir_trunc(nfreework, fsbtodb(fs, nb), nlbn); } else { struct freedep *freedep; /* * Attempt to aggregate freedep dependencies for * all blocks being released to the same CG. */ LIST_INIT(&wkhd); if (needj != 0 && (nnb == 0 || (dtog(fs, nb) != dtog(fs, nnb)))) { freedep = newfreedep(freework); WORKLIST_INSERT_UNLOCKED(&wkhd, &freedep->fd_list); freedeps++; } CTR3(KTR_SUJ, "indir_trunc: ino %jd blkno %jd size %d", freeblks->fb_inum, nb, fs->fs_bsize); ffs_blkfree(ump, fs, freeblks->fb_devvp, nb, fs->fs_bsize, freeblks->fb_inum, freeblks->fb_vtype, &wkhd, key); } } ffs_blkrelease_finish(ump, key); if (goingaway) { bp->b_flags |= B_INVAL | B_NOCACHE; brelse(bp); } freedblocks = 0; if (level == 0) freedblocks = (nblocks * cnt); if (needj == 0) freedblocks += nblocks; freeblks_free(ump, freeblks, freedblocks); /* * If we are journaling set up the ref counts and offset so this * indirect can be completed when its children are free. */ if (needj) { ACQUIRE_LOCK(ump); freework->fw_off = i; freework->fw_ref += freedeps; freework->fw_ref -= NINDIR(fs) + 1; if (level == 0) freeblks->fb_cgwait += freedeps; if (freework->fw_ref == 0) freework_freeblock(freework, SINGLETON_KEY); FREE_LOCK(ump); return; } /* * If we're not journaling we can free the indirect now. */ dbn = dbtofsb(fs, dbn); CTR3(KTR_SUJ, "indir_trunc 2: ino %jd blkno %jd size %d", freeblks->fb_inum, dbn, fs->fs_bsize); ffs_blkfree(ump, fs, freeblks->fb_devvp, dbn, fs->fs_bsize, freeblks->fb_inum, freeblks->fb_vtype, NULL, SINGLETON_KEY); /* Non SUJ softdep does single-threaded truncations. */ if (freework->fw_blkno == dbn) { freework->fw_state |= ALLCOMPLETE; ACQUIRE_LOCK(ump); handle_written_freework(freework); FREE_LOCK(ump); } return; } /* * Cancel an allocindir when it is removed via truncation. When bp is not * NULL the indirect never appeared on disk and is scheduled to be freed * independently of the indir so we can more easily track journal work. 
*/ static void cancel_allocindir(aip, bp, freeblks, trunc) struct allocindir *aip; struct buf *bp; struct freeblks *freeblks; int trunc; { struct indirdep *indirdep; struct freefrag *freefrag; struct newblk *newblk; newblk = (struct newblk *)aip; LIST_REMOVE(aip, ai_next); /* * We must eliminate the pointer in bp if it must be freed on its * own due to partial truncate or pending journal work. */ if (bp && (trunc || newblk->nb_jnewblk)) { /* * Clear the pointer and mark the aip to be freed * directly if it never existed on disk. */ aip->ai_state |= DELAYEDFREE; indirdep = aip->ai_indirdep; if (indirdep->ir_state & UFS1FMT) ((ufs1_daddr_t *)bp->b_data)[aip->ai_offset] = 0; else ((ufs2_daddr_t *)bp->b_data)[aip->ai_offset] = 0; } /* * When truncating the previous pointer will be freed via * savedbp. Eliminate the freefrag which would dup free. */ if (trunc && (freefrag = newblk->nb_freefrag) != NULL) { newblk->nb_freefrag = NULL; if (freefrag->ff_jdep) cancel_jfreefrag( WK_JFREEFRAG(freefrag->ff_jdep)); jwork_move(&freeblks->fb_jwork, &freefrag->ff_jwork); WORKITEM_FREE(freefrag, D_FREEFRAG); } /* * If the journal hasn't been written the jnewblk must be passed * to the call to ffs_blkfree that reclaims the space. We accomplish * this by leaving the journal dependency on the newblk to be freed * when a freework is created in handle_workitem_freeblocks(). */ cancel_newblk(newblk, NULL, &freeblks->fb_jwork); WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list); } /* * Create the mkdir dependencies for . and .. in a new directory. Link them * in to a newdirblk so any subsequent additions are tracked properly. The * caller is responsible for adding the mkdir1 dependency to the journal * and updating id_mkdiradd. This function returns with the per-filesystem * lock held. */ static struct mkdir * setup_newdir(dap, newinum, dinum, newdirbp, mkdirp) struct diradd *dap; ino_t newinum; ino_t dinum; struct buf *newdirbp; struct mkdir **mkdirp; { struct newblk *newblk; struct pagedep *pagedep; struct inodedep *inodedep; struct newdirblk *newdirblk; struct mkdir *mkdir1, *mkdir2; struct worklist *wk; struct jaddref *jaddref; struct ufsmount *ump; struct mount *mp; mp = dap->da_list.wk_mp; ump = VFSTOUFS(mp); newdirblk = malloc(sizeof(struct newdirblk), M_NEWDIRBLK, M_SOFTDEP_FLAGS); workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp); LIST_INIT(&newdirblk->db_mkdir); mkdir1 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS); workitem_alloc(&mkdir1->md_list, D_MKDIR, mp); mkdir1->md_state = ATTACHED | MKDIR_BODY; mkdir1->md_diradd = dap; mkdir1->md_jaddref = NULL; mkdir2 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS); workitem_alloc(&mkdir2->md_list, D_MKDIR, mp); mkdir2->md_state = ATTACHED | MKDIR_PARENT; mkdir2->md_diradd = dap; mkdir2->md_jaddref = NULL; if (MOUNTEDSUJ(mp) == 0) { mkdir1->md_state |= DEPCOMPLETE; mkdir2->md_state |= DEPCOMPLETE; } /* * Dependency on "." and ".." being written to disk. */ mkdir1->md_buf = newdirbp; ACQUIRE_LOCK(VFSTOUFS(mp)); LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir1, md_mkdirs); /* * We must link the pagedep, allocdirect, and newdirblk for * the initial file page so the pointer to the new directory * is not written until the directory contents are live and * any subsequent additions are not marked live until the * block is reachable via the inode. 
*/ if (pagedep_lookup(mp, newdirbp, newinum, 0, 0, &pagedep) == 0) panic("setup_newdir: lost pagedep"); LIST_FOREACH(wk, &newdirbp->b_dep, wk_list) if (wk->wk_type == D_ALLOCDIRECT) break; if (wk == NULL) panic("setup_newdir: lost allocdirect"); if (pagedep->pd_state & NEWBLOCK) panic("setup_newdir: NEWBLOCK already set"); newblk = WK_NEWBLK(wk); pagedep->pd_state |= NEWBLOCK; pagedep->pd_newdirblk = newdirblk; newdirblk->db_pagedep = pagedep; WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list); WORKLIST_INSERT(&newdirblk->db_mkdir, &mkdir1->md_list); /* * Look up the inodedep for the parent directory so that we * can link mkdir2 into the pending dotdot jaddref or * the inode write if there is none. If the inode is * ALLCOMPLETE and no jaddref is present all dependencies have * been satisfied and mkdir2 can be freed. */ inodedep_lookup(mp, dinum, 0, &inodedep); if (MOUNTEDSUJ(mp)) { if (inodedep == NULL) panic("setup_newdir: Lost parent."); jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, inoreflst); KASSERT(jaddref != NULL && jaddref->ja_parent == newinum && (jaddref->ja_state & MKDIR_PARENT), ("setup_newdir: bad dotdot jaddref %p", jaddref)); LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir2, md_mkdirs); mkdir2->md_jaddref = jaddref; jaddref->ja_mkdir = mkdir2; } else if (inodedep == NULL || (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { dap->da_state &= ~MKDIR_PARENT; WORKITEM_FREE(mkdir2, D_MKDIR); mkdir2 = NULL; } else { LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir2, md_mkdirs); WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir2->md_list); } *mkdirp = mkdir2; return (mkdir1); } /* * Directory entry addition dependencies. * * When adding a new directory entry, the inode (with its incremented link * count) must be written to disk before the directory entry's pointer to it. * Also, if the inode is newly allocated, the corresponding freemap must be * updated (on disk) before the directory entry's pointer. These requirements * are met via undo/redo on the directory entry's pointer, which consists * simply of the inode number. * * As directory entries are added and deleted, the free space within a * directory block can become fragmented. The ufs filesystem will compact * a fragmented directory block to make space for a new entry. When this * occurs, the offsets of previously added entries change. Any "diradd" * dependency structures corresponding to these entries must be updated with * the new offsets. */ /* * This routine is called after the in-memory inode's link * count has been incremented, but before the directory entry's * pointer to the inode has been set. 
*/ int softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk) struct buf *bp; /* buffer containing directory block */ struct inode *dp; /* inode for directory */ off_t diroffset; /* offset of new entry in directory */ ino_t newinum; /* inode referenced by new directory entry */ struct buf *newdirbp; /* non-NULL => contents of new mkdir */ int isnewblk; /* entry is in a newly allocated block */ { int offset; /* offset of new entry within directory block */ ufs_lbn_t lbn; /* block in directory containing new entry */ struct fs *fs; struct diradd *dap; struct newblk *newblk; struct pagedep *pagedep; struct inodedep *inodedep; struct newdirblk *newdirblk; struct mkdir *mkdir1, *mkdir2; struct jaddref *jaddref; struct ufsmount *ump; struct mount *mp; int isindir; mp = ITOVFS(dp); ump = VFSTOUFS(mp); KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_setup_directory_add called on non-softdep filesystem")); /* * Whiteouts have no dependencies. */ if (newinum == UFS_WINO) { if (newdirbp != NULL) bdwrite(newdirbp); return (0); } jaddref = NULL; mkdir1 = mkdir2 = NULL; fs = ump->um_fs; lbn = lblkno(fs, diroffset); offset = blkoff(fs, diroffset); dap = malloc(sizeof(struct diradd), M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO); workitem_alloc(&dap->da_list, D_DIRADD, mp); dap->da_offset = offset; dap->da_newinum = newinum; dap->da_state = ATTACHED; LIST_INIT(&dap->da_jwork); isindir = bp->b_lblkno >= UFS_NDADDR; newdirblk = NULL; if (isnewblk && (isindir ? blkoff(fs, diroffset) : fragoff(fs, diroffset)) == 0) { newdirblk = malloc(sizeof(struct newdirblk), M_NEWDIRBLK, M_SOFTDEP_FLAGS); workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp); LIST_INIT(&newdirblk->db_mkdir); } /* * If we're creating a new directory setup the dependencies and set * the dap state to wait for them. Otherwise it's COMPLETE and * we can move on. */ if (newdirbp == NULL) { dap->da_state |= DEPCOMPLETE; ACQUIRE_LOCK(ump); } else { dap->da_state |= MKDIR_BODY | MKDIR_PARENT; mkdir1 = setup_newdir(dap, newinum, dp->i_number, newdirbp, &mkdir2); } /* * Link into parent directory pagedep to await its being written. */ pagedep_lookup(mp, bp, dp->i_number, lbn, DEPALLOC, &pagedep); #ifdef DEBUG if (diradd_lookup(pagedep, offset) != NULL) panic("softdep_setup_directory_add: %p already at off %d\n", diradd_lookup(pagedep, offset), offset); #endif dap->da_pagedep = pagedep; LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap, da_pdlist); inodedep_lookup(mp, newinum, DEPALLOC, &inodedep); /* * If we're journaling, link the diradd into the jaddref so it * may be completed after the journal entry is written. Otherwise, * link the diradd into its inodedep. If the inode is not yet * written place it on the bufwait list, otherwise do the post-inode * write processing to put it on the id_pendinghd list. */ if (MOUNTEDSUJ(mp)) { jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, inoreflst); KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number, ("softdep_setup_directory_add: bad jaddref %p", jaddref)); jaddref->ja_diroff = diroffset; jaddref->ja_diradd = dap; add_to_journal(&jaddref->ja_list); } else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) diradd_inode_written(dap, inodedep); else WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); /* * Add the journal entries for . and .. links now that the primary * link is written. 
*/ if (mkdir1 != NULL && MOUNTEDSUJ(mp)) { jaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref, inoreflst, if_deps); KASSERT(jaddref != NULL && jaddref->ja_ino == jaddref->ja_parent && (jaddref->ja_state & MKDIR_BODY), ("softdep_setup_directory_add: bad dot jaddref %p", jaddref)); mkdir1->md_jaddref = jaddref; jaddref->ja_mkdir = mkdir1; /* * It is important that the dotdot journal entry * is added prior to the dot entry since dot writes * both the dot and dotdot links. These both must * be added after the primary link for the journal * to remain consistent. */ add_to_journal(&mkdir2->md_jaddref->ja_list); add_to_journal(&jaddref->ja_list); } /* * If we are adding a new directory remember this diradd so that if * we rename it we can keep the dot and dotdot dependencies. If * we are adding a new name for an inode that has a mkdiradd we * must be in rename and we have to move the dot and dotdot * dependencies to this new name. The old name is being orphaned * soon. */ if (mkdir1 != NULL) { if (inodedep->id_mkdiradd != NULL) panic("softdep_setup_directory_add: Existing mkdir"); inodedep->id_mkdiradd = dap; } else if (inodedep->id_mkdiradd) merge_diradd(inodedep, dap); if (newdirblk != NULL) { /* * There is nothing to do if we are already tracking * this block. */ if ((pagedep->pd_state & NEWBLOCK) != 0) { WORKITEM_FREE(newdirblk, D_NEWDIRBLK); FREE_LOCK(ump); return (0); } if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk) == 0) panic("softdep_setup_directory_add: lost entry"); WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list); pagedep->pd_state |= NEWBLOCK; pagedep->pd_newdirblk = newdirblk; newdirblk->db_pagedep = pagedep; FREE_LOCK(ump); /* * If we extended into an indirect signal direnter to sync. */ if (isindir) return (1); return (0); } FREE_LOCK(ump); return (0); } /* * This procedure is called to change the offset of a directory * entry when compacting a directory block which must be owned * exclusively by the caller. Note that the actual entry movement * must be done in this procedure to ensure that no I/O completions * occur while the move is in progress. */ void softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize) struct buf *bp; /* Buffer holding directory block. */ struct inode *dp; /* inode for directory */ caddr_t base; /* address of dp->i_offset */ caddr_t oldloc; /* address of old directory location */ caddr_t newloc; /* address of new directory location */ int entrysize; /* size of directory entry */ { int offset, oldoffset, newoffset; struct pagedep *pagedep; struct jmvref *jmvref; struct diradd *dap; struct direct *de; struct mount *mp; struct ufsmount *ump; ufs_lbn_t lbn; int flags; mp = ITOVFS(dp); ump = VFSTOUFS(mp); KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_change_directoryentry_offset called on " "non-softdep filesystem")); de = (struct direct *)oldloc; jmvref = NULL; flags = 0; /* * Moves are always journaled as it would be too complex to * determine if any affected adds or removes are present in the * journal. 
*/ if (MOUNTEDSUJ(mp)) { flags = DEPALLOC; jmvref = newjmvref(dp, de->d_ino, dp->i_offset + (oldloc - base), dp->i_offset + (newloc - base)); } lbn = lblkno(ump->um_fs, dp->i_offset); offset = blkoff(ump->um_fs, dp->i_offset); oldoffset = offset + (oldloc - base); newoffset = offset + (newloc - base); ACQUIRE_LOCK(ump); if (pagedep_lookup(mp, bp, dp->i_number, lbn, flags, &pagedep) == 0) goto done; dap = diradd_lookup(pagedep, oldoffset); if (dap) { dap->da_offset = newoffset; newoffset = DIRADDHASH(newoffset); oldoffset = DIRADDHASH(oldoffset); if ((dap->da_state & ALLCOMPLETE) != ALLCOMPLETE && newoffset != oldoffset) { LIST_REMOVE(dap, da_pdlist); LIST_INSERT_HEAD(&pagedep->pd_diraddhd[newoffset], dap, da_pdlist); } } done: if (jmvref) { jmvref->jm_pagedep = pagedep; LIST_INSERT_HEAD(&pagedep->pd_jmvrefhd, jmvref, jm_deps); add_to_journal(&jmvref->jm_list); } bcopy(oldloc, newloc, entrysize); FREE_LOCK(ump); } /* * Move the mkdir dependencies and journal work from one diradd to another * when renaming a directory. The new name must depend on the mkdir deps * completing as the old name did. Directories can only have one valid link * at a time so one must be canonical. */ static void merge_diradd(inodedep, newdap) struct inodedep *inodedep; struct diradd *newdap; { struct diradd *olddap; struct mkdir *mkdir, *nextmd; struct ufsmount *ump; short state; olddap = inodedep->id_mkdiradd; inodedep->id_mkdiradd = newdap; if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { newdap->da_state &= ~DEPCOMPLETE; ump = VFSTOUFS(inodedep->id_list.wk_mp); for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir; mkdir = nextmd) { nextmd = LIST_NEXT(mkdir, md_mkdirs); if (mkdir->md_diradd != olddap) continue; mkdir->md_diradd = newdap; state = mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY); newdap->da_state |= state; olddap->da_state &= ~state; if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) break; } if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) panic("merge_diradd: unfound ref"); } /* * Any mkdir related journal items are not safe to be freed until * the new name is stable. */ jwork_move(&newdap->da_jwork, &olddap->da_jwork); olddap->da_state |= DEPCOMPLETE; complete_diradd(olddap); } /* * Move the diradd to the pending list when all diradd dependencies are * complete. */ static void complete_diradd(dap) struct diradd *dap; { struct pagedep *pagedep; if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { if (dap->da_state & DIRCHG) pagedep = dap->da_previous->dm_pagedep; else pagedep = dap->da_pagedep; LIST_REMOVE(dap, da_pdlist); LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); } } /* * Cancel a diradd when a dirrem overlaps with it. We must cancel the journal * add entries and conditonally journal the remove. */ static void cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref) struct diradd *dap; struct dirrem *dirrem; struct jremref *jremref; struct jremref *dotremref; struct jremref *dotdotremref; { struct inodedep *inodedep; struct jaddref *jaddref; struct inoref *inoref; struct ufsmount *ump; struct mkdir *mkdir; /* * If no remove references were allocated we're on a non-journaled * filesystem and can skip the cancel step. */ if (jremref == NULL) { free_diradd(dap, NULL); return; } /* * Cancel the primary name an free it if it does not require * journaling. */ if (inodedep_lookup(dap->da_list.wk_mp, dap->da_newinum, 0, &inodedep) != 0) { /* Abort the addref that reference this diradd. 
*/ TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) { if (inoref->if_list.wk_type != D_JADDREF) continue; jaddref = (struct jaddref *)inoref; if (jaddref->ja_diradd != dap) continue; if (cancel_jaddref(jaddref, inodedep, &dirrem->dm_jwork) == 0) { free_jremref(jremref); jremref = NULL; } break; } } /* * Cancel subordinate names and free them if they do not require * journaling. */ if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { ump = VFSTOUFS(dap->da_list.wk_mp); LIST_FOREACH(mkdir, &ump->softdep_mkdirlisthd, md_mkdirs) { if (mkdir->md_diradd != dap) continue; if ((jaddref = mkdir->md_jaddref) == NULL) continue; mkdir->md_jaddref = NULL; if (mkdir->md_state & MKDIR_PARENT) { if (cancel_jaddref(jaddref, NULL, &dirrem->dm_jwork) == 0) { free_jremref(dotdotremref); dotdotremref = NULL; } } else { if (cancel_jaddref(jaddref, inodedep, &dirrem->dm_jwork) == 0) { free_jremref(dotremref); dotremref = NULL; } } } } if (jremref) journal_jremref(dirrem, jremref, inodedep); if (dotremref) journal_jremref(dirrem, dotremref, inodedep); if (dotdotremref) journal_jremref(dirrem, dotdotremref, NULL); jwork_move(&dirrem->dm_jwork, &dap->da_jwork); free_diradd(dap, &dirrem->dm_jwork); } /* * Free a diradd dependency structure. This routine must be called * with splbio interrupts blocked. */ static void free_diradd(dap, wkhd) struct diradd *dap; struct workhead *wkhd; { struct dirrem *dirrem; struct pagedep *pagedep; struct inodedep *inodedep; struct mkdir *mkdir, *nextmd; struct ufsmount *ump; ump = VFSTOUFS(dap->da_list.wk_mp); LOCK_OWNED(ump); LIST_REMOVE(dap, da_pdlist); if (dap->da_state & ONWORKLIST) WORKLIST_REMOVE(&dap->da_list); if ((dap->da_state & DIRCHG) == 0) { pagedep = dap->da_pagedep; } else { dirrem = dap->da_previous; pagedep = dirrem->dm_pagedep; dirrem->dm_dirinum = pagedep->pd_ino; dirrem->dm_state |= COMPLETE; if (LIST_EMPTY(&dirrem->dm_jremrefhd)) add_to_worklist(&dirrem->dm_list, 0); } if (inodedep_lookup(pagedep->pd_list.wk_mp, dap->da_newinum, 0, &inodedep) != 0) if (inodedep->id_mkdiradd == dap) inodedep->id_mkdiradd = NULL; if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir; mkdir = nextmd) { nextmd = LIST_NEXT(mkdir, md_mkdirs); if (mkdir->md_diradd != dap) continue; dap->da_state &= ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)); LIST_REMOVE(mkdir, md_mkdirs); if (mkdir->md_state & ONWORKLIST) WORKLIST_REMOVE(&mkdir->md_list); if (mkdir->md_jaddref != NULL) panic("free_diradd: Unexpected jaddref"); WORKITEM_FREE(mkdir, D_MKDIR); if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) break; } if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) panic("free_diradd: unfound ref"); } if (inodedep) free_inodedep(inodedep); /* * Free any journal segments waiting for the directory write. */ handle_jwork(&dap->da_jwork); WORKITEM_FREE(dap, D_DIRADD); } /* * Directory entry removal dependencies. * * When removing a directory entry, the entry's inode pointer must be * zero'ed on disk before the corresponding inode's link count is decremented * (possibly freeing the inode for re-use). This dependency is handled by * updating the directory entry but delaying the inode count reduction until * after the directory block has been written to disk. After this point, the * inode count can be decremented whenever it is convenient. */ /* * This routine should be called immediately after removing * a directory entry. 
The inode's link count should not be * decremented by the calling procedure -- the soft updates * code will do this task when it is safe. */ void softdep_setup_remove(bp, dp, ip, isrmdir) struct buf *bp; /* buffer containing directory block */ struct inode *dp; /* inode for the directory being modified */ struct inode *ip; /* inode for directory entry being removed */ int isrmdir; /* indicates if doing RMDIR */ { struct dirrem *dirrem, *prevdirrem; struct inodedep *inodedep; struct ufsmount *ump; int direct; ump = ITOUMP(ip); KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, ("softdep_setup_remove called on non-softdep filesystem")); /* * Allocate a new dirrem if appropriate and ACQUIRE_LOCK. We want * newdirrem() to setup the full directory remove which requires * isrmdir > 1. */ dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); /* * Add the dirrem to the inodedep's pending remove list for quick * discovery later. */ if (inodedep_lookup(UFSTOVFS(ump), ip->i_number, 0, &inodedep) == 0) panic("softdep_setup_remove: Lost inodedep."); KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked")); dirrem->dm_state |= ONDEPLIST; LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext); /* * If the COMPLETE flag is clear, then there were no active * entries and we want to roll back to a zeroed entry until * the new inode is committed to disk. If the COMPLETE flag is * set then we have deleted an entry that never made it to * disk. If the entry we deleted resulted from a name change, * then the old name still resides on disk. We cannot delete * its inode (returned to us in prevdirrem) until the zeroed * directory entry gets to disk. The new inode has never been * referenced on the disk, so can be deleted immediately. */ if ((dirrem->dm_state & COMPLETE) == 0) { LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem, dm_next); FREE_LOCK(ump); } else { if (prevdirrem != NULL) LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, prevdirrem, dm_next); dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino; direct = LIST_EMPTY(&dirrem->dm_jremrefhd); FREE_LOCK(ump); if (direct) handle_workitem_remove(dirrem, 0); } } /* * Check for an entry matching 'offset' on both the pd_dirraddhd list and the * pd_pendinghd list of a pagedep. */ static struct diradd * diradd_lookup(pagedep, offset) struct pagedep *pagedep; int offset; { struct diradd *dap; LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist) if (dap->da_offset == offset) return (dap); LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) if (dap->da_offset == offset) return (dap); return (NULL); } /* * Search for a .. diradd dependency in a directory that is being removed. * If the directory was renamed to a new parent we have a diradd rather * than a mkdir for the .. entry. We need to cancel it now before * it is found in truncate(). */ static struct jremref * cancel_diradd_dotdot(ip, dirrem, jremref) struct inode *ip; struct dirrem *dirrem; struct jremref *jremref; { struct pagedep *pagedep; struct diradd *dap; struct worklist *wk; if (pagedep_lookup(ITOVFS(ip), NULL, ip->i_number, 0, 0, &pagedep) == 0) return (jremref); dap = diradd_lookup(pagedep, DOTDOT_OFFSET); if (dap == NULL) return (jremref); cancel_diradd(dap, dirrem, jremref, NULL, NULL); /* * Mark any journal work as belonging to the parent so it is freed * with the .. reference. 
*/ LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list) wk->wk_state |= MKDIR_PARENT; return (NULL); } /* * Cancel the MKDIR_PARENT mkdir component of a diradd when we're going to * replace it with a dirrem/diradd pair as a result of re-parenting a * directory. This ensures that we don't simultaneously have a mkdir and * a diradd for the same .. entry. */ static struct jremref * cancel_mkdir_dotdot(ip, dirrem, jremref) struct inode *ip; struct dirrem *dirrem; struct jremref *jremref; { struct inodedep *inodedep; struct jaddref *jaddref; struct ufsmount *ump; struct mkdir *mkdir; struct diradd *dap; struct mount *mp; mp = ITOVFS(ip); if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) return (jremref); dap = inodedep->id_mkdiradd; if (dap == NULL || (dap->da_state & MKDIR_PARENT) == 0) return (jremref); ump = VFSTOUFS(inodedep->id_list.wk_mp); for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir; mkdir = LIST_NEXT(mkdir, md_mkdirs)) if (mkdir->md_diradd == dap && mkdir->md_state & MKDIR_PARENT) break; if (mkdir == NULL) panic("cancel_mkdir_dotdot: Unable to find mkdir\n"); if ((jaddref = mkdir->md_jaddref) != NULL) { mkdir->md_jaddref = NULL; jaddref->ja_state &= ~MKDIR_PARENT; if (inodedep_lookup(mp, jaddref->ja_ino, 0, &inodedep) == 0) panic("cancel_mkdir_dotdot: Lost parent inodedep"); if (cancel_jaddref(jaddref, inodedep, &dirrem->dm_jwork)) { journal_jremref(dirrem, jremref, inodedep); jremref = NULL; } } if (mkdir->md_state & ONWORKLIST) WORKLIST_REMOVE(&mkdir->md_list); mkdir->md_state |= ALLCOMPLETE; complete_mkdir(mkdir); return (jremref); } static void journal_jremref(dirrem, jremref, inodedep) struct dirrem *dirrem; struct jremref *jremref; struct inodedep *inodedep; { if (inodedep == NULL) if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 0, &inodedep) == 0) panic("journal_jremref: Lost inodedep"); LIST_INSERT_HEAD(&dirrem->dm_jremrefhd, jremref, jr_deps); TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps); add_to_journal(&jremref->jr_list); } static void dirrem_journal(dirrem, jremref, dotremref, dotdotremref) struct dirrem *dirrem; struct jremref *jremref; struct jremref *dotremref; struct jremref *dotdotremref; { struct inodedep *inodedep; if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 0, &inodedep) == 0) panic("dirrem_journal: Lost inodedep"); journal_jremref(dirrem, jremref, inodedep); if (dotremref) journal_jremref(dirrem, dotremref, inodedep); if (dotdotremref) journal_jremref(dirrem, dotdotremref, NULL); } /* * Allocate a new dirrem if appropriate and return it along with * its associated pagedep. Called without a lock, returns with lock. */ static struct dirrem * newdirrem(bp, dp, ip, isrmdir, prevdirremp) struct buf *bp; /* buffer containing directory block */ struct inode *dp; /* inode for the directory being modified */ struct inode *ip; /* inode for directory entry being removed */ int isrmdir; /* indicates if doing RMDIR */ struct dirrem **prevdirremp; /* previously referenced inode, if any */ { int offset; ufs_lbn_t lbn; struct diradd *dap; struct dirrem *dirrem; struct pagedep *pagedep; struct jremref *jremref; struct jremref *dotremref; struct jremref *dotdotremref; struct vnode *dvp; struct ufsmount *ump; /* * Whiteouts have no deletion dependencies. */ if (ip == NULL) panic("newdirrem: whiteout"); dvp = ITOV(dp); ump = ITOUMP(dp); /* * If the system is over its limit and our filesystem is * responsible for more than our share of that usage and * we are not a snapshot, request some inodedep cleanup. 
* Limiting the number of dirrem structures will also limit * the number of freefile and freeblks structures. */ ACQUIRE_LOCK(ump); if (!IS_SNAPSHOT(ip) && softdep_excess_items(ump, D_DIRREM)) schedule_cleanup(UFSTOVFS(ump)); else FREE_LOCK(ump); dirrem = malloc(sizeof(struct dirrem), M_DIRREM, M_SOFTDEP_FLAGS | M_ZERO); workitem_alloc(&dirrem->dm_list, D_DIRREM, dvp->v_mount); LIST_INIT(&dirrem->dm_jremrefhd); LIST_INIT(&dirrem->dm_jwork); dirrem->dm_state = isrmdir ? RMDIR : 0; dirrem->dm_oldinum = ip->i_number; *prevdirremp = NULL; /* * Allocate remove reference structures to track journal write * dependencies. We will always have one for the link and * when doing directories we will always have one more for dot. * When renaming a directory we skip the dotdot link change so * this is not needed. */ jremref = dotremref = dotdotremref = NULL; if (DOINGSUJ(dvp)) { if (isrmdir) { jremref = newjremref(dirrem, dp, ip, dp->i_offset, ip->i_effnlink + 2); dotremref = newjremref(dirrem, ip, ip, DOT_OFFSET, ip->i_effnlink + 1); dotdotremref = newjremref(dirrem, ip, dp, DOTDOT_OFFSET, dp->i_effnlink + 1); dotdotremref->jr_state |= MKDIR_PARENT; } else jremref = newjremref(dirrem, dp, ip, dp->i_offset, ip->i_effnlink + 1); } ACQUIRE_LOCK(ump); lbn = lblkno(ump->um_fs, dp->i_offset); offset = blkoff(ump->um_fs, dp->i_offset); pagedep_lookup(UFSTOVFS(ump), bp, dp->i_number, lbn, DEPALLOC, &pagedep); dirrem->dm_pagedep = pagedep; dirrem->dm_offset = offset; /* * If we're renaming a .. link to a new directory, cancel any * existing MKDIR_PARENT mkdir. If it has already been canceled * the jremref is preserved for any potential diradd in this * location. This can not coincide with a rmdir. */ if (dp->i_offset == DOTDOT_OFFSET) { if (isrmdir) panic("newdirrem: .. directory change during remove?"); jremref = cancel_mkdir_dotdot(dp, dirrem, jremref); } /* * If we're removing a directory search for the .. dependency now and * cancel it. Any pending journal work will be added to the dirrem * to be completed when the workitem remove completes. */ if (isrmdir) dotdotremref = cancel_diradd_dotdot(ip, dirrem, dotdotremref); /* * Check for a diradd dependency for the same directory entry. * If present, then both dependencies become obsolete and can * be de-allocated. */ dap = diradd_lookup(pagedep, offset); if (dap == NULL) { /* * Link the jremref structures into the dirrem so they are * written prior to the pagedep. */ if (jremref) dirrem_journal(dirrem, jremref, dotremref, dotdotremref); return (dirrem); } /* * Must be ATTACHED at this point. */ if ((dap->da_state & ATTACHED) == 0) panic("newdirrem: not ATTACHED"); if (dap->da_newinum != ip->i_number) panic("newdirrem: inum %ju should be %ju", (uintmax_t)ip->i_number, (uintmax_t)dap->da_newinum); /* * If we are deleting a changed name that never made it to disk, * then return the dirrem describing the previous inode (which * represents the inode currently referenced from this entry on disk). */ if ((dap->da_state & DIRCHG) != 0) { *prevdirremp = dap->da_previous; dap->da_state &= ~DIRCHG; dap->da_pagedep = pagedep; } /* * We are deleting an entry that never made it to disk. * Mark it COMPLETE so we can delete its inode immediately. 
*/ dirrem->dm_state |= COMPLETE; cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref); #ifdef SUJ_DEBUG if (isrmdir == 0) { struct worklist *wk; LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list) if (wk->wk_state & (MKDIR_BODY | MKDIR_PARENT)) panic("bad wk %p (0x%X)\n", wk, wk->wk_state); } #endif return (dirrem); } /* * Directory entry change dependencies. * * Changing an existing directory entry requires that an add operation * be completed first followed by a deletion. The semantics for the addition * are identical to the description of adding a new entry above except * that the rollback is to the old inode number rather than zero. Once * the addition dependency is completed, the removal is done as described * in the removal routine above. */ /* * This routine should be called immediately after changing * a directory entry. The inode's link count should not be * decremented by the calling procedure -- the soft updates * code will perform this task when it is safe. */ void softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir) struct buf *bp; /* buffer containing directory block */ struct inode *dp; /* inode for the directory being modified */ struct inode *ip; /* inode for directory entry being removed */ ino_t newinum; /* new inode number for changed entry */ int isrmdir; /* indicates if doing RMDIR */ { int offset; struct diradd *dap = NULL; struct dirrem *dirrem, *prevdirrem; struct pagedep *pagedep; struct inodedep *inodedep; struct jaddref *jaddref; struct mount *mp; struct ufsmount *ump; mp = ITOVFS(dp); ump = VFSTOUFS(mp); offset = blkoff(ump->um_fs, dp->i_offset); KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_setup_directory_change called on non-softdep filesystem")); /* * Whiteouts do not need diradd dependencies. */ if (newinum != UFS_WINO) { dap = malloc(sizeof(struct diradd), M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO); workitem_alloc(&dap->da_list, D_DIRADD, mp); dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE; dap->da_offset = offset; dap->da_newinum = newinum; LIST_INIT(&dap->da_jwork); } /* * Allocate a new dirrem and ACQUIRE_LOCK. */ dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); pagedep = dirrem->dm_pagedep; /* * The possible values for isrmdir: * 0 - non-directory file rename * 1 - directory rename within same directory * inum - directory rename to new directory of given inode number * When renaming to a new directory, we are both deleting and * creating a new directory entry, so the link count on the new * directory should not change. Thus we do not need the followup * dirrem which is usually done in handle_workitem_remove. We set * the DIRCHG flag to tell handle_workitem_remove to skip the * followup dirrem. */ if (isrmdir > 1) dirrem->dm_state |= DIRCHG; /* * Whiteouts have no additional dependencies, * so just put the dirrem on the correct list. */ if (newinum == UFS_WINO) { if ((dirrem->dm_state & COMPLETE) == 0) { LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem, dm_next); } else { dirrem->dm_dirinum = pagedep->pd_ino; if (LIST_EMPTY(&dirrem->dm_jremrefhd)) add_to_worklist(&dirrem->dm_list, 0); } FREE_LOCK(ump); return; } /* * Add the dirrem to the inodedep's pending remove list for quick * discovery later. A valid nlinkdelta ensures that this lookup * will not fail. 
*/ if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) panic("softdep_setup_directory_change: Lost inodedep."); dirrem->dm_state |= ONDEPLIST; LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext); /* * If the COMPLETE flag is clear, then there were no active * entries and we want to roll back to the previous inode until * the new inode is committed to disk. If the COMPLETE flag is * set, then we have deleted an entry that never made it to disk. * If the entry we deleted resulted from a name change, then the old * inode reference still resides on disk. Any rollback that we do * needs to be to that old inode (returned to us in prevdirrem). If * the entry we deleted resulted from a create, then there is * no entry on the disk, so we want to roll back to zero rather * than the uncommitted inode. In either of the COMPLETE cases we * want to immediately free the unwritten and unreferenced inode. */ if ((dirrem->dm_state & COMPLETE) == 0) { dap->da_previous = dirrem; } else { if (prevdirrem != NULL) { dap->da_previous = prevdirrem; } else { dap->da_state &= ~DIRCHG; dap->da_pagedep = pagedep; } dirrem->dm_dirinum = pagedep->pd_ino; if (LIST_EMPTY(&dirrem->dm_jremrefhd)) add_to_worklist(&dirrem->dm_list, 0); } /* * Lookup the jaddref for this journal entry. We must finish * initializing it and make the diradd write dependent on it. * If we're not journaling, put it on the id_bufwait list if the * inode is not yet written. If it is written, do the post-inode * write processing to put it on the id_pendinghd list. */ inodedep_lookup(mp, newinum, DEPALLOC, &inodedep); if (MOUNTEDSUJ(mp)) { jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, inoreflst); KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number, ("softdep_setup_directory_change: bad jaddref %p", jaddref)); jaddref->ja_diroff = dp->i_offset; jaddref->ja_diradd = dap; LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap, da_pdlist); add_to_journal(&jaddref->ja_list); } else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { dap->da_state |= COMPLETE; LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); } else { LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap, da_pdlist); WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); } /* * If we're making a new name for a directory that has not been * committed when need to move the dot and dotdot references to * this new name. */ if (inodedep->id_mkdiradd && dp->i_offset != DOTDOT_OFFSET) merge_diradd(inodedep, dap); FREE_LOCK(ump); } /* * Called whenever the link count on an inode is changed. * It creates an inode dependency so that the new reference(s) * to the inode cannot be committed to disk until the updated * inode has been written. */ void softdep_change_linkcnt(ip) struct inode *ip; /* the inode with the increased link count */ { struct inodedep *inodedep; struct ufsmount *ump; ump = ITOUMP(ip); KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, ("softdep_change_linkcnt called on non-softdep filesystem")); ACQUIRE_LOCK(ump); inodedep_lookup(UFSTOVFS(ump), ip->i_number, DEPALLOC, &inodedep); if (ip->i_nlink < ip->i_effnlink) panic("softdep_change_linkcnt: bad delta"); inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; FREE_LOCK(ump); } /* * Attach a sbdep dependency to the superblock buf so that we can keep * track of the head of the linked list of referenced but unlinked inodes. 
*/ void softdep_setup_sbupdate(ump, fs, bp) struct ufsmount *ump; struct fs *fs; struct buf *bp; { struct sbdep *sbdep; struct worklist *wk; KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, ("softdep_setup_sbupdate called on non-softdep filesystem")); LIST_FOREACH(wk, &bp->b_dep, wk_list) if (wk->wk_type == D_SBDEP) break; if (wk != NULL) return; sbdep = malloc(sizeof(struct sbdep), M_SBDEP, M_SOFTDEP_FLAGS); workitem_alloc(&sbdep->sb_list, D_SBDEP, UFSTOVFS(ump)); sbdep->sb_fs = fs; sbdep->sb_ump = ump; ACQUIRE_LOCK(ump); WORKLIST_INSERT(&bp->b_dep, &sbdep->sb_list); FREE_LOCK(ump); } /* * Return the first unlinked inodedep which is ready to be the head of the * list. The inodedep and all those after it must have valid next pointers. */ static struct inodedep * first_unlinked_inodedep(ump) struct ufsmount *ump; { struct inodedep *inodedep; struct inodedep *idp; LOCK_OWNED(ump); for (inodedep = TAILQ_LAST(&ump->softdep_unlinked, inodedeplst); inodedep; inodedep = idp) { if ((inodedep->id_state & UNLINKNEXT) == 0) return (NULL); idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked); if (idp == NULL || (idp->id_state & UNLINKNEXT) == 0) break; if ((inodedep->id_state & UNLINKPREV) == 0) break; } return (inodedep); } /* * Set the sujfree unlinked head pointer prior to writing a superblock. */ static void initiate_write_sbdep(sbdep) struct sbdep *sbdep; { struct inodedep *inodedep; struct fs *bpfs; struct fs *fs; bpfs = sbdep->sb_fs; fs = sbdep->sb_ump->um_fs; inodedep = first_unlinked_inodedep(sbdep->sb_ump); if (inodedep) { fs->fs_sujfree = inodedep->id_ino; inodedep->id_state |= UNLINKPREV; } else fs->fs_sujfree = 0; bpfs->fs_sujfree = fs->fs_sujfree; } /* * After a superblock is written determine whether it must be written again * due to a changing unlinked list head. */ static int handle_written_sbdep(sbdep, bp) struct sbdep *sbdep; struct buf *bp; { struct inodedep *inodedep; struct fs *fs; LOCK_OWNED(sbdep->sb_ump); fs = sbdep->sb_fs; /* * If the superblock doesn't match the in-memory list start over. */ inodedep = first_unlinked_inodedep(sbdep->sb_ump); if ((inodedep && fs->fs_sujfree != inodedep->id_ino) || (inodedep == NULL && fs->fs_sujfree != 0)) { bdirty(bp); return (1); } WORKITEM_FREE(sbdep, D_SBDEP); if (fs->fs_sujfree == 0) return (0); /* * Now that we have a record of this inode in stable store allow it * to be written to free up pending work. Inodes may see a lot of * write activity after they are unlinked which we must not hold up. */ for (; inodedep != NULL; inodedep = TAILQ_NEXT(inodedep, id_unlinked)) { if ((inodedep->id_state & UNLINKLINKS) != UNLINKLINKS) panic("handle_written_sbdep: Bad inodedep %p (0x%X)", inodedep, inodedep->id_state); if (inodedep->id_state & UNLINKONLIST) break; inodedep->id_state |= DEPCOMPLETE | UNLINKONLIST; } return (0); } /* * Mark an inodedep as unlinked and insert it into the in-memory unlinked list. */ static void unlinked_inodedep(mp, inodedep) struct mount *mp; struct inodedep *inodedep; { struct ufsmount *ump; ump = VFSTOUFS(mp); LOCK_OWNED(ump); if (MOUNTEDSUJ(mp) == 0) return; ump->um_fs->fs_fmod = 1; if (inodedep->id_state & UNLINKED) panic("unlinked_inodedep: %p already unlinked\n", inodedep); inodedep->id_state |= UNLINKED; TAILQ_INSERT_HEAD(&ump->softdep_unlinked, inodedep, id_unlinked); } /* * Remove an inodedep from the unlinked inodedep list. This may require * disk writes if the inode has made it that far. 
 */
static void
clear_unlinked_inodedep(inodedep)
	struct inodedep *inodedep;
{
+	struct ufs2_dinode *dip;
	struct ufsmount *ump;
	struct inodedep *idp;
	struct inodedep *idn;
	struct fs *fs;
	struct buf *bp;
	ino_t ino;
	ino_t nino;
	ino_t pino;
	int error;

	ump = VFSTOUFS(inodedep->id_list.wk_mp);
	fs = ump->um_fs;
	ino = inodedep->id_ino;
	error = 0;
	for (;;) {
		LOCK_OWNED(ump);
		KASSERT((inodedep->id_state & UNLINKED) != 0,
		    ("clear_unlinked_inodedep: inodedep %p not unlinked",
		    inodedep));
		/*
		 * If nothing has yet been written simply remove us from
		 * the in memory list and return. This is the most common
		 * case where handle_workitem_remove() loses the final
		 * reference.
		 */
		if ((inodedep->id_state & UNLINKLINKS) == 0)
			break;
		/*
		 * If we have a NEXT pointer and no PREV pointer we can simply
		 * clear NEXT's PREV and remove ourselves from the list. Be
		 * careful not to clear PREV if the superblock points at
		 * next as well.
		 */
		idn = TAILQ_NEXT(inodedep, id_unlinked);
		if ((inodedep->id_state & UNLINKLINKS) == UNLINKNEXT) {
			if (idn && fs->fs_sujfree != idn->id_ino)
				idn->id_state &= ~UNLINKPREV;
			break;
		}
		/*
		 * Here we have an inodedep which is actually linked into
		 * the list. We must remove it by forcing a write to the
		 * link before us, whether it be the superblock or an inode.
		 * Unfortunately the list may change while we're waiting
		 * on the buf lock for either resource so we must loop until
		 * we lock the right one. If both the superblock and an
		 * inode point to this inode we must clear the inode first
		 * followed by the superblock.
		 */
		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
		pino = 0;
		if (idp && (idp->id_state & UNLINKNEXT))
			pino = idp->id_ino;
		FREE_LOCK(ump);
		if (pino == 0) {
			bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
			    (int)fs->fs_sbsize, 0, 0, 0);
		} else {
			error = bread(ump->um_devvp,
			    fsbtodb(fs, ino_to_fsba(fs, pino)),
			    (int)fs->fs_bsize, NOCRED, &bp);
			if (error)
				brelse(bp);
		}
		ACQUIRE_LOCK(ump);
		if (error)
			break;
		/* If the list has changed restart the loop. */
		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
		nino = 0;
		if (idp && (idp->id_state & UNLINKNEXT))
			nino = idp->id_ino;
		if (nino != pino ||
		    (inodedep->id_state & UNLINKPREV) != UNLINKPREV) {
			FREE_LOCK(ump);
			brelse(bp);
			ACQUIRE_LOCK(ump);
			continue;
		}
		nino = 0;
		idn = TAILQ_NEXT(inodedep, id_unlinked);
		if (idn)
			nino = idn->id_ino;
		/*
		 * Remove us from the in memory list. After this we cannot
		 * access the inodedep.
		 */
		KASSERT((inodedep->id_state & UNLINKED) != 0,
		    ("clear_unlinked_inodedep: inodedep %p not unlinked",
		    inodedep));
		inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
		TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
		FREE_LOCK(ump);
		/*
		 * The predecessor's next pointer is manually updated here
		 * so that the NEXT flag is never cleared for an element
		 * that is in the list.
		 */
		if (pino == 0) {
			bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
			ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
			softdep_setup_sbupdate(ump, (struct fs *)bp->b_data,
			    bp);
-		} else if (fs->fs_magic == FS_UFS1_MAGIC)
+		} else if (fs->fs_magic == FS_UFS1_MAGIC) {
			((struct ufs1_dinode *)bp->b_data +
			    ino_to_fsbo(fs, pino))->di_freelink = nino;
-		else
-			((struct ufs2_dinode *)bp->b_data +
-			    ino_to_fsbo(fs, pino))->di_freelink = nino;
+		} else {
+			dip = (struct ufs2_dinode *)bp->b_data +
+			    ino_to_fsbo(fs, pino);
+			dip->di_freelink = nino;
+		}
		/*
		 * If the bwrite fails we have no recourse to recover. The
		 * filesystem is corrupted already.
		 */
		bwrite(bp);
		ACQUIRE_LOCK(ump);
		/*
		 * If the superblock pointer still needs to be cleared force
		 * a write here.
		 */
		if (fs->fs_sujfree == ino) {
			FREE_LOCK(ump);
			bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
			    (int)fs->fs_sbsize, 0, 0, 0);
			bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
			ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
			softdep_setup_sbupdate(ump, (struct fs *)bp->b_data,
			    bp);
			bwrite(bp);
			ACQUIRE_LOCK(ump);
		}
		if (fs->fs_sujfree != ino)
			return;
		panic("clear_unlinked_inodedep: Failed to clear free head");
	}
	if (inodedep->id_ino == fs->fs_sujfree)
		panic("clear_unlinked_inodedep: Freeing head of free list");
	inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
	TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
	return;
}

/*
 * This workitem decrements the inode's link count.
 * If the link count reaches zero, the file is removed.
 */
static int
handle_workitem_remove(dirrem, flags)
	struct dirrem *dirrem;
	int flags;
{
	struct inodedep *inodedep;
	struct workhead dotdotwk;
	struct worklist *wk;
	struct ufsmount *ump;
	struct mount *mp;
	struct vnode *vp;
	struct inode *ip;
	ino_t oldinum;

	if (dirrem->dm_state & ONWORKLIST)
		panic("handle_workitem_remove: dirrem %p still on worklist",
		    dirrem);
	oldinum = dirrem->dm_oldinum;
	mp = dirrem->dm_list.wk_mp;
	ump = VFSTOUFS(mp);
	flags |= LK_EXCLUSIVE;
	if (ffs_vgetf(mp, oldinum, flags, &vp, FFSV_FORCEINSMQ) != 0)
		return (EBUSY);
	ip = VTOI(vp);
	ACQUIRE_LOCK(ump);
	if ((inodedep_lookup(mp, oldinum, 0, &inodedep)) == 0)
		panic("handle_workitem_remove: lost inodedep");
	if (dirrem->dm_state & ONDEPLIST)
		LIST_REMOVE(dirrem, dm_inonext);
	KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd),
	    ("handle_workitem_remove: Journal entries not written."));

	/*
	 * Move all dependencies waiting on the remove to complete
	 * from the dirrem to the inode inowait list to be completed
	 * after the inode has been updated and written to disk. Any
	 * marked MKDIR_PARENT are saved to be completed when the .. ref
	 * is removed.
	 */
	LIST_INIT(&dotdotwk);
	while ((wk = LIST_FIRST(&dirrem->dm_jwork)) != NULL) {
		WORKLIST_REMOVE(wk);
		if (wk->wk_state & MKDIR_PARENT) {
			wk->wk_state &= ~MKDIR_PARENT;
			WORKLIST_INSERT(&dotdotwk, wk);
			continue;
		}
		WORKLIST_INSERT(&inodedep->id_inowait, wk);
	}
	LIST_SWAP(&dirrem->dm_jwork, &dotdotwk, worklist, wk_list);
	/*
	 * Normal file deletion.
	 */
	if ((dirrem->dm_state & RMDIR) == 0) {
		ip->i_nlink--;
		DIP_SET(ip, i_nlink, ip->i_nlink);
		ip->i_flag |= IN_CHANGE;
		if (ip->i_nlink < ip->i_effnlink)
			panic("handle_workitem_remove: bad file delta");
		if (ip->i_nlink == 0)
			unlinked_inodedep(mp, inodedep);
		inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
		KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
		    ("handle_workitem_remove: worklist not empty. %s",
		    TYPENAME(LIST_FIRST(&dirrem->dm_jwork)->wk_type)));
		WORKITEM_FREE(dirrem, D_DIRREM);
		FREE_LOCK(ump);
		goto out;
	}
	/*
	 * Directory deletion. Decrement reference count for both the
	 * just deleted parent directory entry and the reference for ".".
	 * Arrange to have the reference count on the parent decremented
	 * to account for the loss of "..".
	 */
	ip->i_nlink -= 2;
	DIP_SET(ip, i_nlink, ip->i_nlink);
	ip->i_flag |= IN_CHANGE;
	if (ip->i_nlink < ip->i_effnlink)
		panic("handle_workitem_remove: bad dir delta");
	if (ip->i_nlink == 0)
		unlinked_inodedep(mp, inodedep);
	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
	/*
	 * Rename a directory to a new parent. Since, we are both deleting
	 * and creating a new directory entry, the link count on the new
	 * directory should not change. Thus we skip the followup dirrem.
*/ if (dirrem->dm_state & DIRCHG) { KASSERT(LIST_EMPTY(&dirrem->dm_jwork), ("handle_workitem_remove: DIRCHG and worklist not empty.")); WORKITEM_FREE(dirrem, D_DIRREM); FREE_LOCK(ump); goto out; } dirrem->dm_state = ONDEPLIST; dirrem->dm_oldinum = dirrem->dm_dirinum; /* * Place the dirrem on the parent's diremhd list. */ if (inodedep_lookup(mp, dirrem->dm_oldinum, 0, &inodedep) == 0) panic("handle_workitem_remove: lost dir inodedep"); LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext); /* * If the allocated inode has never been written to disk, then * the on-disk inode is zero'ed and we can remove the file * immediately. When journaling if the inode has been marked * unlinked and not DEPCOMPLETE we know it can never be written. */ inodedep_lookup(mp, oldinum, 0, &inodedep); if (inodedep == NULL || (inodedep->id_state & (DEPCOMPLETE | UNLINKED)) == UNLINKED || check_inode_unwritten(inodedep)) { FREE_LOCK(ump); vput(vp); return handle_workitem_remove(dirrem, flags); } WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list); FREE_LOCK(ump); ip->i_flag |= IN_CHANGE; out: ffs_update(vp, 0); vput(vp); return (0); } /* * Inode de-allocation dependencies. * * When an inode's link count is reduced to zero, it can be de-allocated. We * found it convenient to postpone de-allocation until after the inode is * written to disk with its new link count (zero). At this point, all of the * on-disk inode's block pointers are nullified and, with careful dependency * list ordering, all dependencies related to the inode will be satisfied and * the corresponding dependency structures de-allocated. So, if/when the * inode is reused, there will be no mixing of old dependencies with new * ones. This artificial dependency is set up by the block de-allocation * procedure above (softdep_setup_freeblocks) and completed by the * following procedure. */ static void handle_workitem_freefile(freefile) struct freefile *freefile; { struct workhead wkhd; struct fs *fs; struct inodedep *idp; struct ufsmount *ump; int error; ump = VFSTOUFS(freefile->fx_list.wk_mp); fs = ump->um_fs; #ifdef DEBUG ACQUIRE_LOCK(ump); error = inodedep_lookup(UFSTOVFS(ump), freefile->fx_oldinum, 0, &idp); FREE_LOCK(ump); if (error) panic("handle_workitem_freefile: inodedep %p survived", idp); #endif UFS_LOCK(ump); fs->fs_pendinginodes -= 1; UFS_UNLOCK(ump); LIST_INIT(&wkhd); LIST_SWAP(&freefile->fx_jwork, &wkhd, worklist, wk_list); if ((error = ffs_freefile(ump, fs, freefile->fx_devvp, freefile->fx_oldinum, freefile->fx_mode, &wkhd)) != 0) softdep_error("handle_workitem_freefile", error); ACQUIRE_LOCK(ump); WORKITEM_FREE(freefile, D_FREEFILE); FREE_LOCK(ump); } /* * Helper function which unlinks marker element from work list and returns * the next element on the list. */ static __inline struct worklist * markernext(struct worklist *marker) { struct worklist *next; next = LIST_NEXT(marker, wk_list); LIST_REMOVE(marker, wk_list); return next; } /* * Disk writes. * * The dependency structures constructed above are most actively used when file * system blocks are written to disk. No constraints are placed on when a * block can be written, but unsatisfied update dependencies are made safe by * modifying (or replacing) the source memory for the duration of the disk * write. When the disk write completes, the memory block is again brought * up-to-date. * * In-core inode structure reclamation. * * Because there are a finite number of "in-core" inode structures, they are * reused regularly. 
By transferring all inode-related dependencies to the * in-memory inode block and indexing them separately (via "inodedep"s), we * can allow "in-core" inode structures to be reused at any time and avoid * any increase in contention. * * Called just before entering the device driver to initiate a new disk I/O. * The buffer must be locked, thus, no I/O completion operations can occur * while we are manipulating its associated dependencies. */ static void softdep_disk_io_initiation(bp) struct buf *bp; /* structure describing disk write to occur */ { struct worklist *wk; struct worklist marker; struct inodedep *inodedep; struct freeblks *freeblks; struct jblkdep *jblkdep; struct newblk *newblk; struct ufsmount *ump; /* * We only care about write operations. There should never * be dependencies for reads. */ if (bp->b_iocmd != BIO_WRITE) panic("softdep_disk_io_initiation: not write"); if (bp->b_vflags & BV_BKGRDINPROG) panic("softdep_disk_io_initiation: Writing buffer with " "background write in progress: %p", bp); ump = softdep_bp_to_mp(bp); if (ump == NULL) return; marker.wk_type = D_LAST + 1; /* Not a normal workitem */ PHOLD(curproc); /* Don't swap out kernel stack */ ACQUIRE_LOCK(ump); /* * Do any necessary pre-I/O processing. */ for (wk = LIST_FIRST(&bp->b_dep); wk != NULL; wk = markernext(&marker)) { LIST_INSERT_AFTER(wk, &marker, wk_list); switch (wk->wk_type) { case D_PAGEDEP: initiate_write_filepage(WK_PAGEDEP(wk), bp); continue; case D_INODEDEP: inodedep = WK_INODEDEP(wk); if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) initiate_write_inodeblock_ufs1(inodedep, bp); else initiate_write_inodeblock_ufs2(inodedep, bp); continue; case D_INDIRDEP: initiate_write_indirdep(WK_INDIRDEP(wk), bp); continue; case D_BMSAFEMAP: initiate_write_bmsafemap(WK_BMSAFEMAP(wk), bp); continue; case D_JSEG: WK_JSEG(wk)->js_buf = NULL; continue; case D_FREEBLKS: freeblks = WK_FREEBLKS(wk); jblkdep = LIST_FIRST(&freeblks->fb_jblkdephd); /* * We have to wait for the freeblks to be journaled * before we can write an inodeblock with updated * pointers. Be careful to arrange the marker so * we revisit the freeblks if it's not removed by * the first jwait(). */ if (jblkdep != NULL) { LIST_REMOVE(&marker, wk_list); LIST_INSERT_BEFORE(wk, &marker, wk_list); jwait(&jblkdep->jb_list, MNT_WAIT); } continue; case D_ALLOCDIRECT: case D_ALLOCINDIR: /* * We have to wait for the jnewblk to be journaled * before we can write to a block if the contents * may be confused with an earlier file's indirect * at recovery time. Handle the marker as described * above. */ newblk = WK_NEWBLK(wk); if (newblk->nb_jnewblk != NULL && indirblk_lookup(newblk->nb_list.wk_mp, newblk->nb_newblkno)) { LIST_REMOVE(&marker, wk_list); LIST_INSERT_BEFORE(wk, &marker, wk_list); jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT); } continue; case D_SBDEP: initiate_write_sbdep(WK_SBDEP(wk)); continue; case D_MKDIR: case D_FREEWORK: case D_FREEDEP: case D_JSEGDEP: continue; default: panic("handle_disk_io_initiation: Unexpected type %s", TYPENAME(wk->wk_type)); /* NOTREACHED */ } } FREE_LOCK(ump); PRELE(curproc); /* Allow swapout of kernel stack */ } /* * Called from within the procedure above to deal with unsatisfied * allocation dependencies in a directory. The buffer must be locked, * thus, no I/O completion operations can occur while we are * manipulating its associated dependencies. 
*/ static void initiate_write_filepage(pagedep, bp) struct pagedep *pagedep; struct buf *bp; { struct jremref *jremref; struct jmvref *jmvref; struct dirrem *dirrem; struct diradd *dap; struct direct *ep; int i; if (pagedep->pd_state & IOSTARTED) { /* * This can only happen if there is a driver that does not * understand chaining. Here biodone will reissue the call * to strategy for the incomplete buffers. */ printf("initiate_write_filepage: already started\n"); return; } pagedep->pd_state |= IOSTARTED; /* * Wait for all journal remove dependencies to hit the disk. * We can not allow any potentially conflicting directory adds * to be visible before removes and rollback is too difficult. * The per-filesystem lock may be dropped and re-acquired, however * we hold the buf locked so the dependency can not go away. */ LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL) jwait(&jremref->jr_list, MNT_WAIT); while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL) jwait(&jmvref->jm_list, MNT_WAIT); for (i = 0; i < DAHASHSZ; i++) { LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { ep = (struct direct *) ((char *)bp->b_data + dap->da_offset); if (ep->d_ino != dap->da_newinum) panic("%s: dir inum %ju != new %ju", "initiate_write_filepage", (uintmax_t)ep->d_ino, (uintmax_t)dap->da_newinum); if (dap->da_state & DIRCHG) ep->d_ino = dap->da_previous->dm_oldinum; else ep->d_ino = 0; dap->da_state &= ~ATTACHED; dap->da_state |= UNDONE; } } } /* * Version of initiate_write_inodeblock that handles UFS1 dinodes. * Note that any bug fixes made to this routine must be done in the * version found below. * * Called from within the procedure above to deal with unsatisfied * allocation dependencies in an inodeblock. The buffer must be * locked, thus, no I/O completion operations can occur while we * are manipulating its associated dependencies. */ static void initiate_write_inodeblock_ufs1(inodedep, bp) struct inodedep *inodedep; struct buf *bp; /* The inode block */ { struct allocdirect *adp, *lastadp; struct ufs1_dinode *dp; struct ufs1_dinode *sip; struct inoref *inoref; struct ufsmount *ump; struct fs *fs; ufs_lbn_t i; #ifdef INVARIANTS ufs_lbn_t prevlbn = 0; #endif int deplist; if (inodedep->id_state & IOSTARTED) panic("initiate_write_inodeblock_ufs1: already started"); inodedep->id_state |= IOSTARTED; fs = inodedep->id_fs; ump = VFSTOUFS(inodedep->id_list.wk_mp); LOCK_OWNED(ump); dp = (struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, inodedep->id_ino); /* * If we're on the unlinked list but have not yet written our * next pointer initialize it here. */ if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) { struct inodedep *inon; inon = TAILQ_NEXT(inodedep, id_unlinked); dp->di_freelink = inon ? inon->id_ino : 0; } /* * If the bitmap is not yet written, then the allocated * inode cannot be written to disk. */ if ((inodedep->id_state & DEPCOMPLETE) == 0) { if (inodedep->id_savedino1 != NULL) panic("initiate_write_inodeblock_ufs1: I/O underway"); FREE_LOCK(ump); sip = malloc(sizeof(struct ufs1_dinode), M_SAVEDINO, M_SOFTDEP_FLAGS); ACQUIRE_LOCK(ump); inodedep->id_savedino1 = sip; *inodedep->id_savedino1 = *dp; bzero((caddr_t)dp, sizeof(struct ufs1_dinode)); dp->di_gen = inodedep->id_savedino1->di_gen; dp->di_freelink = inodedep->id_savedino1->di_freelink; return; } /* * If no dependencies, then there is nothing to roll back. 
*/ inodedep->id_savedsize = dp->di_size; inodedep->id_savedextsize = 0; inodedep->id_savednlink = dp->di_nlink; if (TAILQ_EMPTY(&inodedep->id_inoupdt) && TAILQ_EMPTY(&inodedep->id_inoreflst)) return; /* * Revert the link count to that of the first unwritten journal entry. */ inoref = TAILQ_FIRST(&inodedep->id_inoreflst); if (inoref) dp->di_nlink = inoref->if_nlink; /* * Set the dependencies to busy. */ for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = TAILQ_NEXT(adp, ad_next)) { #ifdef INVARIANTS if (deplist != 0 && prevlbn >= adp->ad_offset) panic("softdep_write_inodeblock: lbn order"); prevlbn = adp->ad_offset; if (adp->ad_offset < UFS_NDADDR && dp->di_db[adp->ad_offset] != adp->ad_newblkno) panic("initiate_write_inodeblock_ufs1: " "direct pointer #%jd mismatch %d != %jd", (intmax_t)adp->ad_offset, dp->di_db[adp->ad_offset], (intmax_t)adp->ad_newblkno); if (adp->ad_offset >= UFS_NDADDR && dp->di_ib[adp->ad_offset - UFS_NDADDR] != adp->ad_newblkno) panic("initiate_write_inodeblock_ufs1: " "indirect pointer #%jd mismatch %d != %jd", (intmax_t)adp->ad_offset - UFS_NDADDR, dp->di_ib[adp->ad_offset - UFS_NDADDR], (intmax_t)adp->ad_newblkno); deplist |= 1 << adp->ad_offset; if ((adp->ad_state & ATTACHED) == 0) panic("initiate_write_inodeblock_ufs1: " "Unknown state 0x%x", adp->ad_state); #endif /* INVARIANTS */ adp->ad_state &= ~ATTACHED; adp->ad_state |= UNDONE; } /* * The on-disk inode cannot claim to be any larger than the last * fragment that has been written. Otherwise, the on-disk inode * might have fragments that were not the last block in the file * which would corrupt the filesystem. */ for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { if (adp->ad_offset >= UFS_NDADDR) break; dp->di_db[adp->ad_offset] = adp->ad_oldblkno; /* keep going until hitting a rollback to a frag */ if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) continue; dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize; for (i = adp->ad_offset + 1; i < UFS_NDADDR; i++) { #ifdef INVARIANTS if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) panic("initiate_write_inodeblock_ufs1: " "lost dep1"); #endif /* INVARIANTS */ dp->di_db[i] = 0; } for (i = 0; i < UFS_NIADDR; i++) { #ifdef INVARIANTS if (dp->di_ib[i] != 0 && (deplist & ((1 << UFS_NDADDR) << i)) == 0) panic("initiate_write_inodeblock_ufs1: " "lost dep2"); #endif /* INVARIANTS */ dp->di_ib[i] = 0; } return; } /* * If we have zero'ed out the last allocated block of the file, * roll back the size to the last currently allocated block. * We know that this last allocated block is a full-sized as * we already checked for fragments in the loop above. */ if (lastadp != NULL && dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) { for (i = lastadp->ad_offset; i >= 0; i--) if (dp->di_db[i] != 0) break; dp->di_size = (i + 1) * fs->fs_bsize; } /* * The only dependencies are for indirect blocks. * * The file size for indirect block additions is not guaranteed. * Such a guarantee would be non-trivial to achieve. The conventional * synchronous write implementation also does not make this guarantee. * Fsck should catch and fix discrepancies. Arguably, the file size * can be over-estimated without destroying integrity when the file * moves into the indirect blocks (i.e., is large). If we want to * postpone fsck, we are stuck with this argument. 
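 *
 * As a rough illustration: with UFS_NDADDR (12) direct pointers and
 * an 8K block size, the direct blocks cover only the first 96K of a
 * file, so once di_size points into the indirect range the loop below
 * simply nulls the unwritten indirect pointers and leaves di_size
 * possibly larger than what is reachable on disk, to be reconciled by
 * fsck.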
*/ for (; adp; adp = TAILQ_NEXT(adp, ad_next)) dp->di_ib[adp->ad_offset - UFS_NDADDR] = 0; } /* * Version of initiate_write_inodeblock that handles UFS2 dinodes. * Note that any bug fixes made to this routine must be done in the * version found above. * * Called from within the procedure above to deal with unsatisfied * allocation dependencies in an inodeblock. The buffer must be * locked, thus, no I/O completion operations can occur while we * are manipulating its associated dependencies. */ static void initiate_write_inodeblock_ufs2(inodedep, bp) struct inodedep *inodedep; struct buf *bp; /* The inode block */ { struct allocdirect *adp, *lastadp; struct ufs2_dinode *dp; struct ufs2_dinode *sip; struct inoref *inoref; struct ufsmount *ump; struct fs *fs; ufs_lbn_t i; #ifdef INVARIANTS ufs_lbn_t prevlbn = 0; #endif int deplist; if (inodedep->id_state & IOSTARTED) panic("initiate_write_inodeblock_ufs2: already started"); inodedep->id_state |= IOSTARTED; fs = inodedep->id_fs; ump = VFSTOUFS(inodedep->id_list.wk_mp); LOCK_OWNED(ump); dp = (struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, inodedep->id_ino); /* * If we're on the unlinked list but have not yet written our * next pointer initialize it here. */ if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) { struct inodedep *inon; inon = TAILQ_NEXT(inodedep, id_unlinked); dp->di_freelink = inon ? inon->id_ino : 0; } /* * If the bitmap is not yet written, then the allocated * inode cannot be written to disk. */ if ((inodedep->id_state & DEPCOMPLETE) == 0) { if (inodedep->id_savedino2 != NULL) panic("initiate_write_inodeblock_ufs2: I/O underway"); FREE_LOCK(ump); sip = malloc(sizeof(struct ufs2_dinode), M_SAVEDINO, M_SOFTDEP_FLAGS); ACQUIRE_LOCK(ump); inodedep->id_savedino2 = sip; *inodedep->id_savedino2 = *dp; bzero((caddr_t)dp, sizeof(struct ufs2_dinode)); dp->di_gen = inodedep->id_savedino2->di_gen; dp->di_freelink = inodedep->id_savedino2->di_freelink; return; } /* * If no dependencies, then there is nothing to roll back. */ inodedep->id_savedsize = dp->di_size; inodedep->id_savedextsize = dp->di_extsize; inodedep->id_savednlink = dp->di_nlink; if (TAILQ_EMPTY(&inodedep->id_inoupdt) && TAILQ_EMPTY(&inodedep->id_extupdt) && TAILQ_EMPTY(&inodedep->id_inoreflst)) return; /* * Revert the link count to that of the first unwritten journal entry. */ inoref = TAILQ_FIRST(&inodedep->id_inoreflst); if (inoref) dp->di_nlink = inoref->if_nlink; /* * Set the ext data dependencies to busy. */ for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; adp = TAILQ_NEXT(adp, ad_next)) { #ifdef INVARIANTS if (deplist != 0 && prevlbn >= adp->ad_offset) panic("initiate_write_inodeblock_ufs2: lbn order"); prevlbn = adp->ad_offset; if (dp->di_extb[adp->ad_offset] != adp->ad_newblkno) panic("initiate_write_inodeblock_ufs2: " "ext pointer #%jd mismatch %jd != %jd", (intmax_t)adp->ad_offset, (intmax_t)dp->di_extb[adp->ad_offset], (intmax_t)adp->ad_newblkno); deplist |= 1 << adp->ad_offset; if ((adp->ad_state & ATTACHED) == 0) panic("initiate_write_inodeblock_ufs2: Unknown " "state 0x%x", adp->ad_state); #endif /* INVARIANTS */ adp->ad_state &= ~ATTACHED; adp->ad_state |= UNDONE; } /* * The on-disk inode cannot claim to be any larger than the last * fragment that has been written. Otherwise, the on-disk inode * might have fragments that were not the last block in the ext * data which would corrupt the filesystem. 
*/ for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { dp->di_extb[adp->ad_offset] = adp->ad_oldblkno; /* keep going until hitting a rollback to a frag */ if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) continue; dp->di_extsize = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize; for (i = adp->ad_offset + 1; i < UFS_NXADDR; i++) { #ifdef INVARIANTS if (dp->di_extb[i] != 0 && (deplist & (1 << i)) == 0) panic("initiate_write_inodeblock_ufs2: " "lost dep1"); #endif /* INVARIANTS */ dp->di_extb[i] = 0; } lastadp = NULL; break; } /* * If we have zero'ed out the last allocated block of the ext * data, roll back the size to the last currently allocated block. * We know that this last allocated block is a full-sized as * we already checked for fragments in the loop above. */ if (lastadp != NULL && dp->di_extsize <= (lastadp->ad_offset + 1) * fs->fs_bsize) { for (i = lastadp->ad_offset; i >= 0; i--) if (dp->di_extb[i] != 0) break; dp->di_extsize = (i + 1) * fs->fs_bsize; } /* * Set the file data dependencies to busy. */ for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = TAILQ_NEXT(adp, ad_next)) { #ifdef INVARIANTS if (deplist != 0 && prevlbn >= adp->ad_offset) panic("softdep_write_inodeblock: lbn order"); if ((adp->ad_state & ATTACHED) == 0) panic("inodedep %p and adp %p not attached", inodedep, adp); prevlbn = adp->ad_offset; if (adp->ad_offset < UFS_NDADDR && dp->di_db[adp->ad_offset] != adp->ad_newblkno) panic("initiate_write_inodeblock_ufs2: " "direct pointer #%jd mismatch %jd != %jd", (intmax_t)adp->ad_offset, (intmax_t)dp->di_db[adp->ad_offset], (intmax_t)adp->ad_newblkno); if (adp->ad_offset >= UFS_NDADDR && dp->di_ib[adp->ad_offset - UFS_NDADDR] != adp->ad_newblkno) panic("initiate_write_inodeblock_ufs2: " "indirect pointer #%jd mismatch %jd != %jd", (intmax_t)adp->ad_offset - UFS_NDADDR, (intmax_t)dp->di_ib[adp->ad_offset - UFS_NDADDR], (intmax_t)adp->ad_newblkno); deplist |= 1 << adp->ad_offset; if ((adp->ad_state & ATTACHED) == 0) panic("initiate_write_inodeblock_ufs2: Unknown " "state 0x%x", adp->ad_state); #endif /* INVARIANTS */ adp->ad_state &= ~ATTACHED; adp->ad_state |= UNDONE; } /* * The on-disk inode cannot claim to be any larger than the last * fragment that has been written. Otherwise, the on-disk inode * might have fragments that were not the last block in the file * which would corrupt the filesystem. */ for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { if (adp->ad_offset >= UFS_NDADDR) break; dp->di_db[adp->ad_offset] = adp->ad_oldblkno; /* keep going until hitting a rollback to a frag */ if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) continue; dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize; for (i = adp->ad_offset + 1; i < UFS_NDADDR; i++) { #ifdef INVARIANTS if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) panic("initiate_write_inodeblock_ufs2: " "lost dep2"); #endif /* INVARIANTS */ dp->di_db[i] = 0; } for (i = 0; i < UFS_NIADDR; i++) { #ifdef INVARIANTS if (dp->di_ib[i] != 0 && (deplist & ((1 << UFS_NDADDR) << i)) == 0) panic("initiate_write_inodeblock_ufs2: " "lost dep3"); #endif /* INVARIANTS */ dp->di_ib[i] = 0; } return; } /* * If we have zero'ed out the last allocated block of the file, * roll back the size to the last currently allocated block. * We know that this last allocated block is a full-sized as * we already checked for fragments in the loop above. 
*/ if (lastadp != NULL && dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) { for (i = lastadp->ad_offset; i >= 0; i--) if (dp->di_db[i] != 0) break; dp->di_size = (i + 1) * fs->fs_bsize; } /* * The only dependencies are for indirect blocks. * * The file size for indirect block additions is not guaranteed. * Such a guarantee would be non-trivial to achieve. The conventional * synchronous write implementation also does not make this guarantee. * Fsck should catch and fix discrepancies. Arguably, the file size * can be over-estimated without destroying integrity when the file * moves into the indirect blocks (i.e., is large). If we want to * postpone fsck, we are stuck with this argument. */ for (; adp; adp = TAILQ_NEXT(adp, ad_next)) dp->di_ib[adp->ad_offset - UFS_NDADDR] = 0; } /* * Cancel an indirdep as a result of truncation. Release all of the * children allocindirs and place their journal work on the appropriate * list. */ static void cancel_indirdep(indirdep, bp, freeblks) struct indirdep *indirdep; struct buf *bp; struct freeblks *freeblks; { struct allocindir *aip; /* * None of the indirect pointers will ever be visible, * so they can simply be tossed. GOINGAWAY ensures * that allocated pointers will be saved in the buffer * cache until they are freed. Note that they will * only be able to be found by their physical address * since the inode mapping the logical address will * be gone. The save buffer used for the safe copy * was allocated in setup_allocindir_phase2 using * the physical address so it could be used for this * purpose. Hence we swap the safe copy with the real * copy, allowing the safe copy to be freed and holding * on to the real copy for later use in indir_trunc. */ if (indirdep->ir_state & GOINGAWAY) panic("cancel_indirdep: already gone"); if ((indirdep->ir_state & DEPCOMPLETE) == 0) { indirdep->ir_state |= DEPCOMPLETE; LIST_REMOVE(indirdep, ir_next); } indirdep->ir_state |= GOINGAWAY; /* * Pass in bp for blocks still have journal writes * pending so we can cancel them on their own. */ while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != NULL) cancel_allocindir(aip, bp, freeblks, 0); while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != NULL) cancel_allocindir(aip, NULL, freeblks, 0); while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != NULL) cancel_allocindir(aip, NULL, freeblks, 0); while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL) cancel_allocindir(aip, NULL, freeblks, 0); /* * If there are pending partial truncations we need to keep the * old block copy around until they complete. This is because * the current b_data is not a perfect superset of the available * blocks. */ if (TAILQ_EMPTY(&indirdep->ir_trunc)) bcopy(bp->b_data, indirdep->ir_savebp->b_data, bp->b_bcount); else bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount); WORKLIST_REMOVE(&indirdep->ir_list); WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, &indirdep->ir_list); indirdep->ir_bp = NULL; indirdep->ir_freeblks = freeblks; } /* * Free an indirdep once it no longer has new pointers to track. 
*/ static void free_indirdep(indirdep) struct indirdep *indirdep; { KASSERT(TAILQ_EMPTY(&indirdep->ir_trunc), ("free_indirdep: Indir trunc list not empty.")); KASSERT(LIST_EMPTY(&indirdep->ir_completehd), ("free_indirdep: Complete head not empty.")); KASSERT(LIST_EMPTY(&indirdep->ir_writehd), ("free_indirdep: write head not empty.")); KASSERT(LIST_EMPTY(&indirdep->ir_donehd), ("free_indirdep: done head not empty.")); KASSERT(LIST_EMPTY(&indirdep->ir_deplisthd), ("free_indirdep: deplist head not empty.")); KASSERT((indirdep->ir_state & DEPCOMPLETE), ("free_indirdep: %p still on newblk list.", indirdep)); KASSERT(indirdep->ir_saveddata == NULL, ("free_indirdep: %p still has saved data.", indirdep)); if (indirdep->ir_state & ONWORKLIST) WORKLIST_REMOVE(&indirdep->ir_list); WORKITEM_FREE(indirdep, D_INDIRDEP); } /* * Called before a write to an indirdep. This routine is responsible for * rolling back pointers to a safe state which includes only those * allocindirs which have been completed. */ static void initiate_write_indirdep(indirdep, bp) struct indirdep *indirdep; struct buf *bp; { struct ufsmount *ump; indirdep->ir_state |= IOSTARTED; if (indirdep->ir_state & GOINGAWAY) panic("disk_io_initiation: indirdep gone"); /* * If there are no remaining dependencies, this will be writing * the real pointers. */ if (LIST_EMPTY(&indirdep->ir_deplisthd) && TAILQ_EMPTY(&indirdep->ir_trunc)) return; /* * Replace up-to-date version with safe version. */ if (indirdep->ir_saveddata == NULL) { ump = VFSTOUFS(indirdep->ir_list.wk_mp); LOCK_OWNED(ump); FREE_LOCK(ump); indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP, M_SOFTDEP_FLAGS); ACQUIRE_LOCK(ump); } indirdep->ir_state &= ~ATTACHED; indirdep->ir_state |= UNDONE; bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount); bcopy(indirdep->ir_savebp->b_data, bp->b_data, bp->b_bcount); } /* * Called when an inode has been cleared in a cg bitmap. This finally * eliminates any canceled jaddrefs */ void softdep_setup_inofree(mp, bp, ino, wkhd) struct mount *mp; struct buf *bp; ino_t ino; struct workhead *wkhd; { struct worklist *wk, *wkn; struct inodedep *inodedep; struct ufsmount *ump; uint8_t *inosused; struct cg *cgp; struct fs *fs; KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_setup_inofree called on non-softdep filesystem")); ump = VFSTOUFS(mp); ACQUIRE_LOCK(ump); fs = ump->um_fs; cgp = (struct cg *)bp->b_data; inosused = cg_inosused(cgp); if (isset(inosused, ino % fs->fs_ipg)) panic("softdep_setup_inofree: inode %ju not freed.", (uintmax_t)ino); if (inodedep_lookup(mp, ino, 0, &inodedep)) panic("softdep_setup_inofree: ino %ju has existing inodedep %p", (uintmax_t)ino, inodedep); if (wkhd) { LIST_FOREACH_SAFE(wk, wkhd, wk_list, wkn) { if (wk->wk_type != D_JADDREF) continue; WORKLIST_REMOVE(wk); /* * We can free immediately even if the jaddref * isn't attached in a background write as now * the bitmaps are reconciled. */ wk->wk_state |= COMPLETE | ATTACHED; free_jaddref(WK_JADDREF(wk)); } jwork_move(&bp->b_dep, wkhd); } FREE_LOCK(ump); } /* * Called via ffs_blkfree() after a set of frags has been cleared from a cg * map. Any dependencies waiting for the write to clear are added to the * buf's list and any jnewblks that are being canceled are discarded * immediately. 
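 *
 * A rough sketch of the sorting done in the loop below:
 *
 *	if (wk->wk_type != D_JNEWBLK)
 *		WORKLIST_INSERT(&bmsafemap->sm_freehd, wk);
 *	else
 *		free_jnewblk(WK_JNEWBLK(wk));
 *
 * Ordinary work is parked on the bmsafemap until the cg buffer is
 * written, while a canceled jnewblk may be released at once because
 * the bitmap being written already shows the block as free.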
*/ void softdep_setup_blkfree(mp, bp, blkno, frags, wkhd) struct mount *mp; struct buf *bp; ufs2_daddr_t blkno; int frags; struct workhead *wkhd; { struct bmsafemap *bmsafemap; struct jnewblk *jnewblk; struct ufsmount *ump; struct worklist *wk; struct fs *fs; #ifdef SUJ_DEBUG uint8_t *blksfree; struct cg *cgp; ufs2_daddr_t jstart; ufs2_daddr_t jend; ufs2_daddr_t end; long bno; int i; #endif CTR3(KTR_SUJ, "softdep_setup_blkfree: blkno %jd frags %d wk head %p", blkno, frags, wkhd); ump = VFSTOUFS(mp); KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, ("softdep_setup_blkfree called on non-softdep filesystem")); ACQUIRE_LOCK(ump); /* Lookup the bmsafemap so we track when it is dirty. */ fs = ump->um_fs; bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL); /* * Detach any jnewblks which have been canceled. They must linger * until the bitmap is cleared again by ffs_blkfree() to prevent * an unjournaled allocation from hitting the disk. */ if (wkhd) { while ((wk = LIST_FIRST(wkhd)) != NULL) { CTR2(KTR_SUJ, "softdep_setup_blkfree: blkno %jd wk type %d", blkno, wk->wk_type); WORKLIST_REMOVE(wk); if (wk->wk_type != D_JNEWBLK) { WORKLIST_INSERT(&bmsafemap->sm_freehd, wk); continue; } jnewblk = WK_JNEWBLK(wk); KASSERT(jnewblk->jn_state & GOINGAWAY, ("softdep_setup_blkfree: jnewblk not canceled.")); #ifdef SUJ_DEBUG /* * Assert that this block is free in the bitmap * before we discard the jnewblk. */ cgp = (struct cg *)bp->b_data; blksfree = cg_blksfree(cgp); bno = dtogd(fs, jnewblk->jn_blkno); for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) { if (isset(blksfree, bno + i)) continue; panic("softdep_setup_blkfree: not free"); } #endif /* * Even if it's not attached we can free immediately * as the new bitmap is correct. */ wk->wk_state |= COMPLETE | ATTACHED; free_jnewblk(jnewblk); } } #ifdef SUJ_DEBUG /* * Assert that we are not freeing a block which has an outstanding * allocation dependency. */ fs = VFSTOUFS(mp)->um_fs; bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL); end = blkno + frags; LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) { /* * Don't match against blocks that will be freed when the * background write is done. */ if ((jnewblk->jn_state & (ATTACHED | COMPLETE | DEPCOMPLETE)) == (COMPLETE | DEPCOMPLETE)) continue; jstart = jnewblk->jn_blkno + jnewblk->jn_oldfrags; jend = jnewblk->jn_blkno + jnewblk->jn_frags; if ((blkno >= jstart && blkno < jend) || (end > jstart && end <= jend)) { printf("state 0x%X %jd - %d %d dep %p\n", jnewblk->jn_state, jnewblk->jn_blkno, jnewblk->jn_oldfrags, jnewblk->jn_frags, jnewblk->jn_dep); panic("softdep_setup_blkfree: " "%jd-%jd(%d) overlaps with %jd-%jd", blkno, end, frags, jstart, jend); } } #endif FREE_LOCK(ump); } /* * Revert a block allocation when the journal record that describes it * is not yet written. */ static int jnewblk_rollback(jnewblk, fs, cgp, blksfree) struct jnewblk *jnewblk; struct fs *fs; struct cg *cgp; uint8_t *blksfree; { ufs1_daddr_t fragno; long cgbno, bbase; int frags, blk; int i; frags = 0; cgbno = dtogd(fs, jnewblk->jn_blkno); /* * We have to test which frags need to be rolled back. We may * be operating on a stale copy when doing background writes. */ for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) if (isclr(blksfree, cgbno + i)) frags++; if (frags == 0) return (0); /* * This is mostly ffs_blkfree() sans some validation and * superblock updates. 
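 *
 * For example, rolling back a journaled allocation of 3 fragments in
 * an 8-fragment block sets those 3 bits free again and adds 3 to
 * cg_cs.cs_nffree; if that happens to reassemble a whole free block,
 * the accounting below converts it back into one free block
 * (cs_nffree -= fs_frag, cs_nbfree++, cluster map updated), much as
 * ffs_blkfree() would.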
*/ if (frags == fs->fs_frag) { fragno = fragstoblks(fs, cgbno); ffs_setblock(fs, blksfree, fragno); ffs_clusteracct(fs, cgp, fragno, 1); cgp->cg_cs.cs_nbfree++; } else { cgbno += jnewblk->jn_oldfrags; bbase = cgbno - fragnum(fs, cgbno); /* Decrement the old frags. */ blk = blkmap(fs, blksfree, bbase); ffs_fragacct(fs, blk, cgp->cg_frsum, -1); /* Deallocate the fragment */ for (i = 0; i < frags; i++) setbit(blksfree, cgbno + i); cgp->cg_cs.cs_nffree += frags; /* Add back in counts associated with the new frags */ blk = blkmap(fs, blksfree, bbase); ffs_fragacct(fs, blk, cgp->cg_frsum, 1); /* If a complete block has been reassembled, account for it. */ fragno = fragstoblks(fs, bbase); if (ffs_isblock(fs, blksfree, fragno)) { cgp->cg_cs.cs_nffree -= fs->fs_frag; ffs_clusteracct(fs, cgp, fragno, 1); cgp->cg_cs.cs_nbfree++; } } stat_jnewblk++; jnewblk->jn_state &= ~ATTACHED; jnewblk->jn_state |= UNDONE; return (frags); } static void initiate_write_bmsafemap(bmsafemap, bp) struct bmsafemap *bmsafemap; struct buf *bp; /* The cg block. */ { struct jaddref *jaddref; struct jnewblk *jnewblk; uint8_t *inosused; uint8_t *blksfree; struct cg *cgp; struct fs *fs; ino_t ino; /* * If this is a background write, we did this at the time that * the copy was made, so do not need to do it again. */ if (bmsafemap->sm_state & IOSTARTED) return; bmsafemap->sm_state |= IOSTARTED; /* * Clear any inode allocations which are pending journal writes. */ if (LIST_FIRST(&bmsafemap->sm_jaddrefhd) != NULL) { cgp = (struct cg *)bp->b_data; fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; inosused = cg_inosused(cgp); LIST_FOREACH(jaddref, &bmsafemap->sm_jaddrefhd, ja_bmdeps) { ino = jaddref->ja_ino % fs->fs_ipg; if (isset(inosused, ino)) { if ((jaddref->ja_mode & IFMT) == IFDIR) cgp->cg_cs.cs_ndir--; cgp->cg_cs.cs_nifree++; clrbit(inosused, ino); jaddref->ja_state &= ~ATTACHED; jaddref->ja_state |= UNDONE; stat_jaddref++; } else panic("initiate_write_bmsafemap: inode %ju " "marked free", (uintmax_t)jaddref->ja_ino); } } /* * Clear any block allocations which are pending journal writes. */ if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) { cgp = (struct cg *)bp->b_data; fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; blksfree = cg_blksfree(cgp); LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) { if (jnewblk_rollback(jnewblk, fs, cgp, blksfree)) continue; panic("initiate_write_bmsafemap: block %jd " "marked free", jnewblk->jn_blkno); } } /* * Move allocation lists to the written lists so they can be * cleared once the block write is complete. */ LIST_SWAP(&bmsafemap->sm_inodedephd, &bmsafemap->sm_inodedepwr, inodedep, id_deps); LIST_SWAP(&bmsafemap->sm_newblkhd, &bmsafemap->sm_newblkwr, newblk, nb_deps); LIST_SWAP(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr, worklist, wk_list); } /* * This routine is called during the completion interrupt * service routine for a disk write (from the procedure called * by the device driver to inform the filesystem caches of * a request completion). It should be called early in this * procedure, before the block is made available to other * processes or other routines are called. 
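 *
 * The per-type handlers below run in one of two modes: when the write
 * failed (BIO_ERROR without B_INVAL) they are called with a zero flag
 * so that only the roll-forward is performed and the dependencies are
 * left on bp->b_dep, while on success they are called with
 * WRITESUCCEEDED and a nonzero return asks for the work item to be
 * reattached to bp->b_dep for the next write of the buffer.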
 *
 */
static void
softdep_disk_write_complete(bp)
	struct buf *bp;		/* describes the completed disk write */
{
	struct worklist *wk;
	struct worklist *owk;
	struct ufsmount *ump;
	struct workhead reattach;
	struct freeblks *freeblks;
	struct buf *sbp;

	ump = softdep_bp_to_mp(bp);
	if (ump == NULL)
		return;

	sbp = NULL;

	/*
	 * If an error occurred while doing the write, then the data
	 * has not hit the disk and the dependencies cannot be processed.
	 * But we do have to go through and roll forward any dependencies
	 * that were rolled back before the disk write.
	 */
	ACQUIRE_LOCK(ump);
	if ((bp->b_ioflags & BIO_ERROR) != 0 && (bp->b_flags & B_INVAL) == 0) {
		LIST_FOREACH(wk, &bp->b_dep, wk_list) {
			switch (wk->wk_type) {

			case D_PAGEDEP:
				handle_written_filepage(WK_PAGEDEP(wk), bp, 0);
				continue;

			case D_INODEDEP:
				handle_written_inodeblock(WK_INODEDEP(wk),
				    bp, 0);
				continue;

			case D_BMSAFEMAP:
				handle_written_bmsafemap(WK_BMSAFEMAP(wk),
				    bp, 0);
				continue;

			case D_INDIRDEP:
				handle_written_indirdep(WK_INDIRDEP(wk),
				    bp, &sbp, 0);
				continue;

			default:
				/* nothing to roll forward */
				continue;
			}
		}
		FREE_LOCK(ump);
		return;
	}
	LIST_INIT(&reattach);

	/*
	 * Ump SU lock must not be released anywhere in this code segment.
	 */
	owk = NULL;
	while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
		WORKLIST_REMOVE(wk);
		atomic_add_long(&dep_write[wk->wk_type], 1);
		if (wk == owk)
			panic("duplicate worklist: %p\n", wk);
		owk = wk;
		switch (wk->wk_type) {

		case D_PAGEDEP:
			if (handle_written_filepage(WK_PAGEDEP(wk), bp,
			    WRITESUCCEEDED))
				WORKLIST_INSERT(&reattach, wk);
			continue;

		case D_INODEDEP:
			if (handle_written_inodeblock(WK_INODEDEP(wk), bp,
			    WRITESUCCEEDED))
				WORKLIST_INSERT(&reattach, wk);
			continue;

		case D_BMSAFEMAP:
			if (handle_written_bmsafemap(WK_BMSAFEMAP(wk), bp,
			    WRITESUCCEEDED))
				WORKLIST_INSERT(&reattach, wk);
			continue;

		case D_MKDIR:
			handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
			continue;

		case D_ALLOCDIRECT:
			wk->wk_state |= COMPLETE;
			handle_allocdirect_partdone(WK_ALLOCDIRECT(wk), NULL);
			continue;

		case D_ALLOCINDIR:
			wk->wk_state |= COMPLETE;
			handle_allocindir_partdone(WK_ALLOCINDIR(wk));
			continue;

		case D_INDIRDEP:
			if (handle_written_indirdep(WK_INDIRDEP(wk), bp, &sbp,
			    WRITESUCCEEDED))
				WORKLIST_INSERT(&reattach, wk);
			continue;

		case D_FREEBLKS:
			wk->wk_state |= COMPLETE;
			freeblks = WK_FREEBLKS(wk);
			if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE &&
			    LIST_EMPTY(&freeblks->fb_jblkdephd))
				add_to_worklist(wk, WK_NODELAY);
			continue;

		case D_FREEWORK:
			handle_written_freework(WK_FREEWORK(wk));
			break;

		case D_JSEGDEP:
			free_jsegdep(WK_JSEGDEP(wk));
			continue;

		case D_JSEG:
			handle_written_jseg(WK_JSEG(wk), bp);
			continue;

		case D_SBDEP:
			if (handle_written_sbdep(WK_SBDEP(wk), bp))
				WORKLIST_INSERT(&reattach, wk);
			continue;

		case D_FREEDEP:
			free_freedep(WK_FREEDEP(wk));
			continue;

		default:
			panic("handle_disk_write_complete: Unknown type %s",
			    TYPENAME(wk->wk_type));
			/* NOTREACHED */
		}
	}
	/*
	 * Reattach any requests that must be redone.
	 */
	while ((wk = LIST_FIRST(&reattach)) != NULL) {
		WORKLIST_REMOVE(wk);
		WORKLIST_INSERT(&bp->b_dep, wk);
	}
	FREE_LOCK(ump);
	if (sbp)
		brelse(sbp);
}

/*
 * Called from within softdep_disk_write_complete above. Note that
 * this routine is always called from interrupt level with further
 * splbio interrupts blocked.
 */
static void
handle_allocdirect_partdone(adp, wkhd)
	struct allocdirect *adp;	/* the completed allocdirect */
	struct workhead *wkhd;		/* Work to do when inode is written.
*/ { struct allocdirectlst *listhead; struct allocdirect *listadp; struct inodedep *inodedep; long bsize; if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) return; /* * The on-disk inode cannot claim to be any larger than the last * fragment that has been written. Otherwise, the on-disk inode * might have fragments that were not the last block in the file * which would corrupt the filesystem. Thus, we cannot free any * allocdirects after one whose ad_oldblkno claims a fragment as * these blocks must be rolled back to zero before writing the inode. * We check the currently active set of allocdirects in id_inoupdt * or id_extupdt as appropriate. */ inodedep = adp->ad_inodedep; bsize = inodedep->id_fs->fs_bsize; if (adp->ad_state & EXTDATA) listhead = &inodedep->id_extupdt; else listhead = &inodedep->id_inoupdt; TAILQ_FOREACH(listadp, listhead, ad_next) { /* found our block */ if (listadp == adp) break; /* continue if ad_oldlbn is not a fragment */ if (listadp->ad_oldsize == 0 || listadp->ad_oldsize == bsize) continue; /* hit a fragment */ return; } /* * If we have reached the end of the current list without * finding the just finished dependency, then it must be * on the future dependency list. Future dependencies cannot * be freed until they are moved to the current list. */ if (listadp == NULL) { #ifdef DEBUG if (adp->ad_state & EXTDATA) listhead = &inodedep->id_newextupdt; else listhead = &inodedep->id_newinoupdt; TAILQ_FOREACH(listadp, listhead, ad_next) /* found our block */ if (listadp == adp) break; if (listadp == NULL) panic("handle_allocdirect_partdone: lost dep"); #endif /* DEBUG */ return; } /* * If we have found the just finished dependency, then queue * it along with anything that follows it that is complete. * Since the pointer has not yet been written in the inode * as the dependency prevents it, place the allocdirect on the * bufwait list where it will be freed once the pointer is * valid. */ if (wkhd == NULL) wkhd = &inodedep->id_bufwait; for (; adp; adp = listadp) { listadp = TAILQ_NEXT(adp, ad_next); if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) return; TAILQ_REMOVE(listhead, adp, ad_next); WORKLIST_INSERT(wkhd, &adp->ad_block.nb_list); } } /* * Called from within softdep_disk_write_complete above. This routine * completes successfully written allocindirs. */ static void handle_allocindir_partdone(aip) struct allocindir *aip; /* the completed allocindir */ { struct indirdep *indirdep; if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE) return; indirdep = aip->ai_indirdep; LIST_REMOVE(aip, ai_next); /* * Don't set a pointer while the buffer is undergoing IO or while * we have active truncations. */ if (indirdep->ir_state & UNDONE || !TAILQ_EMPTY(&indirdep->ir_trunc)) { LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next); return; } if (indirdep->ir_state & UFS1FMT) ((ufs1_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = aip->ai_newblkno; else ((ufs2_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = aip->ai_newblkno; /* * Await the pointer write before freeing the allocindir. */ LIST_INSERT_HEAD(&indirdep->ir_writehd, aip, ai_next); } /* * Release segments held on a jwork list. 
*/ static void handle_jwork(wkhd) struct workhead *wkhd; { struct worklist *wk; while ((wk = LIST_FIRST(wkhd)) != NULL) { WORKLIST_REMOVE(wk); switch (wk->wk_type) { case D_JSEGDEP: free_jsegdep(WK_JSEGDEP(wk)); continue; case D_FREEDEP: free_freedep(WK_FREEDEP(wk)); continue; case D_FREEFRAG: rele_jseg(WK_JSEG(WK_FREEFRAG(wk)->ff_jdep)); WORKITEM_FREE(wk, D_FREEFRAG); continue; case D_FREEWORK: handle_written_freework(WK_FREEWORK(wk)); continue; default: panic("handle_jwork: Unknown type %s\n", TYPENAME(wk->wk_type)); } } } /* * Handle the bufwait list on an inode when it is safe to release items * held there. This normally happens after an inode block is written but * may be delayed and handled later if there are pending journal items that * are not yet safe to be released. */ static struct freefile * handle_bufwait(inodedep, refhd) struct inodedep *inodedep; struct workhead *refhd; { struct jaddref *jaddref; struct freefile *freefile; struct worklist *wk; freefile = NULL; while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) { WORKLIST_REMOVE(wk); switch (wk->wk_type) { case D_FREEFILE: /* * We defer adding freefile to the worklist * until all other additions have been made to * ensure that it will be done after all the * old blocks have been freed. */ if (freefile != NULL) panic("handle_bufwait: freefile"); freefile = WK_FREEFILE(wk); continue; case D_MKDIR: handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT); continue; case D_DIRADD: diradd_inode_written(WK_DIRADD(wk), inodedep); continue; case D_FREEFRAG: wk->wk_state |= COMPLETE; if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE) add_to_worklist(wk, 0); continue; case D_DIRREM: wk->wk_state |= COMPLETE; add_to_worklist(wk, 0); continue; case D_ALLOCDIRECT: case D_ALLOCINDIR: free_newblk(WK_NEWBLK(wk)); continue; case D_JNEWBLK: wk->wk_state |= COMPLETE; free_jnewblk(WK_JNEWBLK(wk)); continue; /* * Save freed journal segments and add references on * the supplied list which will delay their release * until the cg bitmap is cleared on disk. */ case D_JSEGDEP: if (refhd == NULL) free_jsegdep(WK_JSEGDEP(wk)); else WORKLIST_INSERT(refhd, wk); continue; case D_JADDREF: jaddref = WK_JADDREF(wk); TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps); /* * Transfer any jaddrefs to the list to be freed with * the bitmap if we're handling a removed file. */ if (refhd == NULL) { wk->wk_state |= COMPLETE; free_jaddref(jaddref); } else WORKLIST_INSERT(refhd, wk); continue; default: panic("handle_bufwait: Unknown type %p(%s)", wk, TYPENAME(wk->wk_type)); /* NOTREACHED */ } } return (freefile); } /* * Called from within softdep_disk_write_complete above to restore * in-memory inode block contents to their most up-to-date state. Note * that this routine is always called from interrupt level with further * interrupts from this device blocked. * * If the write did not succeed, we will do all the roll-forward * operations, but we will not take the actions that will allow its * dependencies to be processed. 
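 *
 * A nonzero return reports that the block still carries rolled-back
 * contents and must be written again; the caller in
 * softdep_disk_write_complete() then reattaches the dependency, as in:
 *
 *	if (handle_written_inodeblock(WK_INODEDEP(wk), bp, WRITESUCCEEDED))
 *		WORKLIST_INSERT(&reattach, wk);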
*/ static int handle_written_inodeblock(inodedep, bp, flags) struct inodedep *inodedep; struct buf *bp; /* buffer containing the inode block */ int flags; { struct freefile *freefile; struct allocdirect *adp, *nextadp; struct ufs1_dinode *dp1 = NULL; struct ufs2_dinode *dp2 = NULL; struct workhead wkhd; int hadchanges, fstype; ino_t freelink; LIST_INIT(&wkhd); hadchanges = 0; freefile = NULL; if ((inodedep->id_state & IOSTARTED) == 0) panic("handle_written_inodeblock: not started"); inodedep->id_state &= ~IOSTARTED; if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) { fstype = UFS1; dp1 = (struct ufs1_dinode *)bp->b_data + ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); freelink = dp1->di_freelink; } else { fstype = UFS2; dp2 = (struct ufs2_dinode *)bp->b_data + ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); freelink = dp2->di_freelink; } /* * Leave this inodeblock dirty until it's in the list. */ if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) == UNLINKED && (flags & WRITESUCCEEDED)) { struct inodedep *inon; inon = TAILQ_NEXT(inodedep, id_unlinked); if ((inon == NULL && freelink == 0) || (inon && inon->id_ino == freelink)) { if (inon) inon->id_state |= UNLINKPREV; inodedep->id_state |= UNLINKNEXT; } hadchanges = 1; } /* * If we had to rollback the inode allocation because of * bitmaps being incomplete, then simply restore it. * Keep the block dirty so that it will not be reclaimed until * all associated dependencies have been cleared and the * corresponding updates written to disk. */ if (inodedep->id_savedino1 != NULL) { hadchanges = 1; if (fstype == UFS1) *dp1 = *inodedep->id_savedino1; else *dp2 = *inodedep->id_savedino2; free(inodedep->id_savedino1, M_SAVEDINO); inodedep->id_savedino1 = NULL; if ((bp->b_flags & B_DELWRI) == 0) stat_inode_bitmap++; bdirty(bp); /* * If the inode is clear here and GOINGAWAY it will never * be written. Process the bufwait and clear any pending * work which may include the freefile. */ if (inodedep->id_state & GOINGAWAY) goto bufwait; return (1); } if (flags & WRITESUCCEEDED) inodedep->id_state |= COMPLETE; /* * Roll forward anything that had to be rolled back before * the inode could be updated. 
*/ for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) { nextadp = TAILQ_NEXT(adp, ad_next); if (adp->ad_state & ATTACHED) panic("handle_written_inodeblock: new entry"); if (fstype == UFS1) { if (adp->ad_offset < UFS_NDADDR) { if (dp1->di_db[adp->ad_offset]!=adp->ad_oldblkno) panic("%s %s #%jd mismatch %d != %jd", "handle_written_inodeblock:", "direct pointer", (intmax_t)adp->ad_offset, dp1->di_db[adp->ad_offset], (intmax_t)adp->ad_oldblkno); dp1->di_db[adp->ad_offset] = adp->ad_newblkno; } else { if (dp1->di_ib[adp->ad_offset - UFS_NDADDR] != 0) panic("%s: %s #%jd allocated as %d", "handle_written_inodeblock", "indirect pointer", (intmax_t)adp->ad_offset - UFS_NDADDR, dp1->di_ib[adp->ad_offset - UFS_NDADDR]); dp1->di_ib[adp->ad_offset - UFS_NDADDR] = adp->ad_newblkno; } } else { if (adp->ad_offset < UFS_NDADDR) { if (dp2->di_db[adp->ad_offset]!=adp->ad_oldblkno) panic("%s: %s #%jd %s %jd != %jd", "handle_written_inodeblock", "direct pointer", (intmax_t)adp->ad_offset, "mismatch", (intmax_t)dp2->di_db[adp->ad_offset], (intmax_t)adp->ad_oldblkno); dp2->di_db[adp->ad_offset] = adp->ad_newblkno; } else { if (dp2->di_ib[adp->ad_offset - UFS_NDADDR] != 0) panic("%s: %s #%jd allocated as %jd", "handle_written_inodeblock", "indirect pointer", (intmax_t)adp->ad_offset - UFS_NDADDR, (intmax_t) dp2->di_ib[adp->ad_offset - UFS_NDADDR]); dp2->di_ib[adp->ad_offset - UFS_NDADDR] = adp->ad_newblkno; } } adp->ad_state &= ~UNDONE; adp->ad_state |= ATTACHED; hadchanges = 1; } for (adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; adp = nextadp) { nextadp = TAILQ_NEXT(adp, ad_next); if (adp->ad_state & ATTACHED) panic("handle_written_inodeblock: new entry"); if (dp2->di_extb[adp->ad_offset] != adp->ad_oldblkno) panic("%s: direct pointers #%jd %s %jd != %jd", "handle_written_inodeblock", (intmax_t)adp->ad_offset, "mismatch", (intmax_t)dp2->di_extb[adp->ad_offset], (intmax_t)adp->ad_oldblkno); dp2->di_extb[adp->ad_offset] = adp->ad_newblkno; adp->ad_state &= ~UNDONE; adp->ad_state |= ATTACHED; hadchanges = 1; } if (hadchanges && (bp->b_flags & B_DELWRI) == 0) stat_direct_blk_ptrs++; /* * Reset the file size to its most up-to-date value. */ if (inodedep->id_savedsize == -1 || inodedep->id_savedextsize == -1) panic("handle_written_inodeblock: bad size"); if (inodedep->id_savednlink > UFS_LINK_MAX) panic("handle_written_inodeblock: Invalid link count " "%jd for inodedep %p", (uintmax_t)inodedep->id_savednlink, inodedep); if (fstype == UFS1) { if (dp1->di_nlink != inodedep->id_savednlink) { dp1->di_nlink = inodedep->id_savednlink; hadchanges = 1; } if (dp1->di_size != inodedep->id_savedsize) { dp1->di_size = inodedep->id_savedsize; hadchanges = 1; } } else { if (dp2->di_nlink != inodedep->id_savednlink) { dp2->di_nlink = inodedep->id_savednlink; hadchanges = 1; } if (dp2->di_size != inodedep->id_savedsize) { dp2->di_size = inodedep->id_savedsize; hadchanges = 1; } if (dp2->di_extsize != inodedep->id_savedextsize) { dp2->di_extsize = inodedep->id_savedextsize; hadchanges = 1; } } inodedep->id_savedsize = -1; inodedep->id_savedextsize = -1; inodedep->id_savednlink = -1; /* * If there were any rollbacks in the inode block, then it must be * marked dirty so that its will eventually get written back in * its correct form. */ if (hadchanges) bdirty(bp); bufwait: /* * If the write did not succeed, we have done all the roll-forward * operations, but we cannot take the actions that will allow its * dependencies to be processed. 
*/ if ((flags & WRITESUCCEEDED) == 0) return (hadchanges); /* * Process any allocdirects that completed during the update. */ if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL) handle_allocdirect_partdone(adp, &wkhd); if ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL) handle_allocdirect_partdone(adp, &wkhd); /* * Process deallocations that were held pending until the * inode had been written to disk. Freeing of the inode * is delayed until after all blocks have been freed to * avoid creation of new triples * before the old ones have been deleted. Completely * unlinked inodes are not processed until the unlinked * inode list is written or the last reference is removed. */ if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) != UNLINKED) { freefile = handle_bufwait(inodedep, NULL); if (freefile && !LIST_EMPTY(&wkhd)) { WORKLIST_INSERT(&wkhd, &freefile->fx_list); freefile = NULL; } } /* * Move rolled forward dependency completions to the bufwait list * now that those that were already written have been processed. */ if (!LIST_EMPTY(&wkhd) && hadchanges == 0) panic("handle_written_inodeblock: bufwait but no changes"); jwork_move(&inodedep->id_bufwait, &wkhd); if (freefile != NULL) { /* * If the inode is goingaway it was never written. Fake up * the state here so free_inodedep() can succeed. */ if (inodedep->id_state & GOINGAWAY) inodedep->id_state |= COMPLETE | DEPCOMPLETE; if (free_inodedep(inodedep) == 0) panic("handle_written_inodeblock: live inodedep %p", inodedep); add_to_worklist(&freefile->fx_list, 0); return (0); } /* * If no outstanding dependencies, free it. */ if (free_inodedep(inodedep) || (TAILQ_FIRST(&inodedep->id_inoreflst) == 0 && TAILQ_FIRST(&inodedep->id_inoupdt) == 0 && TAILQ_FIRST(&inodedep->id_extupdt) == 0 && LIST_FIRST(&inodedep->id_bufwait) == 0)) return (0); return (hadchanges); } /* * Perform needed roll-forwards and kick off any dependencies that * can now be processed. * * If the write did not succeed, we will do all the roll-forward * operations, but we will not take the actions that will allow its * dependencies to be processed. */ static int handle_written_indirdep(indirdep, bp, bpp, flags) struct indirdep *indirdep; struct buf *bp; struct buf **bpp; int flags; { struct allocindir *aip; struct buf *sbp; int chgs; if (indirdep->ir_state & GOINGAWAY) panic("handle_written_indirdep: indirdep gone"); if ((indirdep->ir_state & IOSTARTED) == 0) panic("handle_written_indirdep: IO not started"); chgs = 0; /* * If there were rollbacks revert them here. */ if (indirdep->ir_saveddata) { bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount); if (TAILQ_EMPTY(&indirdep->ir_trunc)) { free(indirdep->ir_saveddata, M_INDIRDEP); indirdep->ir_saveddata = NULL; } chgs = 1; } indirdep->ir_state &= ~(UNDONE | IOSTARTED); indirdep->ir_state |= ATTACHED; /* * If the write did not succeed, we have done all the roll-forward * operations, but we cannot take the actions that will allow its * dependencies to be processed. */ if ((flags & WRITESUCCEEDED) == 0) { stat_indir_blk_ptrs++; bdirty(bp); return (1); } /* * Move allocindirs with written pointers to the completehd if * the indirdep's pointer is not yet written. Otherwise * free them here. 
*/ while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != NULL) { LIST_REMOVE(aip, ai_next); if ((indirdep->ir_state & DEPCOMPLETE) == 0) { LIST_INSERT_HEAD(&indirdep->ir_completehd, aip, ai_next); newblk_freefrag(&aip->ai_block); continue; } free_newblk(&aip->ai_block); } /* * Move allocindirs that have finished dependency processing from * the done list to the write list after updating the pointers. */ if (TAILQ_EMPTY(&indirdep->ir_trunc)) { while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != NULL) { handle_allocindir_partdone(aip); if (aip == LIST_FIRST(&indirdep->ir_donehd)) panic("disk_write_complete: not gone"); chgs = 1; } } /* * Preserve the indirdep if there were any changes or if it is not * yet valid on disk. */ if (chgs) { stat_indir_blk_ptrs++; bdirty(bp); return (1); } /* * If there were no changes we can discard the savedbp and detach * ourselves from the buf. We are only carrying completed pointers * in this case. */ sbp = indirdep->ir_savebp; sbp->b_flags |= B_INVAL | B_NOCACHE; indirdep->ir_savebp = NULL; indirdep->ir_bp = NULL; if (*bpp != NULL) panic("handle_written_indirdep: bp already exists."); *bpp = sbp; /* * The indirdep may not be freed until its parent points at it. */ if (indirdep->ir_state & DEPCOMPLETE) free_indirdep(indirdep); return (0); } /* * Process a diradd entry after its dependent inode has been written. * This routine must be called with splbio interrupts blocked. */ static void diradd_inode_written(dap, inodedep) struct diradd *dap; struct inodedep *inodedep; { dap->da_state |= COMPLETE; complete_diradd(dap); WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); } /* * Returns true if the bmsafemap will have rollbacks when written. Must only * be called with the per-filesystem lock and the buf lock on the cg held. */ static int bmsafemap_backgroundwrite(bmsafemap, bp) struct bmsafemap *bmsafemap; struct buf *bp; { int dirty; LOCK_OWNED(VFSTOUFS(bmsafemap->sm_list.wk_mp)); dirty = !LIST_EMPTY(&bmsafemap->sm_jaddrefhd) | !LIST_EMPTY(&bmsafemap->sm_jnewblkhd); /* * If we're initiating a background write we need to process the * rollbacks as they exist now, not as they exist when IO starts. * No other consumers will look at the contents of the shadowed * buf so this is safe to do here. */ if (bp->b_xflags & BX_BKGRDMARKER) initiate_write_bmsafemap(bmsafemap, bp); return (dirty); } /* * Re-apply an allocation when a cg write is complete. */ static int jnewblk_rollforward(jnewblk, fs, cgp, blksfree) struct jnewblk *jnewblk; struct fs *fs; struct cg *cgp; uint8_t *blksfree; { ufs1_daddr_t fragno; ufs2_daddr_t blkno; long cgbno, bbase; int frags, blk; int i; frags = 0; cgbno = dtogd(fs, jnewblk->jn_blkno); for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) { if (isclr(blksfree, cgbno + i)) panic("jnewblk_rollforward: re-allocated fragment"); frags++; } if (frags == fs->fs_frag) { blkno = fragstoblks(fs, cgbno); ffs_clrblock(fs, blksfree, (long)blkno); ffs_clusteracct(fs, cgp, blkno, -1); cgp->cg_cs.cs_nbfree--; } else { bbase = cgbno - fragnum(fs, cgbno); cgbno += jnewblk->jn_oldfrags; /* If a complete block had been reassembled, account for it. */ fragno = fragstoblks(fs, bbase); if (ffs_isblock(fs, blksfree, fragno)) { cgp->cg_cs.cs_nffree += fs->fs_frag; ffs_clusteracct(fs, cgp, fragno, -1); cgp->cg_cs.cs_nbfree--; } /* Decrement the old frags. 
*/ blk = blkmap(fs, blksfree, bbase); ffs_fragacct(fs, blk, cgp->cg_frsum, -1); /* Allocate the fragment */ for (i = 0; i < frags; i++) clrbit(blksfree, cgbno + i); cgp->cg_cs.cs_nffree -= frags; /* Add back in counts associated with the new frags */ blk = blkmap(fs, blksfree, bbase); ffs_fragacct(fs, blk, cgp->cg_frsum, 1); } return (frags); } /* * Complete a write to a bmsafemap structure. Roll forward any bitmap * changes if it's not a background write. Set all written dependencies * to DEPCOMPLETE and free the structure if possible. * * If the write did not succeed, we will do all the roll-forward * operations, but we will not take the actions that will allow its * dependencies to be processed. */ static int handle_written_bmsafemap(bmsafemap, bp, flags) struct bmsafemap *bmsafemap; struct buf *bp; int flags; { struct newblk *newblk; struct inodedep *inodedep; struct jaddref *jaddref, *jatmp; struct jnewblk *jnewblk, *jntmp; struct ufsmount *ump; uint8_t *inosused; uint8_t *blksfree; struct cg *cgp; struct fs *fs; ino_t ino; int foreground; int chgs; if ((bmsafemap->sm_state & IOSTARTED) == 0) panic("handle_written_bmsafemap: Not started\n"); ump = VFSTOUFS(bmsafemap->sm_list.wk_mp); chgs = 0; bmsafemap->sm_state &= ~IOSTARTED; foreground = (bp->b_xflags & BX_BKGRDMARKER) == 0; /* * If write was successful, release journal work that was waiting * on the write. Otherwise move the work back. */ if (flags & WRITESUCCEEDED) handle_jwork(&bmsafemap->sm_freewr); else LIST_CONCAT(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr, worklist, wk_list); /* * Restore unwritten inode allocation pending jaddref writes. */ if (!LIST_EMPTY(&bmsafemap->sm_jaddrefhd)) { cgp = (struct cg *)bp->b_data; fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; inosused = cg_inosused(cgp); LIST_FOREACH_SAFE(jaddref, &bmsafemap->sm_jaddrefhd, ja_bmdeps, jatmp) { if ((jaddref->ja_state & UNDONE) == 0) continue; ino = jaddref->ja_ino % fs->fs_ipg; if (isset(inosused, ino)) panic("handle_written_bmsafemap: " "re-allocated inode"); /* Do the roll-forward only if it's a real copy. */ if (foreground) { if ((jaddref->ja_mode & IFMT) == IFDIR) cgp->cg_cs.cs_ndir++; cgp->cg_cs.cs_nifree--; setbit(inosused, ino); chgs = 1; } jaddref->ja_state &= ~UNDONE; jaddref->ja_state |= ATTACHED; free_jaddref(jaddref); } } /* * Restore any block allocations which are pending journal writes. */ if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) { cgp = (struct cg *)bp->b_data; fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; blksfree = cg_blksfree(cgp); LIST_FOREACH_SAFE(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps, jntmp) { if ((jnewblk->jn_state & UNDONE) == 0) continue; /* Do the roll-forward only if it's a real copy. */ if (foreground && jnewblk_rollforward(jnewblk, fs, cgp, blksfree)) chgs = 1; jnewblk->jn_state &= ~(UNDONE | NEWBLOCK); jnewblk->jn_state |= ATTACHED; free_jnewblk(jnewblk); } } /* * If the write did not succeed, we have done all the roll-forward * operations, but we cannot take the actions that will allow its * dependencies to be processed. 
*/ if ((flags & WRITESUCCEEDED) == 0) { LIST_CONCAT(&bmsafemap->sm_newblkhd, &bmsafemap->sm_newblkwr, newblk, nb_deps); LIST_CONCAT(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr, worklist, wk_list); if (foreground) bdirty(bp); return (1); } while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkwr))) { newblk->nb_state |= DEPCOMPLETE; newblk->nb_state &= ~ONDEPLIST; newblk->nb_bmsafemap = NULL; LIST_REMOVE(newblk, nb_deps); if (newblk->nb_list.wk_type == D_ALLOCDIRECT) handle_allocdirect_partdone( WK_ALLOCDIRECT(&newblk->nb_list), NULL); else if (newblk->nb_list.wk_type == D_ALLOCINDIR) handle_allocindir_partdone( WK_ALLOCINDIR(&newblk->nb_list)); else if (newblk->nb_list.wk_type != D_NEWBLK) panic("handle_written_bmsafemap: Unexpected type: %s", TYPENAME(newblk->nb_list.wk_type)); } while ((inodedep = LIST_FIRST(&bmsafemap->sm_inodedepwr)) != NULL) { inodedep->id_state |= DEPCOMPLETE; inodedep->id_state &= ~ONDEPLIST; LIST_REMOVE(inodedep, id_deps); inodedep->id_bmsafemap = NULL; } LIST_REMOVE(bmsafemap, sm_next); if (chgs == 0 && LIST_EMPTY(&bmsafemap->sm_jaddrefhd) && LIST_EMPTY(&bmsafemap->sm_jnewblkhd) && LIST_EMPTY(&bmsafemap->sm_newblkhd) && LIST_EMPTY(&bmsafemap->sm_inodedephd) && LIST_EMPTY(&bmsafemap->sm_freehd)) { LIST_REMOVE(bmsafemap, sm_hash); WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); return (0); } LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next); if (foreground) bdirty(bp); return (1); } /* * Try to free a mkdir dependency. */ static void complete_mkdir(mkdir) struct mkdir *mkdir; { struct diradd *dap; if ((mkdir->md_state & ALLCOMPLETE) != ALLCOMPLETE) return; LIST_REMOVE(mkdir, md_mkdirs); dap = mkdir->md_diradd; dap->da_state &= ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)); if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) { dap->da_state |= DEPCOMPLETE; complete_diradd(dap); } WORKITEM_FREE(mkdir, D_MKDIR); } /* * Handle the completion of a mkdir dependency. */ static void handle_written_mkdir(mkdir, type) struct mkdir *mkdir; int type; { if ((mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)) != type) panic("handle_written_mkdir: bad type"); mkdir->md_state |= COMPLETE; complete_mkdir(mkdir); } static int free_pagedep(pagedep) struct pagedep *pagedep; { int i; if (pagedep->pd_state & NEWBLOCK) return (0); if (!LIST_EMPTY(&pagedep->pd_dirremhd)) return (0); for (i = 0; i < DAHASHSZ; i++) if (!LIST_EMPTY(&pagedep->pd_diraddhd[i])) return (0); if (!LIST_EMPTY(&pagedep->pd_pendinghd)) return (0); if (!LIST_EMPTY(&pagedep->pd_jmvrefhd)) return (0); if (pagedep->pd_state & ONWORKLIST) WORKLIST_REMOVE(&pagedep->pd_list); LIST_REMOVE(pagedep, pd_hash); WORKITEM_FREE(pagedep, D_PAGEDEP); return (1); } /* * Called from within softdep_disk_write_complete above. * A write operation was just completed. Removed inodes can * now be freed and associated block pointers may be committed. * Note that this routine is always called from interrupt level * with further interrupts from this device blocked. * * If the write did not succeed, we will do all the roll-forward * operations, but we will not take the actions that will allow its * dependencies to be processed. 
*/ static int handle_written_filepage(pagedep, bp, flags) struct pagedep *pagedep; struct buf *bp; /* buffer containing the written page */ int flags; { struct dirrem *dirrem; struct diradd *dap, *nextdap; struct direct *ep; int i, chgs; if ((pagedep->pd_state & IOSTARTED) == 0) panic("handle_written_filepage: not started"); pagedep->pd_state &= ~IOSTARTED; if ((flags & WRITESUCCEEDED) == 0) goto rollforward; /* * Process any directory removals that have been committed. */ while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) { LIST_REMOVE(dirrem, dm_next); dirrem->dm_state |= COMPLETE; dirrem->dm_dirinum = pagedep->pd_ino; KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd), ("handle_written_filepage: Journal entries not written.")); add_to_worklist(&dirrem->dm_list, 0); } /* * Free any directory additions that have been committed. * If it is a newly allocated block, we have to wait until * the on-disk directory inode claims the new block. */ if ((pagedep->pd_state & NEWBLOCK) == 0) while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) free_diradd(dap, NULL); rollforward: /* * Uncommitted directory entries must be restored. */ for (chgs = 0, i = 0; i < DAHASHSZ; i++) { for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap; dap = nextdap) { nextdap = LIST_NEXT(dap, da_pdlist); if (dap->da_state & ATTACHED) panic("handle_written_filepage: attached"); ep = (struct direct *) ((char *)bp->b_data + dap->da_offset); ep->d_ino = dap->da_newinum; dap->da_state &= ~UNDONE; dap->da_state |= ATTACHED; chgs = 1; /* * If the inode referenced by the directory has * been written out, then the dependency can be * moved to the pending list. */ if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { LIST_REMOVE(dap, da_pdlist); LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); } } } /* * If there were any rollbacks in the directory, then it must be * marked dirty so that its will eventually get written back in * its correct form. */ if (chgs || (flags & WRITESUCCEEDED) == 0) { if ((bp->b_flags & B_DELWRI) == 0) stat_dir_entry++; bdirty(bp); return (1); } /* * If we are not waiting for a new directory block to be * claimed by its inode, then the pagedep will be freed. * Otherwise it will remain to track any new entries on * the page in case they are fsync'ed. */ free_pagedep(pagedep); return (0); } /* * Writing back in-core inode structures. * * The filesystem only accesses an inode's contents when it occupies an * "in-core" inode structure. These "in-core" structures are separate from * the page frames used to cache inode blocks. Only the latter are * transferred to/from the disk. So, when the updated contents of the * "in-core" inode structure are copied to the corresponding in-memory inode * block, the dependencies are also transferred. The following procedure is * called when copying a dirty "in-core" inode to a cached inode block. */ /* * Called when an inode is loaded from disk. If the effective link count * differed from the actual link count when it was last flushed, then we * need to ensure that the correct effective link count is put back. */ void softdep_load_inodeblock(ip) struct inode *ip; /* the "in_core" copy of the inode */ { struct inodedep *inodedep; struct ufsmount *ump; ump = ITOUMP(ip); KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, ("softdep_load_inodeblock called on non-softdep filesystem")); /* * Check for alternate nlink count. 
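 *
 * In effect, the assignment below together with the adjustment made
 * after the inodedep lookup computes
 *
 *	ip->i_effnlink = ip->i_nlink - inodedep->id_nlinkdelta;
 *
 * where id_nlinkdelta counts link-count changes that are complete in
 * memory but not yet committed to disk; with no inodedep the delta is
 * zero and the two counts match.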
*/ ip->i_effnlink = ip->i_nlink; ACQUIRE_LOCK(ump); if (inodedep_lookup(UFSTOVFS(ump), ip->i_number, 0, &inodedep) == 0) { FREE_LOCK(ump); return; } ip->i_effnlink -= inodedep->id_nlinkdelta; FREE_LOCK(ump); } /* * This routine is called just before the "in-core" inode * information is to be copied to the in-memory inode block. * Recall that an inode block contains several inodes. If * the force flag is set, then the dependencies will be * cleared so that the update can always be made. Note that * the buffer is locked when this routine is called, so we * will never be in the middle of writing the inode block * to disk. */ void softdep_update_inodeblock(ip, bp, waitfor) struct inode *ip; /* the "in_core" copy of the inode */ struct buf *bp; /* the buffer containing the inode block */ int waitfor; /* nonzero => update must be allowed */ { struct inodedep *inodedep; struct inoref *inoref; struct ufsmount *ump; struct worklist *wk; struct mount *mp; struct buf *ibp; struct fs *fs; int error; ump = ITOUMP(ip); mp = UFSTOVFS(ump); KASSERT(MOUNTEDSOFTDEP(mp) != 0, ("softdep_update_inodeblock called on non-softdep filesystem")); fs = ump->um_fs; /* * Preserve the freelink that is on disk. clear_unlinked_inodedep() * does not have access to the in-core ip so must write directly into * the inode block buffer when setting freelink. */ if (fs->fs_magic == FS_UFS1_MAGIC) DIP_SET(ip, i_freelink, ((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number))->di_freelink); else DIP_SET(ip, i_freelink, ((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number))->di_freelink); /* * If the effective link count is not equal to the actual link * count, then we must track the difference in an inodedep while * the inode is (potentially) tossed out of the cache. Otherwise, * if there is no existing inodedep, then there are no dependencies * to track. */ ACQUIRE_LOCK(ump); again: if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) { FREE_LOCK(ump); if (ip->i_effnlink != ip->i_nlink) panic("softdep_update_inodeblock: bad link count"); return; } if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink) panic("softdep_update_inodeblock: bad delta"); /* * If we're flushing all dependencies we must also move any waiting * for journal writes onto the bufwait list prior to I/O. */ if (waitfor) { TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) { if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY)) == DEPCOMPLETE) { jwait(&inoref->if_list, MNT_WAIT); goto again; } } } /* * Changes have been initiated. Anything depending on these * changes cannot occur until this inode has been written. */ inodedep->id_state &= ~COMPLETE; if ((inodedep->id_state & ONWORKLIST) == 0) WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list); /* * Any new dependencies associated with the incore inode must * now be moved to the list associated with the buffer holding * the in-memory copy of the inode. Once merged process any * allocdirects that are completed by the merger. 
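 *
 * [Editorial sketch -- not part of the original source.]  The
 * merge_inode_lists() routine called just below (and defined later
 * in this file) is an ordered merge keyed on ad_offset: each entry
 * from the "new" list is spliced into the "old" list in front of the
 * first old entry with an equal or greater offset, and entries with
 * equal offsets are collapsed via allocdirect_merge().  The ordering
 * part of that idea, in generic <sys/queue.h> terms with
 * illustrative names:
 *
 *	while ((np = TAILQ_FIRST(newhead)) != NULL) {
 *		TAILQ_FOREACH(op, oldhead, links)
 *			if (op->key >= np->key)
 *				break;
 *		TAILQ_REMOVE(newhead, np, links);
 *		if (op == NULL)
 *			TAILQ_INSERT_TAIL(oldhead, np, links);
 *		else
 *			TAILQ_INSERT_BEFORE(op, np, links);
 *	}
 *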
*/ merge_inode_lists(&inodedep->id_newinoupdt, &inodedep->id_inoupdt); if (!TAILQ_EMPTY(&inodedep->id_inoupdt)) handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt), NULL); merge_inode_lists(&inodedep->id_newextupdt, &inodedep->id_extupdt); if (!TAILQ_EMPTY(&inodedep->id_extupdt)) handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_extupdt), NULL); /* * Now that the inode has been pushed into the buffer, the * operations dependent on the inode being written to disk * can be moved to the id_bufwait so that they will be * processed when the buffer I/O completes. */ while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) { WORKLIST_REMOVE(wk); WORKLIST_INSERT(&inodedep->id_bufwait, wk); } /* * Newly allocated inodes cannot be written until the bitmap * that allocates them have been written (indicated by * DEPCOMPLETE being set in id_state). If we are doing a * forced sync (e.g., an fsync on a file), we force the bitmap * to be written so that the update can be done. */ if (waitfor == 0) { FREE_LOCK(ump); return; } retry: if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) != 0) { FREE_LOCK(ump); return; } ibp = inodedep->id_bmsafemap->sm_buf; ibp = getdirtybuf(ibp, LOCK_PTR(ump), MNT_WAIT); if (ibp == NULL) { /* * If ibp came back as NULL, the dependency could have been * freed while we slept. Look it up again, and check to see * that it has completed. */ if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) goto retry; FREE_LOCK(ump); return; } FREE_LOCK(ump); if ((error = bwrite(ibp)) != 0) softdep_error("softdep_update_inodeblock: bwrite", error); } /* * Merge the a new inode dependency list (such as id_newinoupdt) into an * old inode dependency list (such as id_inoupdt). This routine must be * called with splbio interrupts blocked. */ static void merge_inode_lists(newlisthead, oldlisthead) struct allocdirectlst *newlisthead; struct allocdirectlst *oldlisthead; { struct allocdirect *listadp, *newadp; newadp = TAILQ_FIRST(newlisthead); for (listadp = TAILQ_FIRST(oldlisthead); listadp && newadp;) { if (listadp->ad_offset < newadp->ad_offset) { listadp = TAILQ_NEXT(listadp, ad_next); continue; } TAILQ_REMOVE(newlisthead, newadp, ad_next); TAILQ_INSERT_BEFORE(listadp, newadp, ad_next); if (listadp->ad_offset == newadp->ad_offset) { allocdirect_merge(oldlisthead, newadp, listadp); listadp = newadp; } newadp = TAILQ_FIRST(newlisthead); } while ((newadp = TAILQ_FIRST(newlisthead)) != NULL) { TAILQ_REMOVE(newlisthead, newadp, ad_next); TAILQ_INSERT_TAIL(oldlisthead, newadp, ad_next); } } /* * If we are doing an fsync, then we must ensure that any directory * entries for the inode have been written after the inode gets to disk. 
*/ int softdep_fsync(vp) struct vnode *vp; /* the "in_core" copy of the inode */ { struct inodedep *inodedep; struct pagedep *pagedep; struct inoref *inoref; struct ufsmount *ump; struct worklist *wk; struct diradd *dap; struct mount *mp; struct vnode *pvp; struct inode *ip; struct buf *bp; struct fs *fs; struct thread *td = curthread; int error, flushparent, pagedep_new_block; ino_t parentino; ufs_lbn_t lbn; ip = VTOI(vp); mp = vp->v_mount; ump = VFSTOUFS(mp); fs = ump->um_fs; if (MOUNTEDSOFTDEP(mp) == 0) return (0); ACQUIRE_LOCK(ump); restart: if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) { FREE_LOCK(ump); return (0); } TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) { if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY)) == DEPCOMPLETE) { jwait(&inoref->if_list, MNT_WAIT); goto restart; } } if (!LIST_EMPTY(&inodedep->id_inowait) || !TAILQ_EMPTY(&inodedep->id_extupdt) || !TAILQ_EMPTY(&inodedep->id_newextupdt) || !TAILQ_EMPTY(&inodedep->id_inoupdt) || !TAILQ_EMPTY(&inodedep->id_newinoupdt)) panic("softdep_fsync: pending ops %p", inodedep); for (error = 0, flushparent = 0; ; ) { if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL) break; if (wk->wk_type != D_DIRADD) panic("softdep_fsync: Unexpected type %s", TYPENAME(wk->wk_type)); dap = WK_DIRADD(wk); /* * Flush our parent if this directory entry has a MKDIR_PARENT * dependency or is contained in a newly allocated block. */ if (dap->da_state & DIRCHG) pagedep = dap->da_previous->dm_pagedep; else pagedep = dap->da_pagedep; parentino = pagedep->pd_ino; lbn = pagedep->pd_lbn; if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE) panic("softdep_fsync: dirty"); if ((dap->da_state & MKDIR_PARENT) || (pagedep->pd_state & NEWBLOCK)) flushparent = 1; else flushparent = 0; /* * If we are being fsync'ed as part of vgone'ing this vnode, * then we will not be able to release and recover the * vnode below, so we just have to give up on writing its * directory entry out. It will eventually be written, just * not now, but then the user was not asking to have it * written, so we are not breaking any promises. */ if (vp->v_iflag & VI_DOOMED) break; /* * We prevent deadlock by always fetching inodes from the * root, moving down the directory tree. Thus, when fetching * our parent directory, we first try to get the lock. If * that fails, we must unlock ourselves before requesting * the lock on our parent. See the comment in ufs_lookup * for details on possible races. */ FREE_LOCK(ump); if (ffs_vgetf(mp, parentino, LK_NOWAIT | LK_EXCLUSIVE, &pvp, FFSV_FORCEINSMQ)) { error = vfs_busy(mp, MBF_NOWAIT); if (error != 0) { vfs_ref(mp); VOP_UNLOCK(vp, 0); error = vfs_busy(mp, 0); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); vfs_rel(mp); if (error != 0) return (ENOENT); if (vp->v_iflag & VI_DOOMED) { vfs_unbusy(mp); return (ENOENT); } } VOP_UNLOCK(vp, 0); error = ffs_vgetf(mp, parentino, LK_EXCLUSIVE, &pvp, FFSV_FORCEINSMQ); vfs_unbusy(mp); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); if (vp->v_iflag & VI_DOOMED) { if (error == 0) vput(pvp); error = ENOENT; } if (error != 0) return (error); } /* * All MKDIR_PARENT dependencies and all the NEWBLOCK pagedeps * that are contained in direct blocks will be resolved by * doing a ffs_update. Pagedeps contained in indirect blocks * may require a complete sync'ing of the directory. So, we * try the cheap and fast ffs_update first, and if that fails, * then we do the slower ffs_syncvnode of the directory. 
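 *
 * [Editorial sketch -- not part of the original source.]  The
 * "cheap first, expensive only if needed" step described above
 * reduces to the following shape, where the NEWBLOCK re-check on the
 * pagedep (re-evaluated under the softdep lock) is what decides
 * whether the full directory sync is still required:
 *
 *	if ((error = ffs_update(pvp, 1)) != 0)	(cheap inode push)
 *		return (error);
 *	if (pagedep_still_has_newblock)		(illustrative condition)
 *		error = ffs_syncvnode(pvp, MNT_WAIT, 0);
 *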
*/ if (flushparent) { int locked; if ((error = ffs_update(pvp, 1)) != 0) { vput(pvp); return (error); } ACQUIRE_LOCK(ump); locked = 1; if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) { if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) != NULL) { if (wk->wk_type != D_DIRADD) panic("softdep_fsync: Unexpected type %s", TYPENAME(wk->wk_type)); dap = WK_DIRADD(wk); if (dap->da_state & DIRCHG) pagedep = dap->da_previous->dm_pagedep; else pagedep = dap->da_pagedep; pagedep_new_block = pagedep->pd_state & NEWBLOCK; FREE_LOCK(ump); locked = 0; if (pagedep_new_block && (error = ffs_syncvnode(pvp, MNT_WAIT, 0))) { vput(pvp); return (error); } } } if (locked) FREE_LOCK(ump); } /* * Flush directory page containing the inode's name. */ error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), td->td_ucred, &bp); if (error == 0) error = bwrite(bp); else brelse(bp); vput(pvp); if (error != 0) return (error); ACQUIRE_LOCK(ump); if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) break; } FREE_LOCK(ump); return (0); } /* * Flush all the dirty bitmaps associated with the block device * before flushing the rest of the dirty blocks so as to reduce * the number of dependencies that will have to be rolled back. * * XXX Unused? */ void softdep_fsync_mountdev(vp) struct vnode *vp; { struct buf *bp, *nbp; struct worklist *wk; struct bufobj *bo; if (!vn_isdisk(vp, NULL)) panic("softdep_fsync_mountdev: vnode not a disk"); bo = &vp->v_bufobj; restart: BO_LOCK(bo); TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { /* * If it is already scheduled, skip to the next buffer. */ if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) continue; if ((bp->b_flags & B_DELWRI) == 0) panic("softdep_fsync_mountdev: not dirty"); /* * We are only interested in bitmaps with outstanding * dependencies. */ if ((wk = LIST_FIRST(&bp->b_dep)) == NULL || wk->wk_type != D_BMSAFEMAP || (bp->b_vflags & BV_BKGRDINPROG)) { BUF_UNLOCK(bp); continue; } BO_UNLOCK(bo); bremfree(bp); (void) bawrite(bp); goto restart; } drain_output(vp); BO_UNLOCK(bo); } /* * Sync all cylinder groups that were dirty at the time this function is * called. Newly dirtied cgs will be inserted before the sentinel. This * is used to flush freedep activity that may be holding up writes to a * indirect block. */ static int sync_cgs(mp, waitfor) struct mount *mp; int waitfor; { struct bmsafemap *bmsafemap; struct bmsafemap *sentinel; struct ufsmount *ump; struct buf *bp; int error; sentinel = malloc(sizeof(*sentinel), M_BMSAFEMAP, M_ZERO | M_WAITOK); sentinel->sm_cg = -1; ump = VFSTOUFS(mp); error = 0; ACQUIRE_LOCK(ump); LIST_INSERT_HEAD(&ump->softdep_dirtycg, sentinel, sm_next); for (bmsafemap = LIST_NEXT(sentinel, sm_next); bmsafemap != NULL; bmsafemap = LIST_NEXT(sentinel, sm_next)) { /* Skip sentinels and cgs with no work to release. */ if (bmsafemap->sm_cg == -1 || (LIST_EMPTY(&bmsafemap->sm_freehd) && LIST_EMPTY(&bmsafemap->sm_freewr))) { LIST_REMOVE(sentinel, sm_next); LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next); continue; } /* * If we don't get the lock and we're waiting try again, if * not move on to the next buf and try to sync it. 
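 *
 * [Editorial sketch -- not part of the original source.]  The
 * sentinel element in this loop is what allows the softdep lock to
 * be dropped while a cylinder-group buffer is written: the sentinel
 * marks the current position, so entries freed or newly dirtied
 * while the lock is released cannot invalidate the iteration.  The
 * pattern, stripped to its essentials:
 *
 *	LIST_INSERT_HEAD(&dirtycg, sentinel, sm_next);
 *	while ((bmsafemap = LIST_NEXT(sentinel, sm_next)) != NULL) {
 *		LIST_REMOVE(sentinel, sm_next);
 *		LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
 *		(drop the lock, write bmsafemap, retake the lock)
 *	}
 *	LIST_REMOVE(sentinel, sm_next);
 *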
*/ bp = getdirtybuf(bmsafemap->sm_buf, LOCK_PTR(ump), waitfor); if (bp == NULL && waitfor == MNT_WAIT) continue; LIST_REMOVE(sentinel, sm_next); LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next); if (bp == NULL) continue; FREE_LOCK(ump); if (waitfor == MNT_NOWAIT) bawrite(bp); else error = bwrite(bp); ACQUIRE_LOCK(ump); if (error) break; } LIST_REMOVE(sentinel, sm_next); FREE_LOCK(ump); free(sentinel, M_BMSAFEMAP); return (error); } /* * This routine is called when we are trying to synchronously flush a * file. This routine must eliminate any filesystem metadata dependencies * so that the syncing routine can succeed. */ int softdep_sync_metadata(struct vnode *vp) { struct inode *ip; int error; ip = VTOI(vp); KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0, ("softdep_sync_metadata called on non-softdep filesystem")); /* * Ensure that any direct block dependencies have been cleared, * truncations are started, and inode references are journaled. */ ACQUIRE_LOCK(VFSTOUFS(vp->v_mount)); /* * Write all journal records to prevent rollbacks on devvp. */ if (vp->v_type == VCHR) softdep_flushjournal(vp->v_mount); error = flush_inodedep_deps(vp, vp->v_mount, ip->i_number); /* * Ensure that all truncates are written so we won't find deps on * indirect blocks. */ process_truncates(vp); FREE_LOCK(VFSTOUFS(vp->v_mount)); return (error); } /* * This routine is called when we are attempting to sync a buf with * dependencies. If waitfor is MNT_NOWAIT it attempts to schedule any * other IO it can but returns EBUSY if the buffer is not yet able to * be written. Dependencies which will not cause rollbacks will always * return 0. */ int softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor) { struct indirdep *indirdep; struct pagedep *pagedep; struct allocindir *aip; struct newblk *newblk; struct ufsmount *ump; struct buf *nbp; struct worklist *wk; int i, error; KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0, ("softdep_sync_buf called on non-softdep filesystem")); /* * For VCHR we just don't want to force flush any dependencies that * will cause rollbacks. */ if (vp->v_type == VCHR) { if (waitfor == MNT_NOWAIT && softdep_count_dependencies(bp, 0)) return (EBUSY); return (0); } ump = VFSTOUFS(vp->v_mount); ACQUIRE_LOCK(ump); /* * As we hold the buffer locked, none of its dependencies * will disappear. 
*/ error = 0; top: LIST_FOREACH(wk, &bp->b_dep, wk_list) { switch (wk->wk_type) { case D_ALLOCDIRECT: case D_ALLOCINDIR: newblk = WK_NEWBLK(wk); if (newblk->nb_jnewblk != NULL) { if (waitfor == MNT_NOWAIT) { error = EBUSY; goto out_unlock; } jwait(&newblk->nb_jnewblk->jn_list, waitfor); goto top; } if (newblk->nb_state & DEPCOMPLETE || waitfor == MNT_NOWAIT) continue; nbp = newblk->nb_bmsafemap->sm_buf; nbp = getdirtybuf(nbp, LOCK_PTR(ump), waitfor); if (nbp == NULL) goto top; FREE_LOCK(ump); if ((error = bwrite(nbp)) != 0) goto out; ACQUIRE_LOCK(ump); continue; case D_INDIRDEP: indirdep = WK_INDIRDEP(wk); if (waitfor == MNT_NOWAIT) { if (!TAILQ_EMPTY(&indirdep->ir_trunc) || !LIST_EMPTY(&indirdep->ir_deplisthd)) { error = EBUSY; goto out_unlock; } } if (!TAILQ_EMPTY(&indirdep->ir_trunc)) panic("softdep_sync_buf: truncation pending."); restart: LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) { newblk = (struct newblk *)aip; if (newblk->nb_jnewblk != NULL) { jwait(&newblk->nb_jnewblk->jn_list, waitfor); goto restart; } if (newblk->nb_state & DEPCOMPLETE) continue; nbp = newblk->nb_bmsafemap->sm_buf; nbp = getdirtybuf(nbp, LOCK_PTR(ump), waitfor); if (nbp == NULL) goto restart; FREE_LOCK(ump); if ((error = bwrite(nbp)) != 0) goto out; ACQUIRE_LOCK(ump); goto restart; } continue; case D_PAGEDEP: /* * Only flush directory entries in synchronous passes. */ if (waitfor != MNT_WAIT) { error = EBUSY; goto out_unlock; } /* * While syncing snapshots, we must allow recursive * lookups. */ BUF_AREC(bp); /* * We are trying to sync a directory that may * have dependencies on both its own metadata * and/or dependencies on the inodes of any * recently allocated files. We walk its diradd * lists pushing out the associated inode. */ pagedep = WK_PAGEDEP(wk); for (i = 0; i < DAHASHSZ; i++) { if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == 0) continue; if ((error = flush_pagedep_deps(vp, wk->wk_mp, &pagedep->pd_diraddhd[i]))) { BUF_NOREC(bp); goto out_unlock; } } BUF_NOREC(bp); continue; case D_FREEWORK: case D_FREEDEP: case D_JSEGDEP: case D_JNEWBLK: continue; default: panic("softdep_sync_buf: Unknown type %s", TYPENAME(wk->wk_type)); /* NOTREACHED */ } } out_unlock: FREE_LOCK(ump); out: return (error); } /* * Flush the dependencies associated with an inodedep. * Called with splbio blocked. */ static int flush_inodedep_deps(vp, mp, ino) struct vnode *vp; struct mount *mp; ino_t ino; { struct inodedep *inodedep; struct inoref *inoref; struct ufsmount *ump; int error, waitfor; /* * This work is done in two passes. The first pass grabs most * of the buffers and begins asynchronously writing them. The * only way to wait for these asynchronous writes is to sleep * on the filesystem vnode which may stay busy for a long time * if the filesystem is active. So, instead, we make a second * pass over the dependencies blocking on each write. In the * usual case we will be blocking against a write that we * initiated, so when it is done the dependency will have been * resolved. Thus the second pass is expected to end quickly. * We give a brief window at the top of the loop to allow * any pending I/O to complete. 
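 *
 * [Editorial sketch -- not part of the original source.]  The
 * two-pass strategy described above boils down to running the same
 * flush loop twice, first with MNT_NOWAIT to start as many
 * asynchronous writes as possible and then with MNT_WAIT to block on
 * whatever remains:
 *
 *	for (error = 0, waitfor = MNT_NOWAIT; ; ) {
 *		if (error)
 *			return (error);
 *		(flush every dependency list with "waitfor")
 *		if (waitfor == MNT_WAIT)
 *			break;			(second pass finished)
 *		waitfor = MNT_WAIT;		(switch to blocking pass)
 *	}
 *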
*/ ump = VFSTOUFS(mp); LOCK_OWNED(ump); for (error = 0, waitfor = MNT_NOWAIT; ; ) { if (error) return (error); FREE_LOCK(ump); ACQUIRE_LOCK(ump); restart: if (inodedep_lookup(mp, ino, 0, &inodedep) == 0) return (0); TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) { if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY)) == DEPCOMPLETE) { jwait(&inoref->if_list, MNT_WAIT); goto restart; } } if (flush_deplist(&inodedep->id_inoupdt, waitfor, &error) || flush_deplist(&inodedep->id_newinoupdt, waitfor, &error) || flush_deplist(&inodedep->id_extupdt, waitfor, &error) || flush_deplist(&inodedep->id_newextupdt, waitfor, &error)) continue; /* * If pass2, we are done, otherwise do pass 2. */ if (waitfor == MNT_WAIT) break; waitfor = MNT_WAIT; } /* * Try freeing inodedep in case all dependencies have been removed. */ if (inodedep_lookup(mp, ino, 0, &inodedep) != 0) (void) free_inodedep(inodedep); return (0); } /* * Flush an inode dependency list. * Called with splbio blocked. */ static int flush_deplist(listhead, waitfor, errorp) struct allocdirectlst *listhead; int waitfor; int *errorp; { struct allocdirect *adp; struct newblk *newblk; struct ufsmount *ump; struct buf *bp; if ((adp = TAILQ_FIRST(listhead)) == NULL) return (0); ump = VFSTOUFS(adp->ad_list.wk_mp); LOCK_OWNED(ump); TAILQ_FOREACH(adp, listhead, ad_next) { newblk = (struct newblk *)adp; if (newblk->nb_jnewblk != NULL) { jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT); return (1); } if (newblk->nb_state & DEPCOMPLETE) continue; bp = newblk->nb_bmsafemap->sm_buf; bp = getdirtybuf(bp, LOCK_PTR(ump), waitfor); if (bp == NULL) { if (waitfor == MNT_NOWAIT) continue; return (1); } FREE_LOCK(ump); if (waitfor == MNT_NOWAIT) bawrite(bp); else *errorp = bwrite(bp); ACQUIRE_LOCK(ump); return (1); } return (0); } /* * Flush dependencies associated with an allocdirect block. */ static int flush_newblk_dep(vp, mp, lbn) struct vnode *vp; struct mount *mp; ufs_lbn_t lbn; { struct newblk *newblk; struct ufsmount *ump; struct bufobj *bo; struct inode *ip; struct buf *bp; ufs2_daddr_t blkno; int error; error = 0; bo = &vp->v_bufobj; ip = VTOI(vp); blkno = DIP(ip, i_db[lbn]); if (blkno == 0) panic("flush_newblk_dep: Missing block"); ump = VFSTOUFS(mp); ACQUIRE_LOCK(ump); /* * Loop until all dependencies related to this block are satisfied. * We must be careful to restart after each sleep in case a write * completes some part of this process for us. */ for (;;) { if (newblk_lookup(mp, blkno, 0, &newblk) == 0) { FREE_LOCK(ump); break; } if (newblk->nb_list.wk_type != D_ALLOCDIRECT) panic("flush_newblk_dep: Bad newblk %p", newblk); /* * Flush the journal. */ if (newblk->nb_jnewblk != NULL) { jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT); continue; } /* * Write the bitmap dependency. */ if ((newblk->nb_state & DEPCOMPLETE) == 0) { bp = newblk->nb_bmsafemap->sm_buf; bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT); if (bp == NULL) continue; FREE_LOCK(ump); error = bwrite(bp); if (error) break; ACQUIRE_LOCK(ump); continue; } /* * Write the buffer. */ FREE_LOCK(ump); BO_LOCK(bo); bp = gbincore(bo, lbn); if (bp != NULL) { error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo)); if (error == ENOLCK) { ACQUIRE_LOCK(ump); error = 0; continue; /* Slept, retry */ } if (error != 0) break; /* Failed */ if (bp->b_flags & B_DELWRI) { bremfree(bp); error = bwrite(bp); if (error) break; } else BUF_UNLOCK(bp); } else BO_UNLOCK(bo); /* * We have to wait for the direct pointers to * point at the newdirblk before the dependency * will go away. 
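 *
 * [Editorial sketch -- not part of the original source.]  Every
 * iteration of this loop either observes that the dependency is gone
 * or performs one step (journal wait, bitmap write, data-block
 * write, inode update) and then re-evaluates from the top, because
 * each of those steps may sleep and the state can change meanwhile.
 * In outline:
 *
 *	for (;;) {
 *		if (newblk_lookup(mp, blkno, 0, &newblk) == 0)
 *			break;			(dependency satisfied)
 *		(wait for the journal entry, or write the bitmap
 *		 buffer, or write the data buffer, whichever is the
 *		 first step still outstanding)
 *		ffs_update(vp, 1);		(push the block pointer)
 *	}
 *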
*/ error = ffs_update(vp, 1); if (error) break; ACQUIRE_LOCK(ump); } return (error); } /* * Eliminate a pagedep dependency by flushing out all its diradd dependencies. * Called with splbio blocked. */ static int flush_pagedep_deps(pvp, mp, diraddhdp) struct vnode *pvp; struct mount *mp; struct diraddhd *diraddhdp; { struct inodedep *inodedep; struct inoref *inoref; struct ufsmount *ump; struct diradd *dap; struct vnode *vp; int error = 0; struct buf *bp; ino_t inum; struct diraddhd unfinished; LIST_INIT(&unfinished); ump = VFSTOUFS(mp); LOCK_OWNED(ump); restart: while ((dap = LIST_FIRST(diraddhdp)) != NULL) { /* * Flush ourselves if this directory entry * has a MKDIR_PARENT dependency. */ if (dap->da_state & MKDIR_PARENT) { FREE_LOCK(ump); if ((error = ffs_update(pvp, 1)) != 0) break; ACQUIRE_LOCK(ump); /* * If that cleared dependencies, go on to next. */ if (dap != LIST_FIRST(diraddhdp)) continue; /* * All MKDIR_PARENT dependencies and all the * NEWBLOCK pagedeps that are contained in direct * blocks were resolved by doing above ffs_update. * Pagedeps contained in indirect blocks may * require a complete sync'ing of the directory. * We are in the midst of doing a complete sync, * so if they are not resolved in this pass we * defer them for now as they will be sync'ed by * our caller shortly. */ LIST_REMOVE(dap, da_pdlist); LIST_INSERT_HEAD(&unfinished, dap, da_pdlist); continue; } /* * A newly allocated directory must have its "." and * ".." entries written out before its name can be * committed in its parent. */ inum = dap->da_newinum; if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0) panic("flush_pagedep_deps: lost inode1"); /* * Wait for any pending journal adds to complete so we don't * cause rollbacks while syncing. */ TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) { if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY)) == DEPCOMPLETE) { jwait(&inoref->if_list, MNT_WAIT); goto restart; } } if (dap->da_state & MKDIR_BODY) { FREE_LOCK(ump); if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp, FFSV_FORCEINSMQ))) break; error = flush_newblk_dep(vp, mp, 0); /* * If we still have the dependency we might need to * update the vnode to sync the new link count to * disk. */ if (error == 0 && dap == LIST_FIRST(diraddhdp)) error = ffs_update(vp, 1); vput(vp); if (error != 0) break; ACQUIRE_LOCK(ump); /* * If that cleared dependencies, go on to next. */ if (dap != LIST_FIRST(diraddhdp)) continue; if (dap->da_state & MKDIR_BODY) { inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep); panic("flush_pagedep_deps: MKDIR_BODY " "inodedep %p dap %p vp %p", inodedep, dap, vp); } } /* * Flush the inode on which the directory entry depends. * Having accounted for MKDIR_PARENT and MKDIR_BODY above, * the only remaining dependency is that the updated inode * count must get pushed to disk. The inode has already * been pushed into its inode buffer (via VOP_UPDATE) at * the time of the reference count change. So we need only * locate that buffer, ensure that there will be no rollback * caused by a bitmap dependency, then write the inode buffer. */ retry: if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0) panic("flush_pagedep_deps: lost inode"); /* * If the inode still has bitmap dependencies, * push them to disk. 
*/ if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) == 0) { bp = inodedep->id_bmsafemap->sm_buf; bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT); if (bp == NULL) goto retry; FREE_LOCK(ump); if ((error = bwrite(bp)) != 0) break; ACQUIRE_LOCK(ump); if (dap != LIST_FIRST(diraddhdp)) continue; } /* * If the inode is still sitting in a buffer waiting * to be written or waiting for the link count to be * adjusted update it here to flush it to disk. */ if (dap == LIST_FIRST(diraddhdp)) { FREE_LOCK(ump); if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp, FFSV_FORCEINSMQ))) break; error = ffs_update(vp, 1); vput(vp); if (error) break; ACQUIRE_LOCK(ump); } /* * If we have failed to get rid of all the dependencies * then something is seriously wrong. */ if (dap == LIST_FIRST(diraddhdp)) { inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep); panic("flush_pagedep_deps: failed to flush " "inodedep %p ino %ju dap %p", inodedep, (uintmax_t)inum, dap); } } if (error) ACQUIRE_LOCK(ump); while ((dap = LIST_FIRST(&unfinished)) != NULL) { LIST_REMOVE(dap, da_pdlist); LIST_INSERT_HEAD(diraddhdp, dap, da_pdlist); } return (error); } /* * A large burst of file addition or deletion activity can drive the * memory load excessively high. First attempt to slow things down * using the techniques below. If that fails, this routine requests * the offending operations to fall back to running synchronously * until the memory load returns to a reasonable level. */ int softdep_slowdown(vp) struct vnode *vp; { struct ufsmount *ump; int jlow; int max_softdeps_hard; KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0, ("softdep_slowdown called on non-softdep filesystem")); ump = VFSTOUFS(vp->v_mount); ACQUIRE_LOCK(ump); jlow = 0; /* * Check for journal space if needed. */ if (DOINGSUJ(vp)) { if (journal_space(ump, 0) == 0) jlow = 1; } /* * If the system is under its limits and our filesystem is * not responsible for more than our share of the usage and * we are not low on journal space, then no need to slow down. */ max_softdeps_hard = max_softdeps * 11 / 10; if (dep_current[D_DIRREM] < max_softdeps_hard / 2 && dep_current[D_INODEDEP] < max_softdeps_hard && dep_current[D_INDIRDEP] < max_softdeps_hard / 1000 && dep_current[D_FREEBLKS] < max_softdeps_hard && jlow == 0 && ump->softdep_curdeps[D_DIRREM] < (max_softdeps_hard / 2) / stat_flush_threads && ump->softdep_curdeps[D_INODEDEP] < max_softdeps_hard / stat_flush_threads && ump->softdep_curdeps[D_INDIRDEP] < (max_softdeps_hard / 1000) / stat_flush_threads && ump->softdep_curdeps[D_FREEBLKS] < max_softdeps_hard / stat_flush_threads) { FREE_LOCK(ump); return (0); } /* * If the journal is low or our filesystem is over its limit * then speedup the cleanup. */ if (ump->softdep_curdeps[D_INDIRDEP] < (max_softdeps_hard / 1000) / stat_flush_threads || jlow) softdep_speedup(ump); stat_sync_limit_hit += 1; FREE_LOCK(ump); /* * We only slow down the rate at which new dependencies are * generated if we are not using journaling. With journaling, * the cleanup should always be sufficient to keep things * under control. */ if (DOINGSUJ(vp)) return (0); return (1); } /* * Called by the allocation routines when they are about to fail * in the hope that we can free up the requested resource (inodes * or disk space). * * First check to see if the work list has anything on it. If it has, * clean up entries until we successfully free the requested resource. * Because this process holds inodes locked, we cannot handle any remove * requests that might block on a locked inode as that could lead to * deadlock. 
If the worklist yields none of the requested resource, * start syncing out vnodes to free up the needed space. */ int softdep_request_cleanup(fs, vp, cred, resource) struct fs *fs; struct vnode *vp; struct ucred *cred; int resource; { struct ufsmount *ump; struct mount *mp; long starttime; ufs2_daddr_t needed; int error, failed_vnode; /* * If we are being called because of a process doing a * copy-on-write, then it is not safe to process any * worklist items as we will recurse into the copyonwrite * routine. This will result in an incoherent snapshot. * If the vnode that we hold is a snapshot, we must avoid * handling other resources that could cause deadlock. */ if ((curthread->td_pflags & TDP_COWINPROGRESS) || IS_SNAPSHOT(VTOI(vp))) return (0); if (resource == FLUSH_BLOCKS_WAIT) stat_cleanup_blkrequests += 1; else stat_cleanup_inorequests += 1; mp = vp->v_mount; ump = VFSTOUFS(mp); mtx_assert(UFS_MTX(ump), MA_OWNED); UFS_UNLOCK(ump); error = ffs_update(vp, 1); if (error != 0 || MOUNTEDSOFTDEP(mp) == 0) { UFS_LOCK(ump); return (0); } /* * If we are in need of resources, start by cleaning up * any block removals associated with our inode. */ ACQUIRE_LOCK(ump); process_removes(vp); process_truncates(vp); FREE_LOCK(ump); /* * Now clean up at least as many resources as we will need. * * When requested to clean up inodes, the number that are needed * is set by the number of simultaneous writers (mnt_writeopcount) * plus a bit of slop (2) in case some more writers show up while * we are cleaning. * * When requested to free up space, the amount of space that * we need is enough blocks to allocate a full-sized segment * (fs_contigsumsize). The number of such segments that will * be needed is set by the number of simultaneous writers * (mnt_writeopcount) plus a bit of slop (2) in case some more * writers show up while we are cleaning. * * Additionally, if we are unpriviledged and allocating space, * we need to ensure that we clean up enough blocks to get the * needed number of blocks over the threshold of the minimum * number of blocks required to be kept free by the filesystem * (fs_minfree). */ if (resource == FLUSH_INODES_WAIT) { needed = vp->v_mount->mnt_writeopcount + 2; } else if (resource == FLUSH_BLOCKS_WAIT) { needed = (vp->v_mount->mnt_writeopcount + 2) * fs->fs_contigsumsize; if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0)) needed += fragstoblks(fs, roundup((fs->fs_dsize * fs->fs_minfree / 100) - fs->fs_cstotal.cs_nffree, fs->fs_frag)); } else { UFS_LOCK(ump); printf("softdep_request_cleanup: Unknown resource type %d\n", resource); return (0); } starttime = time_second; retry: if ((resource == FLUSH_BLOCKS_WAIT && ump->softdep_on_worklist > 0 && fs->fs_cstotal.cs_nbfree <= needed) || (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 && fs->fs_cstotal.cs_nifree <= needed)) { ACQUIRE_LOCK(ump); if (ump->softdep_on_worklist > 0 && process_worklist_item(UFSTOVFS(ump), ump->softdep_on_worklist, LK_NOWAIT) != 0) stat_worklist_push += 1; FREE_LOCK(ump); } /* * If we still need resources and there are no more worklist * entries to process to obtain them, we have to start flushing * the dirty vnodes to force the release of additional requests * to the worklist that we can then process to reap addition * resources. We walk the vnodes associated with the mount point * until we get the needed worklist requests that we can reap. * * If there are several threads all needing to clean the same * mount point, only one is allowed to walk the mount list. 
* When several threads all try to walk the same mount list, * they end up competing with each other and often end up in * livelock. This approach ensures that forward progress is * made at the cost of occational ENOSPC errors being returned * that might otherwise have been avoided. */ error = 1; if ((resource == FLUSH_BLOCKS_WAIT && fs->fs_cstotal.cs_nbfree <= needed) || (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 && fs->fs_cstotal.cs_nifree <= needed)) { ACQUIRE_LOCK(ump); if ((ump->um_softdep->sd_flags & FLUSH_RC_ACTIVE) == 0) { ump->um_softdep->sd_flags |= FLUSH_RC_ACTIVE; FREE_LOCK(ump); failed_vnode = softdep_request_cleanup_flush(mp, ump); ACQUIRE_LOCK(ump); ump->um_softdep->sd_flags &= ~FLUSH_RC_ACTIVE; FREE_LOCK(ump); if (ump->softdep_on_worklist > 0) { stat_cleanup_retries += 1; if (!failed_vnode) goto retry; } } else { FREE_LOCK(ump); error = 0; } stat_cleanup_failures += 1; } if (time_second - starttime > stat_cleanup_high_delay) stat_cleanup_high_delay = time_second - starttime; UFS_LOCK(ump); return (error); } /* * Scan the vnodes for the specified mount point flushing out any * vnodes that can be locked without waiting. Finally, try to flush * the device associated with the mount point if it can be locked * without waiting. * * We return 0 if we were able to lock every vnode in our scan. * If we had to skip one or more vnodes, we return 1. */ static int softdep_request_cleanup_flush(mp, ump) struct mount *mp; struct ufsmount *ump; { struct thread *td; struct vnode *lvp, *mvp; int failed_vnode; failed_vnode = 0; td = curthread; MNT_VNODE_FOREACH_ALL(lvp, mp, mvp) { if (TAILQ_FIRST(&lvp->v_bufobj.bo_dirty.bv_hd) == 0) { VI_UNLOCK(lvp); continue; } if (vget(lvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT, td) != 0) { failed_vnode = 1; continue; } if (lvp->v_vflag & VV_NOSYNC) { /* unlinked */ vput(lvp); continue; } (void) ffs_syncvnode(lvp, MNT_NOWAIT, 0); vput(lvp); } lvp = ump->um_devvp; if (vn_lock(lvp, LK_EXCLUSIVE | LK_NOWAIT) == 0) { VOP_FSYNC(lvp, MNT_NOWAIT, td); VOP_UNLOCK(lvp, 0); } return (failed_vnode); } static bool softdep_excess_items(struct ufsmount *ump, int item) { KASSERT(item >= 0 && item < D_LAST, ("item %d", item)); return (dep_current[item] > max_softdeps && ump->softdep_curdeps[item] > max_softdeps / stat_flush_threads); } static void schedule_cleanup(struct mount *mp) { struct ufsmount *ump; struct thread *td; ump = VFSTOUFS(mp); LOCK_OWNED(ump); FREE_LOCK(ump); td = curthread; if ((td->td_pflags & TDP_KTHREAD) != 0 && (td->td_proc->p_flag2 & P2_AST_SU) == 0) { /* * No ast is delivered to kernel threads, so nobody * would deref the mp. Some kernel threads * explicitely check for AST, e.g. NFS daemon does * this in the serving loop. 
*/ return; } if (td->td_su != NULL) vfs_rel(td->td_su); vfs_ref(mp); td->td_su = mp; thread_lock(td); td->td_flags |= TDF_ASTPENDING; thread_unlock(td); } static void softdep_ast_cleanup_proc(struct thread *td) { struct mount *mp; struct ufsmount *ump; int error; bool req; while ((mp = td->td_su) != NULL) { td->td_su = NULL; error = vfs_busy(mp, MBF_NOWAIT); vfs_rel(mp); if (error != 0) return; if (ffs_own_mount(mp) && MOUNTEDSOFTDEP(mp)) { ump = VFSTOUFS(mp); for (;;) { req = false; ACQUIRE_LOCK(ump); if (softdep_excess_items(ump, D_INODEDEP)) { req = true; request_cleanup(mp, FLUSH_INODES); } if (softdep_excess_items(ump, D_DIRREM)) { req = true; request_cleanup(mp, FLUSH_BLOCKS); } FREE_LOCK(ump); if (softdep_excess_items(ump, D_NEWBLK) || softdep_excess_items(ump, D_ALLOCDIRECT) || softdep_excess_items(ump, D_ALLOCINDIR)) { error = vn_start_write(NULL, &mp, V_WAIT); if (error == 0) { req = true; VFS_SYNC(mp, MNT_WAIT); vn_finished_write(mp); } } if ((td->td_pflags & TDP_KTHREAD) != 0 || !req) break; } } vfs_unbusy(mp); } if ((mp = td->td_su) != NULL) { td->td_su = NULL; vfs_rel(mp); } } /* * If memory utilization has gotten too high, deliberately slow things * down and speed up the I/O processing. */ static int request_cleanup(mp, resource) struct mount *mp; int resource; { struct thread *td = curthread; struct ufsmount *ump; ump = VFSTOUFS(mp); LOCK_OWNED(ump); /* * We never hold up the filesystem syncer or buf daemon. */ if (td->td_pflags & (TDP_SOFTDEP|TDP_NORUNNINGBUF)) return (0); /* * First check to see if the work list has gotten backlogged. * If it has, co-opt this process to help clean up two entries. * Because this process may hold inodes locked, we cannot * handle any remove requests that might block on a locked * inode as that could lead to deadlock. We set TDP_SOFTDEP * to avoid recursively processing the worklist. */ if (ump->softdep_on_worklist > max_softdeps / 10) { td->td_pflags |= TDP_SOFTDEP; process_worklist_item(mp, 2, LK_NOWAIT); td->td_pflags &= ~TDP_SOFTDEP; stat_worklist_push += 2; return(1); } /* * Next, we attempt to speed up the syncer process. If that * is successful, then we allow the process to continue. */ if (softdep_speedup(ump) && resource != FLUSH_BLOCKS_WAIT && resource != FLUSH_INODES_WAIT) return(0); /* * If we are resource constrained on inode dependencies, try * flushing some dirty inodes. Otherwise, we are constrained * by file deletions, so try accelerating flushes of directories * with removal dependencies. We would like to do the cleanup * here, but we probably hold an inode locked at this point and * that might deadlock against one that we try to clean. So, * the best that we can do is request the syncer daemon to do * the cleanup for us. */ switch (resource) { case FLUSH_INODES: case FLUSH_INODES_WAIT: ACQUIRE_GBLLOCK(&lk); stat_ino_limit_push += 1; req_clear_inodedeps += 1; FREE_GBLLOCK(&lk); stat_countp = &stat_ino_limit_hit; break; case FLUSH_BLOCKS: case FLUSH_BLOCKS_WAIT: ACQUIRE_GBLLOCK(&lk); stat_blk_limit_push += 1; req_clear_remove += 1; FREE_GBLLOCK(&lk); stat_countp = &stat_blk_limit_hit; break; default: panic("request_cleanup: unknown type"); } /* * Hopefully the syncer daemon will catch up and awaken us. * We wait at most tickdelay before proceeding in any case. */ ACQUIRE_GBLLOCK(&lk); FREE_LOCK(ump); proc_waiting += 1; if (callout_pending(&softdep_callout) == FALSE) callout_reset(&softdep_callout, tickdelay > 2 ? 
tickdelay : 2, pause_timer, 0); if ((td->td_pflags & TDP_KTHREAD) == 0) msleep((caddr_t)&proc_waiting, &lk, PPAUSE, "softupdate", 0); proc_waiting -= 1; FREE_GBLLOCK(&lk); ACQUIRE_LOCK(ump); return (1); } /* * Awaken processes pausing in request_cleanup and clear proc_waiting * to indicate that there is no longer a timer running. Pause_timer * will be called with the global softdep mutex (&lk) locked. */ static void pause_timer(arg) void *arg; { GBLLOCK_OWNED(&lk); /* * The callout_ API has acquired mtx and will hold it around this * function call. */ *stat_countp += proc_waiting; wakeup(&proc_waiting); } /* * If requested, try removing inode or removal dependencies. */ static void check_clear_deps(mp) struct mount *mp; { /* * If we are suspended, it may be because of our using * too many inodedeps, so help clear them out. */ if (MOUNTEDSUJ(mp) && VFSTOUFS(mp)->softdep_jblocks->jb_suspended) clear_inodedeps(mp); /* * General requests for cleanup of backed up dependencies */ ACQUIRE_GBLLOCK(&lk); if (req_clear_inodedeps) { req_clear_inodedeps -= 1; FREE_GBLLOCK(&lk); clear_inodedeps(mp); ACQUIRE_GBLLOCK(&lk); wakeup(&proc_waiting); } if (req_clear_remove) { req_clear_remove -= 1; FREE_GBLLOCK(&lk); clear_remove(mp); ACQUIRE_GBLLOCK(&lk); wakeup(&proc_waiting); } FREE_GBLLOCK(&lk); } /* * Flush out a directory with at least one removal dependency in an effort to * reduce the number of dirrem, freefile, and freeblks dependency structures. */ static void clear_remove(mp) struct mount *mp; { struct pagedep_hashhead *pagedephd; struct pagedep *pagedep; struct ufsmount *ump; struct vnode *vp; struct bufobj *bo; int error, cnt; ino_t ino; ump = VFSTOUFS(mp); LOCK_OWNED(ump); for (cnt = 0; cnt <= ump->pagedep_hash_size; cnt++) { pagedephd = &ump->pagedep_hashtbl[ump->pagedep_nextclean++]; if (ump->pagedep_nextclean > ump->pagedep_hash_size) ump->pagedep_nextclean = 0; LIST_FOREACH(pagedep, pagedephd, pd_hash) { if (LIST_EMPTY(&pagedep->pd_dirremhd)) continue; ino = pagedep->pd_ino; if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) continue; FREE_LOCK(ump); /* * Let unmount clear deps */ error = vfs_busy(mp, MBF_NOWAIT); if (error != 0) goto finish_write; error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp, FFSV_FORCEINSMQ); vfs_unbusy(mp); if (error != 0) { softdep_error("clear_remove: vget", error); goto finish_write; } if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0))) softdep_error("clear_remove: fsync", error); bo = &vp->v_bufobj; BO_LOCK(bo); drain_output(vp); BO_UNLOCK(bo); vput(vp); finish_write: vn_finished_write(mp); ACQUIRE_LOCK(ump); return; } } } /* * Clear out a block of dirty inodes in an effort to reduce * the number of inodedep dependency structures. */ static void clear_inodedeps(mp) struct mount *mp; { struct inodedep_hashhead *inodedephd; struct inodedep *inodedep; struct ufsmount *ump; struct vnode *vp; struct fs *fs; int error, cnt; ino_t firstino, lastino, ino; ump = VFSTOUFS(mp); fs = ump->um_fs; LOCK_OWNED(ump); /* * Pick a random inode dependency to be cleared. * We will then gather up all the inodes in its block * that have dependencies and flush them out. */ for (cnt = 0; cnt <= ump->inodedep_hash_size; cnt++) { inodedephd = &ump->inodedep_hashtbl[ump->inodedep_nextclean++]; if (ump->inodedep_nextclean > ump->inodedep_hash_size) ump->inodedep_nextclean = 0; if ((inodedep = LIST_FIRST(inodedephd)) != NULL) break; } if (inodedep == NULL) return; /* * Find the last inode in the block with dependencies. 
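 *
 * [Editorial sketch -- not part of the original source.]  "The
 * block" here is the inode block: inodes are flushed a full block at
 * a time, so the scan below works over the range of inode numbers
 * that share one block.  With INOPB(fs) inodes per block the range is
 *
 *	firstino = rounddown2(ino, INOPB(fs));
 *	lastino  = firstino + INOPB(fs) - 1;
 *
 * e.g. with INOPB(fs) == 128, inode 1234 lives in the block covering
 * inodes 1152 through 1279.
 *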
*/ firstino = rounddown2(inodedep->id_ino, INOPB(fs)); for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--) if (inodedep_lookup(mp, lastino, 0, &inodedep) != 0) break; /* * Asynchronously push all but the last inode with dependencies. * Synchronously push the last inode with dependencies to ensure * that the inode block gets written to free up the inodedeps. */ for (ino = firstino; ino <= lastino; ino++) { if (inodedep_lookup(mp, ino, 0, &inodedep) == 0) continue; if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) continue; FREE_LOCK(ump); error = vfs_busy(mp, MBF_NOWAIT); /* Let unmount clear deps */ if (error != 0) { vn_finished_write(mp); ACQUIRE_LOCK(ump); return; } if ((error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp, FFSV_FORCEINSMQ)) != 0) { softdep_error("clear_inodedeps: vget", error); vfs_unbusy(mp); vn_finished_write(mp); ACQUIRE_LOCK(ump); return; } vfs_unbusy(mp); if (ino == lastino) { if ((error = ffs_syncvnode(vp, MNT_WAIT, 0))) softdep_error("clear_inodedeps: fsync1", error); } else { if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0))) softdep_error("clear_inodedeps: fsync2", error); BO_LOCK(&vp->v_bufobj); drain_output(vp); BO_UNLOCK(&vp->v_bufobj); } vput(vp); vn_finished_write(mp); ACQUIRE_LOCK(ump); } } void softdep_buf_append(bp, wkhd) struct buf *bp; struct workhead *wkhd; { struct worklist *wk; struct ufsmount *ump; if ((wk = LIST_FIRST(wkhd)) == NULL) return; KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0, ("softdep_buf_append called on non-softdep filesystem")); ump = VFSTOUFS(wk->wk_mp); ACQUIRE_LOCK(ump); while ((wk = LIST_FIRST(wkhd)) != NULL) { WORKLIST_REMOVE(wk); WORKLIST_INSERT(&bp->b_dep, wk); } FREE_LOCK(ump); } void softdep_inode_append(ip, cred, wkhd) struct inode *ip; struct ucred *cred; struct workhead *wkhd; { struct buf *bp; struct fs *fs; struct ufsmount *ump; int error; ump = ITOUMP(ip); KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, ("softdep_inode_append called on non-softdep filesystem")); fs = ump->um_fs; error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int)fs->fs_bsize, cred, &bp); if (error) { bqrelse(bp); softdep_freework(wkhd); return; } softdep_buf_append(bp, wkhd); bqrelse(bp); } void softdep_freework(wkhd) struct workhead *wkhd; { struct worklist *wk; struct ufsmount *ump; if ((wk = LIST_FIRST(wkhd)) == NULL) return; KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0, ("softdep_freework called on non-softdep filesystem")); ump = VFSTOUFS(wk->wk_mp); ACQUIRE_LOCK(ump); handle_jwork(wkhd); FREE_LOCK(ump); } static struct ufsmount * softdep_bp_to_mp(bp) struct buf *bp; { struct mount *mp; struct vnode *vp; if (LIST_EMPTY(&bp->b_dep)) return (NULL); vp = bp->b_vp; /* * The ump mount point is stable after we get a correct * pointer, since bp is locked and this prevents unmount from * proceeding. But to get to it, we cannot dereference bp->b_dep * head wk_mp, because we do not yet own SU ump lock and * workitem might be freed while dereferenced. */ retry: if (vp->v_type == VCHR) { VI_LOCK(vp); mp = vp->v_type == VCHR ? vp->v_rdev->si_mountpt : NULL; VI_UNLOCK(vp); if (mp == NULL) goto retry; } else if (vp->v_type == VREG || vp->v_type == VDIR || vp->v_type == VLNK) { mp = vp->v_mount; } else { return (NULL); } return (VFSTOUFS(mp)); } /* * Function to determine if the buffer has outstanding dependencies * that will cause a roll-back if the buffer is written. If wantcount * is set, return number of dependencies, otherwise just yes or no. 
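 *
 * [Editorial sketch -- not part of the original source.]  The
 * "yes/no or full count" behaviour is implemented with one early-out
 * test per dependency found, so the non-counting case stops at the
 * first hit.  Reduced to its skeleton, with causes_rollback() as an
 * illustrative stand-in for the per-type tests:
 *
 *	retval = 0;
 *	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
 *		if (causes_rollback(wk)) {
 *			retval += 1;
 *			if (!wantcount)
 *				break;
 *		}
 *	}
 *	return (retval);
 *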
*/ static int softdep_count_dependencies(bp, wantcount) struct buf *bp; int wantcount; { struct worklist *wk; struct ufsmount *ump; struct bmsafemap *bmsafemap; struct freework *freework; struct inodedep *inodedep; struct indirdep *indirdep; struct freeblks *freeblks; struct allocindir *aip; struct pagedep *pagedep; struct dirrem *dirrem; struct newblk *newblk; struct mkdir *mkdir; struct diradd *dap; int i, retval; ump = softdep_bp_to_mp(bp); if (ump == NULL) return (0); retval = 0; ACQUIRE_LOCK(ump); LIST_FOREACH(wk, &bp->b_dep, wk_list) { switch (wk->wk_type) { case D_INODEDEP: inodedep = WK_INODEDEP(wk); if ((inodedep->id_state & DEPCOMPLETE) == 0) { /* bitmap allocation dependency */ retval += 1; if (!wantcount) goto out; } if (TAILQ_FIRST(&inodedep->id_inoupdt)) { /* direct block pointer dependency */ retval += 1; if (!wantcount) goto out; } if (TAILQ_FIRST(&inodedep->id_extupdt)) { /* direct block pointer dependency */ retval += 1; if (!wantcount) goto out; } if (TAILQ_FIRST(&inodedep->id_inoreflst)) { /* Add reference dependency. */ retval += 1; if (!wantcount) goto out; } continue; case D_INDIRDEP: indirdep = WK_INDIRDEP(wk); TAILQ_FOREACH(freework, &indirdep->ir_trunc, fw_next) { /* indirect truncation dependency */ retval += 1; if (!wantcount) goto out; } LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) { /* indirect block pointer dependency */ retval += 1; if (!wantcount) goto out; } continue; case D_PAGEDEP: pagedep = WK_PAGEDEP(wk); LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) { if (LIST_FIRST(&dirrem->dm_jremrefhd)) { /* Journal remove ref dependency. */ retval += 1; if (!wantcount) goto out; } } for (i = 0; i < DAHASHSZ; i++) { LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { /* directory entry dependency */ retval += 1; if (!wantcount) goto out; } } continue; case D_BMSAFEMAP: bmsafemap = WK_BMSAFEMAP(wk); if (LIST_FIRST(&bmsafemap->sm_jaddrefhd)) { /* Add reference dependency. */ retval += 1; if (!wantcount) goto out; } if (LIST_FIRST(&bmsafemap->sm_jnewblkhd)) { /* Allocate block dependency. */ retval += 1; if (!wantcount) goto out; } continue; case D_FREEBLKS: freeblks = WK_FREEBLKS(wk); if (LIST_FIRST(&freeblks->fb_jblkdephd)) { /* Freeblk journal dependency. */ retval += 1; if (!wantcount) goto out; } continue; case D_ALLOCDIRECT: case D_ALLOCINDIR: newblk = WK_NEWBLK(wk); if (newblk->nb_jnewblk) { /* Journal allocate dependency. */ retval += 1; if (!wantcount) goto out; } continue; case D_MKDIR: mkdir = WK_MKDIR(wk); if (mkdir->md_jaddref) { /* Journal reference dependency. */ retval += 1; if (!wantcount) goto out; } continue; case D_FREEWORK: case D_FREEDEP: case D_JSEGDEP: case D_JSEG: case D_SBDEP: /* never a dependency on these blocks */ continue; default: panic("softdep_count_dependencies: Unexpected type %s", TYPENAME(wk->wk_type)); /* NOTREACHED */ } } out: FREE_LOCK(ump); return (retval); } /* * Acquire exclusive access to a buffer. * Must be called with a locked mtx parameter. * Return acquired buffer or NULL on failure. */ static struct buf * getdirtybuf(bp, lock, waitfor) struct buf *bp; struct rwlock *lock; int waitfor; { int error; if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) { if (waitfor != MNT_WAIT) return (NULL); error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, lock); /* * Even if we successfully acquire bp here, we have dropped * lock, which may violates our guarantee. 
*/ if (error == 0) BUF_UNLOCK(bp); else if (error != ENOLCK) panic("getdirtybuf: inconsistent lock: %d", error); rw_wlock(lock); return (NULL); } if ((bp->b_vflags & BV_BKGRDINPROG) != 0) { if (lock != BO_LOCKPTR(bp->b_bufobj) && waitfor == MNT_WAIT) { rw_wunlock(lock); BO_LOCK(bp->b_bufobj); BUF_UNLOCK(bp); if ((bp->b_vflags & BV_BKGRDINPROG) != 0) { bp->b_vflags |= BV_BKGRDWAIT; msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO | PDROP, "getbuf", 0); } else BO_UNLOCK(bp->b_bufobj); rw_wlock(lock); return (NULL); } BUF_UNLOCK(bp); if (waitfor != MNT_WAIT) return (NULL); #ifdef DEBUG_VFS_LOCKS if (bp->b_vp->v_type != VCHR) ASSERT_BO_WLOCKED(bp->b_bufobj); #endif bp->b_vflags |= BV_BKGRDWAIT; rw_sleep(&bp->b_xflags, lock, PRIBIO, "getbuf", 0); return (NULL); } if ((bp->b_flags & B_DELWRI) == 0) { BUF_UNLOCK(bp); return (NULL); } bremfree(bp); return (bp); } /* * Check if it is safe to suspend the file system now. On entry, * the vnode interlock for devvp should be held. Return 0 with * the mount interlock held if the file system can be suspended now, * otherwise return EAGAIN with the mount interlock held. */ int softdep_check_suspend(struct mount *mp, struct vnode *devvp, int softdep_depcnt, int softdep_accdepcnt, int secondary_writes, int secondary_accwrites) { struct bufobj *bo; struct ufsmount *ump; struct inodedep *inodedep; int error, unlinked; bo = &devvp->v_bufobj; ASSERT_BO_WLOCKED(bo); /* * If we are not running with soft updates, then we need only * deal with secondary writes as we try to suspend. */ if (MOUNTEDSOFTDEP(mp) == 0) { MNT_ILOCK(mp); while (mp->mnt_secondary_writes != 0) { BO_UNLOCK(bo); msleep(&mp->mnt_secondary_writes, MNT_MTX(mp), (PUSER - 1) | PDROP, "secwr", 0); BO_LOCK(bo); MNT_ILOCK(mp); } /* * Reasons for needing more work before suspend: * - Dirty buffers on devvp. * - Secondary writes occurred after start of vnode sync loop */ error = 0; if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0 || secondary_writes != 0 || mp->mnt_secondary_writes != 0 || secondary_accwrites != mp->mnt_secondary_accwrites) error = EAGAIN; BO_UNLOCK(bo); return (error); } /* * If we are running with soft updates, then we need to coordinate * with them as we try to suspend. */ ump = VFSTOUFS(mp); for (;;) { if (!TRY_ACQUIRE_LOCK(ump)) { BO_UNLOCK(bo); ACQUIRE_LOCK(ump); FREE_LOCK(ump); BO_LOCK(bo); continue; } MNT_ILOCK(mp); if (mp->mnt_secondary_writes != 0) { FREE_LOCK(ump); BO_UNLOCK(bo); msleep(&mp->mnt_secondary_writes, MNT_MTX(mp), (PUSER - 1) | PDROP, "secwr", 0); BO_LOCK(bo); continue; } break; } unlinked = 0; if (MOUNTEDSUJ(mp)) { for (inodedep = TAILQ_FIRST(&ump->softdep_unlinked); inodedep != NULL; inodedep = TAILQ_NEXT(inodedep, id_unlinked)) { if ((inodedep->id_state & (UNLINKED | UNLINKLINKS | UNLINKONLIST)) != (UNLINKED | UNLINKLINKS | UNLINKONLIST) || !check_inodedep_free(inodedep)) continue; unlinked++; } } /* * Reasons for needing more work before suspend: * - Dirty buffers on devvp. 
* - Softdep activity occurred after start of vnode sync loop * - Secondary writes occurred after start of vnode sync loop */ error = 0; if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0 || softdep_depcnt != unlinked || ump->softdep_deps != unlinked || softdep_accdepcnt != ump->softdep_accdeps || secondary_writes != 0 || mp->mnt_secondary_writes != 0 || secondary_accwrites != mp->mnt_secondary_accwrites) error = EAGAIN; FREE_LOCK(ump); BO_UNLOCK(bo); return (error); } /* * Get the number of dependency structures for the file system, both * the current number and the total number allocated. These will * later be used to detect that softdep processing has occurred. */ void softdep_get_depcounts(struct mount *mp, int *softdep_depsp, int *softdep_accdepsp) { struct ufsmount *ump; if (MOUNTEDSOFTDEP(mp) == 0) { *softdep_depsp = 0; *softdep_accdepsp = 0; return; } ump = VFSTOUFS(mp); ACQUIRE_LOCK(ump); *softdep_depsp = ump->softdep_deps; *softdep_accdepsp = ump->softdep_accdeps; FREE_LOCK(ump); } /* * Wait for pending output on a vnode to complete. */ static void drain_output(vp) struct vnode *vp; { ASSERT_VOP_LOCKED(vp, "drain_output"); (void)bufobj_wwait(&vp->v_bufobj, 0, 0); } /* * Called whenever a buffer that is being invalidated or reallocated * contains dependencies. This should only happen if an I/O error has * occurred. The routine is called with the buffer locked. */ static void softdep_deallocate_dependencies(bp) struct buf *bp; { if ((bp->b_ioflags & BIO_ERROR) == 0) panic("softdep_deallocate_dependencies: dangling deps"); if (bp->b_vp != NULL && bp->b_vp->v_mount != NULL) softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname, bp->b_error); else printf("softdep_deallocate_dependencies: " "got error %d while accessing filesystem\n", bp->b_error); if (bp->b_error != ENXIO) panic("softdep_deallocate_dependencies: unrecovered I/O error"); } /* * Function to handle asynchronous write errors in the filesystem. */ static void softdep_error(func, error) char *func; int error; { /* XXX should do something better! 
*/ printf("%s: got error %d while accessing filesystem\n", func, error); } #ifdef DDB static void inodedep_print(struct inodedep *inodedep, int verbose) { db_printf("%p fs %p st %x ino %jd inoblk %jd delta %jd nlink %jd" " saveino %p\n", inodedep, inodedep->id_fs, inodedep->id_state, (intmax_t)inodedep->id_ino, (intmax_t)fsbtodb(inodedep->id_fs, ino_to_fsba(inodedep->id_fs, inodedep->id_ino)), (intmax_t)inodedep->id_nlinkdelta, (intmax_t)inodedep->id_savednlink, inodedep->id_savedino1); if (verbose == 0) return; db_printf("\tpendinghd %p, bufwait %p, inowait %p, inoreflst %p, " "mkdiradd %p\n", LIST_FIRST(&inodedep->id_pendinghd), LIST_FIRST(&inodedep->id_bufwait), LIST_FIRST(&inodedep->id_inowait), TAILQ_FIRST(&inodedep->id_inoreflst), inodedep->id_mkdiradd); db_printf("\tinoupdt %p, newinoupdt %p, extupdt %p, newextupdt %p\n", TAILQ_FIRST(&inodedep->id_inoupdt), TAILQ_FIRST(&inodedep->id_newinoupdt), TAILQ_FIRST(&inodedep->id_extupdt), TAILQ_FIRST(&inodedep->id_newextupdt)); } DB_SHOW_COMMAND(inodedep, db_show_inodedep) { if (have_addr == 0) { db_printf("Address required\n"); return; } inodedep_print((struct inodedep*)addr, 1); } DB_SHOW_COMMAND(inodedeps, db_show_inodedeps) { struct inodedep_hashhead *inodedephd; struct inodedep *inodedep; struct ufsmount *ump; int cnt; if (have_addr == 0) { db_printf("Address required\n"); return; } ump = (struct ufsmount *)addr; for (cnt = 0; cnt < ump->inodedep_hash_size; cnt++) { inodedephd = &ump->inodedep_hashtbl[cnt]; LIST_FOREACH(inodedep, inodedephd, id_hash) { inodedep_print(inodedep, 0); } } } DB_SHOW_COMMAND(worklist, db_show_worklist) { struct worklist *wk; if (have_addr == 0) { db_printf("Address required\n"); return; } wk = (struct worklist *)addr; printf("worklist: %p type %s state 0x%X\n", wk, TYPENAME(wk->wk_type), wk->wk_state); } DB_SHOW_COMMAND(workhead, db_show_workhead) { struct workhead *wkhd; struct worklist *wk; int i; if (have_addr == 0) { db_printf("Address required\n"); return; } wkhd = (struct workhead *)addr; wk = LIST_FIRST(wkhd); for (i = 0; i < 100 && wk != NULL; i++, wk = LIST_NEXT(wk, wk_list)) db_printf("worklist: %p type %s state 0x%X", wk, TYPENAME(wk->wk_type), wk->wk_state); if (i == 100) db_printf("workhead overflow"); printf("\n"); } DB_SHOW_COMMAND(mkdirs, db_show_mkdirs) { struct mkdirlist *mkdirlisthd; struct jaddref *jaddref; struct diradd *diradd; struct mkdir *mkdir; if (have_addr == 0) { db_printf("Address required\n"); return; } mkdirlisthd = (struct mkdirlist *)addr; LIST_FOREACH(mkdir, mkdirlisthd, md_mkdirs) { diradd = mkdir->md_diradd; db_printf("mkdir: %p state 0x%X dap %p state 0x%X", mkdir, mkdir->md_state, diradd, diradd->da_state); if ((jaddref = mkdir->md_jaddref) != NULL) db_printf(" jaddref %p jaddref state 0x%X", jaddref, jaddref->ja_state); db_printf("\n"); } } /* exported to ffs_vfsops.c */ extern void db_print_ffs(struct ufsmount *ump); void db_print_ffs(struct ufsmount *ump) { db_printf("mp %p %s devvp %p fs %p su_wl %d su_deps %d su_req %d\n", ump->um_mountp, ump->um_mountp->mnt_stat.f_mntonname, ump->um_devvp, ump->um_fs, ump->softdep_on_worklist, ump->softdep_deps, ump->softdep_req); } #endif /* DDB */ #endif /* SOFTUPDATES */ Index: projects/clang700-import/sys/ufs/ffs/ffs_subr.c =================================================================== --- projects/clang700-import/sys/ufs/ffs/ffs_subr.c (revision 340426) +++ projects/clang700-import/sys/ufs/ffs/ffs_subr.c (revision 340427) @@ -1,611 +1,615 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 
1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ffs_subr.c 8.5 (Berkeley) 3/21/95 */ #include __FBSDID("$FreeBSD$"); #include #ifndef _KERNEL #include #include #include #include #include #include #include uint32_t calculate_crc32c(uint32_t, const void *, size_t); struct malloc_type; #define UFS_MALLOC(size, type, flags) malloc(size) #define UFS_FREE(ptr, type) free(ptr) #define UFS_TIME time(NULL) #else /* _KERNEL */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define UFS_MALLOC(size, type, flags) malloc(size, type, flags) #define UFS_FREE(ptr, type) free(ptr, type) #define UFS_TIME time_second /* * Return buffer with the contents of block "offset" from the beginning of * directory "ip". If "res" is non-zero, fill it in with a pointer to the * remaining space in the directory. */ int ffs_blkatoff(struct vnode *vp, off_t offset, char **res, struct buf **bpp) { struct inode *ip; struct fs *fs; struct buf *bp; ufs_lbn_t lbn; int bsize, error; ip = VTOI(vp); fs = ITOFS(ip); lbn = lblkno(fs, offset); bsize = blksize(fs, ip, lbn); *bpp = NULL; error = bread(vp, lbn, bsize, NOCRED, &bp); if (error) { brelse(bp); return (error); } if (res) *res = (char *)bp->b_data + blkoff(fs, offset); *bpp = bp; return (0); } /* * Load up the contents of an inode and copy the appropriate pieces * to the incore copy. 
*/ -void +int ffs_load_inode(struct buf *bp, struct inode *ip, struct fs *fs, ino_t ino) { + struct ufs1_dinode *dip1; + struct ufs2_dinode *dip2; if (I_IS_UFS1(ip)) { - *ip->i_din1 = + dip1 = ip->i_din1; + *dip1 = *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ino)); - ip->i_mode = ip->i_din1->di_mode; - ip->i_nlink = ip->i_din1->di_nlink; - ip->i_size = ip->i_din1->di_size; - ip->i_flags = ip->i_din1->di_flags; - ip->i_gen = ip->i_din1->di_gen; - ip->i_uid = ip->i_din1->di_uid; - ip->i_gid = ip->i_din1->di_gid; - } else { - *ip->i_din2 = - *((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ino)); - ip->i_mode = ip->i_din2->di_mode; - ip->i_nlink = ip->i_din2->di_nlink; - ip->i_size = ip->i_din2->di_size; - ip->i_flags = ip->i_din2->di_flags; - ip->i_gen = ip->i_din2->di_gen; - ip->i_uid = ip->i_din2->di_uid; - ip->i_gid = ip->i_din2->di_gid; + ip->i_mode = dip1->di_mode; + ip->i_nlink = dip1->di_nlink; + ip->i_size = dip1->di_size; + ip->i_flags = dip1->di_flags; + ip->i_gen = dip1->di_gen; + ip->i_uid = dip1->di_uid; + ip->i_gid = dip1->di_gid; + return (0); } + dip2 = ip->i_din2; + *dip2 = *((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ino)); + ip->i_mode = dip2->di_mode; + ip->i_nlink = dip2->di_nlink; + ip->i_size = dip2->di_size; + ip->i_flags = dip2->di_flags; + ip->i_gen = dip2->di_gen; + ip->i_uid = dip2->di_uid; + ip->i_gid = dip2->di_gid; + return (0); } #endif /* KERNEL */ /* * These are the low-level functions that actually read and write * the superblock and its associated data. */ static off_t sblock_try[] = SBLOCKSEARCH; static int readsuper(void *, struct fs **, off_t, int, int (*)(void *, off_t, void **, int)); static uint32_t calc_sbhash(struct fs *); /* * Read a superblock from the devfd device. * * If an alternate superblock is specified, it is read. Otherwise the * set of locations given in the SBLOCKSEARCH list is searched for a * superblock. Memory is allocated for the superblock by the readfunc and * is returned. If filltype is non-NULL, additional memory is allocated * of type filltype and filled in with the superblock summary information. * All memory is freed when any error is returned. * * If a superblock is found, zero is returned. Otherwise one of the * following error values is returned: * EIO: non-existent or truncated superblock. * EIO: error reading summary information. * ENOENT: no usable known superblock found. * ENOSPC: failed to allocate space for the superblock. * EINVAL: The previous newfs operation on this volume did not complete. * The administrator must complete newfs before using this volume. */ int ffs_sbget(void *devfd, struct fs **fsp, off_t altsblock, struct malloc_type *filltype, int (*readfunc)(void *devfd, off_t loc, void **bufp, int size)) { struct fs *fs; int i, error, size, blks; uint8_t *space; int32_t *lp; char *buf; fs = NULL; *fsp = NULL; if (altsblock != -1) { if ((error = readsuper(devfd, &fs, altsblock, 1, readfunc)) != 0) { if (fs != NULL) UFS_FREE(fs, filltype); return (error); } } else { for (i = 0; sblock_try[i] != -1; i++) { if ((error = readsuper(devfd, &fs, sblock_try[i], 0, readfunc)) == 0) break; if (fs != NULL) { UFS_FREE(fs, filltype); fs = NULL; } if (error == ENOENT) continue; return (error); } if (sblock_try[i] == -1) return (ENOENT); } /* * Read in the superblock summary information. 
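 *
 * The buffer allocated below holds the csum records (fs_cssize bytes),
 * one int32_t per cylinder group for the cluster summary when
 * fs_contigsumsize > 0, and one byte per cylinder group for
 * fs_contigdirs.  The summary data itself is read from fs_csaddr in
 * fs_bsize sized chunks, with the final chunk trimmed to the fragments
 * that remain.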
*/ size = fs->fs_cssize; blks = howmany(size, fs->fs_fsize); if (fs->fs_contigsumsize > 0) size += fs->fs_ncg * sizeof(int32_t); size += fs->fs_ncg * sizeof(u_int8_t); /* When running in libufs or libsa, UFS_MALLOC may fail */ if ((space = UFS_MALLOC(size, filltype, M_WAITOK)) == NULL) { UFS_FREE(fs, filltype); return (ENOSPC); } fs->fs_csp = (struct csum *)space; for (i = 0; i < blks; i += fs->fs_frag) { size = fs->fs_bsize; if (i + fs->fs_frag > blks) size = (blks - i) * fs->fs_fsize; buf = NULL; error = (*readfunc)(devfd, dbtob(fsbtodb(fs, fs->fs_csaddr + i)), (void **)&buf, size); if (error) { if (buf != NULL) UFS_FREE(buf, filltype); UFS_FREE(fs->fs_csp, filltype); UFS_FREE(fs, filltype); return (error); } memcpy(space, buf, size); UFS_FREE(buf, filltype); space += size; } if (fs->fs_contigsumsize > 0) { fs->fs_maxcluster = lp = (int32_t *)space; for (i = 0; i < fs->fs_ncg; i++) *lp++ = fs->fs_contigsumsize; space = (uint8_t *)lp; } size = fs->fs_ncg * sizeof(u_int8_t); fs->fs_contigdirs = (u_int8_t *)space; bzero(fs->fs_contigdirs, size); *fsp = fs; return (0); } /* * Try to read a superblock from the location specified by sblockloc. * Return zero on success or an errno on failure. */ static int readsuper(void *devfd, struct fs **fsp, off_t sblockloc, int isaltsblk, int (*readfunc)(void *devfd, off_t loc, void **bufp, int size)) { struct fs *fs; int error, res; uint32_t ckhash; error = (*readfunc)(devfd, sblockloc, (void **)fsp, SBLOCKSIZE); if (error != 0) return (error); fs = *fsp; if (fs->fs_magic == FS_BAD_MAGIC) return (EINVAL); if (((fs->fs_magic == FS_UFS1_MAGIC && (isaltsblk || sblockloc <= SBLOCK_UFS1)) || (fs->fs_magic == FS_UFS2_MAGIC && (isaltsblk || sblockloc == fs->fs_sblockloc))) && fs->fs_ncg >= 1 && fs->fs_bsize >= MINBSIZE && fs->fs_bsize <= MAXBSIZE && fs->fs_bsize >= roundup(sizeof(struct fs), DEV_BSIZE) && fs->fs_sbsize <= SBLOCKSIZE) { if (fs->fs_ckhash != (ckhash = calc_sbhash(fs))) { #ifdef _KERNEL res = uprintf("Superblock check-hash failed: recorded " "check-hash 0x%x != computed check-hash 0x%x\n", fs->fs_ckhash, ckhash); #else res = 0; #endif /* * Print check-hash failure if no controlling terminal * in kernel or always if in user-mode (libufs). */ if (res == 0) printf("Superblock check-hash failed: recorded " "check-hash 0x%x != computed check-hash " "0x%x\n", fs->fs_ckhash, ckhash); return (EINVAL); } /* Have to set for old filesystems that predate this field */ fs->fs_sblockactualloc = sblockloc; /* Not yet any summary information */ fs->fs_csp = NULL; return (0); } return (ENOENT); } /* * Write a superblock to the devfd device from the memory pointed to by fs. * Write out the superblock summary information if it is present. * * If the write is successful, zero is returned. Otherwise one of the * following error values is returned: * EIO: failed to write superblock. * EIO: failed to write superblock summary information. */ int ffs_sbput(void *devfd, struct fs *fs, off_t loc, int (*writefunc)(void *devfd, off_t loc, void *buf, int size)) { int i, error, blks, size; uint8_t *space; /* * If there is summary information, write it first, so if there * is an error, the superblock will not be marked as clean. 
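 *
 * Once the summary blocks are out, fs_time is refreshed and the
 * superblock check-hash is recomputed with calc_sbhash() just before
 * the superblock itself is written.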
*/ if (fs->fs_csp != NULL) { blks = howmany(fs->fs_cssize, fs->fs_fsize); space = (uint8_t *)fs->fs_csp; for (i = 0; i < blks; i += fs->fs_frag) { size = fs->fs_bsize; if (i + fs->fs_frag > blks) size = (blks - i) * fs->fs_fsize; if ((error = (*writefunc)(devfd, dbtob(fsbtodb(fs, fs->fs_csaddr + i)), space, size)) != 0) return (error); space += size; } } fs->fs_fmod = 0; fs->fs_time = UFS_TIME; fs->fs_ckhash = calc_sbhash(fs); if ((error = (*writefunc)(devfd, loc, fs, fs->fs_sbsize)) != 0) return (error); return (0); } /* * Calculate the check-hash for a superblock. */ static uint32_t calc_sbhash(struct fs *fs) { uint32_t ckhash, save_ckhash; /* * A filesystem that was using a superblock ckhash may be moved * to an older kernel that does not support ckhashes. The * older kernel will clear the FS_METACKHASH flag indicating * that it does not update hashes. When the disk is moved back * to a kernel capable of ckhashes it disables them on mount: * * if ((fs->fs_flags & FS_METACKHASH) == 0) * fs->fs_metackhash = 0; * * This leaves (fs->fs_metackhash & CK_SUPERBLOCK) == 0) with an * old stale value in the fs->fs_ckhash field. Thus the need to * just accept what is there. */ if ((fs->fs_metackhash & CK_SUPERBLOCK) == 0) return (fs->fs_ckhash); save_ckhash = fs->fs_ckhash; fs->fs_ckhash = 0; /* * If newly read from disk, the caller is responsible for * verifying that fs->fs_sbsize <= SBLOCKSIZE. */ ckhash = calculate_crc32c(~0L, (void *)fs, fs->fs_sbsize); fs->fs_ckhash = save_ckhash; return (ckhash); } /* * Update the frsum fields to reflect addition or deletion * of some frags. */ void ffs_fragacct(struct fs *fs, int fragmap, int32_t fraglist[], int cnt) { int inblk; int field, subfield; int siz, pos; inblk = (int)(fragtbl[fs->fs_frag][fragmap]) << 1; fragmap <<= 1; for (siz = 1; siz < fs->fs_frag; siz++) { if ((inblk & (1 << (siz + (fs->fs_frag % NBBY)))) == 0) continue; field = around[siz]; subfield = inside[siz]; for (pos = siz; pos <= fs->fs_frag; pos++) { if ((fragmap & field) == subfield) { fraglist[siz] += cnt; pos += siz; field <<= siz; subfield <<= siz; } field <<= 1; subfield <<= 1; } } } /* * block operations * * check if a block is available */ int ffs_isblock(struct fs *fs, unsigned char *cp, ufs1_daddr_t h) { unsigned char mask; switch ((int)fs->fs_frag) { case 8: return (cp[h] == 0xff); case 4: mask = 0x0f << ((h & 0x1) << 2); return ((cp[h >> 1] & mask) == mask); case 2: mask = 0x03 << ((h & 0x3) << 1); return ((cp[h >> 2] & mask) == mask); case 1: mask = 0x01 << (h & 0x7); return ((cp[h >> 3] & mask) == mask); default: #ifdef _KERNEL panic("ffs_isblock"); #endif break; } return (0); } /* * check if a block is free */ int ffs_isfreeblock(struct fs *fs, u_char *cp, ufs1_daddr_t h) { switch ((int)fs->fs_frag) { case 8: return (cp[h] == 0); case 4: return ((cp[h >> 1] & (0x0f << ((h & 0x1) << 2))) == 0); case 2: return ((cp[h >> 2] & (0x03 << ((h & 0x3) << 1))) == 0); case 1: return ((cp[h >> 3] & (0x01 << (h & 0x7))) == 0); default: #ifdef _KERNEL panic("ffs_isfreeblock"); #endif break; } return (0); } /* * take a block out of the map */ void ffs_clrblock(struct fs *fs, u_char *cp, ufs1_daddr_t h) { switch ((int)fs->fs_frag) { case 8: cp[h] = 0; return; case 4: cp[h >> 1] &= ~(0x0f << ((h & 0x1) << 2)); return; case 2: cp[h >> 2] &= ~(0x03 << ((h & 0x3) << 1)); return; case 1: cp[h >> 3] &= ~(0x01 << (h & 0x7)); return; default: #ifdef _KERNEL panic("ffs_clrblock"); #endif break; } } /* * put a block into the map */ void ffs_setblock(struct fs *fs, unsigned char *cp, ufs1_daddr_t 
h) { switch ((int)fs->fs_frag) { case 8: cp[h] = 0xff; return; case 4: cp[h >> 1] |= (0x0f << ((h & 0x1) << 2)); return; case 2: cp[h >> 2] |= (0x03 << ((h & 0x3) << 1)); return; case 1: cp[h >> 3] |= (0x01 << (h & 0x7)); return; default: #ifdef _KERNEL panic("ffs_setblock"); #endif break; } } /* * Update the cluster map because of an allocation or free. * * Cnt == 1 means free; cnt == -1 means allocating. */ void ffs_clusteracct(struct fs *fs, struct cg *cgp, ufs1_daddr_t blkno, int cnt) { int32_t *sump; int32_t *lp; u_char *freemapp, *mapp; int i, start, end, forw, back, map; u_int bit; if (fs->fs_contigsumsize <= 0) return; freemapp = cg_clustersfree(cgp); sump = cg_clustersum(cgp); /* * Allocate or clear the actual block. */ if (cnt > 0) setbit(freemapp, blkno); else clrbit(freemapp, blkno); /* * Find the size of the cluster going forward. */ start = blkno + 1; end = start + fs->fs_contigsumsize; if (end >= cgp->cg_nclusterblks) end = cgp->cg_nclusterblks; mapp = &freemapp[start / NBBY]; map = *mapp++; bit = 1U << (start % NBBY); for (i = start; i < end; i++) { if ((map & bit) == 0) break; if ((i & (NBBY - 1)) != (NBBY - 1)) { bit <<= 1; } else { map = *mapp++; bit = 1; } } forw = i - start; /* * Find the size of the cluster going backward. */ start = blkno - 1; end = start - fs->fs_contigsumsize; if (end < 0) end = -1; mapp = &freemapp[start / NBBY]; map = *mapp--; bit = 1U << (start % NBBY); for (i = start; i > end; i--) { if ((map & bit) == 0) break; if ((i & (NBBY - 1)) != 0) { bit >>= 1; } else { map = *mapp--; bit = 1U << (NBBY - 1); } } back = start - i; /* * Account for old cluster and the possibly new forward and * back clusters. */ i = back + forw + 1; if (i > fs->fs_contigsumsize) i = fs->fs_contigsumsize; sump[i] += cnt; if (back > 0) sump[back] -= cnt; if (forw > 0) sump[forw] -= cnt; /* * Update cluster summary information. */ lp = &sump[fs->fs_contigsumsize]; for (i = fs->fs_contigsumsize; i > 0; i--) if (*lp-- > 0) break; fs->fs_maxcluster[cgp->cg_cgx] = i; } Index: projects/clang700-import/sys/ufs/ffs/ffs_vfsops.c =================================================================== --- projects/clang700-import/sys/ufs/ffs/ffs_vfsops.c (revision 340426) +++ projects/clang700-import/sys/ufs/ffs/ffs_vfsops.c (revision 340427) @@ -1,2321 +1,2329 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1989, 1991, 1993, 1994 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95 */ #include __FBSDID("$FreeBSD$"); #include "opt_quota.h" #include "opt_ufs.h" #include "opt_ffs.h" #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static uma_zone_t uma_inode, uma_ufs1, uma_ufs2; static int ffs_mountfs(struct vnode *, struct mount *, struct thread *); static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, ufs2_daddr_t); static void ffs_ifree(struct ufsmount *ump, struct inode *ip); static int ffs_sync_lazy(struct mount *mp); static int ffs_use_bread(void *devfd, off_t loc, void **bufp, int size); static int ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size); static vfs_init_t ffs_init; static vfs_uninit_t ffs_uninit; static vfs_extattrctl_t ffs_extattrctl; static vfs_cmount_t ffs_cmount; static vfs_unmount_t ffs_unmount; static vfs_mount_t ffs_mount; static vfs_statfs_t ffs_statfs; static vfs_fhtovp_t ffs_fhtovp; static vfs_sync_t ffs_sync; static struct vfsops ufs_vfsops = { .vfs_extattrctl = ffs_extattrctl, .vfs_fhtovp = ffs_fhtovp, .vfs_init = ffs_init, .vfs_mount = ffs_mount, .vfs_cmount = ffs_cmount, .vfs_quotactl = ufs_quotactl, .vfs_root = ufs_root, .vfs_statfs = ffs_statfs, .vfs_sync = ffs_sync, .vfs_uninit = ffs_uninit, .vfs_unmount = ffs_unmount, .vfs_vget = ffs_vget, .vfs_susp_clean = process_deferred_inactive, }; VFS_SET(ufs_vfsops, ufs, 0); MODULE_VERSION(ufs, 1); static b_strategy_t ffs_geom_strategy; static b_write_t ffs_bufwrite; static struct buf_ops ffs_ops = { .bop_name = "FFS", .bop_write = ffs_bufwrite, .bop_strategy = ffs_geom_strategy, .bop_sync = bufsync, #ifdef NO_FFS_SNAPSHOT .bop_bdflush = bufbdflush, #else .bop_bdflush = ffs_bdflush, #endif }; /* * Note that userquota and groupquota options are not currently used * by UFS/FFS code and generally mount(8) does not pass those options * from userland, but they can be passed by loader(8) via * vfs.root.mountfrom.options. 
*/ static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr", "noclusterw", "noexec", "export", "force", "from", "groupquota", "multilabel", "nfsv4acls", "fsckpid", "snapshot", "nosuid", "suiddir", "nosymfollow", "sync", "union", "userquota", NULL }; static int ffs_mount(struct mount *mp) { struct vnode *devvp; struct thread *td; struct ufsmount *ump = NULL; struct fs *fs; pid_t fsckpid = 0; int error, error1, flags; uint64_t mntorflags; accmode_t accmode; struct nameidata ndp; char *fspec; td = curthread; if (vfs_filteropt(mp->mnt_optnew, ffs_opts)) return (EINVAL); if (uma_inode == NULL) { uma_inode = uma_zcreate("FFS inode", sizeof(struct inode), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); uma_ufs1 = uma_zcreate("FFS1 dinode", sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); uma_ufs2 = uma_zcreate("FFS2 dinode", sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); } vfs_deleteopt(mp->mnt_optnew, "groupquota"); vfs_deleteopt(mp->mnt_optnew, "userquota"); fspec = vfs_getopts(mp->mnt_optnew, "from", &error); if (error) return (error); mntorflags = 0; if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0) mntorflags |= MNT_ACLS; if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) { mntorflags |= MNT_SNAPSHOT; /* * Once we have set the MNT_SNAPSHOT flag, do not * persist "snapshot" in the options list. */ vfs_deleteopt(mp->mnt_optnew, "snapshot"); vfs_deleteopt(mp->mnt_opt, "snapshot"); } if (vfs_getopt(mp->mnt_optnew, "fsckpid", NULL, NULL) == 0 && vfs_scanopt(mp->mnt_optnew, "fsckpid", "%d", &fsckpid) == 1) { /* * Once we have set the restricted PID, do not * persist "fsckpid" in the options list. */ vfs_deleteopt(mp->mnt_optnew, "fsckpid"); vfs_deleteopt(mp->mnt_opt, "fsckpid"); if (mp->mnt_flag & MNT_UPDATE) { if (VFSTOUFS(mp)->um_fs->fs_ronly == 0 && vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) { vfs_mount_error(mp, "Checker enable: Must be read-only"); return (EINVAL); } } else if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) { vfs_mount_error(mp, "Checker enable: Must be read-only"); return (EINVAL); } /* Set to -1 if we are done */ if (fsckpid == 0) fsckpid = -1; } if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) { if (mntorflags & MNT_ACLS) { vfs_mount_error(mp, "\"acls\" and \"nfsv4acls\" options " "are mutually exclusive"); return (EINVAL); } mntorflags |= MNT_NFS4ACLS; } MNT_ILOCK(mp); mp->mnt_flag |= mntorflags; MNT_IUNLOCK(mp); /* * If updating, check whether changing from read-only to * read/write; if there is no device name, that's all we do. */ if (mp->mnt_flag & MNT_UPDATE) { ump = VFSTOUFS(mp); fs = ump->um_fs; devvp = ump->um_devvp; if (fsckpid == -1 && ump->um_fsckpid > 0) { if ((error = ffs_flushfiles(mp, WRITECLOSE, td)) != 0 || (error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) return (error); g_topology_lock(); /* * Return to normal read-only mode. */ error = g_access(ump->um_cp, 0, -1, 0); g_topology_unlock(); ump->um_fsckpid = 0; } if (fs->fs_ronly == 0 && vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) { /* * Flush any dirty data and suspend filesystem. */ if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0) return (error); error = vfs_write_suspend_umnt(mp); if (error != 0) return (error); /* * Check for and optionally get rid of files open * for writing. 
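 *
 * WRITECLOSE alone is used for an ordinary downgrade; FORCECLOSE is
 * added only when MNT_FORCE was given.  The flush goes through
 * softdep_flushfiles() when soft updates are active and through
 * ffs_flushfiles() otherwise.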
*/ flags = WRITECLOSE; if (mp->mnt_flag & MNT_FORCE) flags |= FORCECLOSE; if (MOUNTEDSOFTDEP(mp)) { error = softdep_flushfiles(mp, flags, td); } else { error = ffs_flushfiles(mp, flags, td); } if (error) { vfs_write_resume(mp, 0); return (error); } if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { printf("WARNING: %s Update error: blocks %jd " "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes); fs->fs_pendingblocks = 0; fs->fs_pendinginodes = 0; } if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0) fs->fs_clean = 1; if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) { fs->fs_ronly = 0; fs->fs_clean = 0; vfs_write_resume(mp, 0); return (error); } if (MOUNTEDSOFTDEP(mp)) softdep_unmount(mp); g_topology_lock(); /* * Drop our write and exclusive access. */ g_access(ump->um_cp, 0, -1, -1); g_topology_unlock(); fs->fs_ronly = 1; MNT_ILOCK(mp); mp->mnt_flag |= MNT_RDONLY; MNT_IUNLOCK(mp); /* * Allow the writers to note that filesystem * is ro now. */ vfs_write_resume(mp, 0); } if ((mp->mnt_flag & MNT_RELOAD) && (error = ffs_reload(mp, td, 0)) != 0) return (error); if (fs->fs_ronly && !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) { /* * If we are running a checker, do not allow upgrade. */ if (ump->um_fsckpid > 0) { vfs_mount_error(mp, "Active checker, cannot upgrade to write"); return (EINVAL); } /* * If upgrade to read-write by non-root, then verify * that user has necessary permissions on the device. */ vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); error = VOP_ACCESS(devvp, VREAD | VWRITE, td->td_ucred, td); if (error) error = priv_check(td, PRIV_VFS_MOUNT_PERM); if (error) { VOP_UNLOCK(devvp, 0); return (error); } VOP_UNLOCK(devvp, 0); fs->fs_flags &= ~FS_UNCLEAN; if (fs->fs_clean == 0) { fs->fs_flags |= FS_UNCLEAN; if ((mp->mnt_flag & MNT_FORCE) || ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 && (fs->fs_flags & FS_DOSOFTDEP))) { printf("WARNING: %s was not properly " "dismounted\n", fs->fs_fsmnt); } else { vfs_mount_error(mp, "R/W mount of %s denied. %s.%s", fs->fs_fsmnt, "Filesystem is not clean - run fsck", (fs->fs_flags & FS_SUJ) == 0 ? "" : " Forced mount will invalidate" " journal contents"); return (EPERM); } } g_topology_lock(); /* * Request exclusive write access. */ error = g_access(ump->um_cp, 0, 1, 1); g_topology_unlock(); if (error) return (error); if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0) return (error); fs->fs_ronly = 0; MNT_ILOCK(mp); mp->mnt_flag &= ~MNT_RDONLY; MNT_IUNLOCK(mp); fs->fs_mtime = time_second; /* check to see if we need to start softdep */ if ((fs->fs_flags & FS_DOSOFTDEP) && (error = softdep_mount(devvp, mp, fs, td->td_ucred))){ vn_finished_write(mp); return (error); } fs->fs_clean = 0; if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) { vn_finished_write(mp); return (error); } if (fs->fs_snapinum[0] != 0) ffs_snapshot_mount(mp); vn_finished_write(mp); } /* * Soft updates is incompatible with "async", * so if we are doing softupdates stop the user * from setting the async flag in an update. * Softdep_mount() clears it in an initial mount * or ro->rw remount. */ if (MOUNTEDSOFTDEP(mp)) { /* XXX: Reset too late ? */ MNT_ILOCK(mp); mp->mnt_flag &= ~MNT_ASYNC; MNT_IUNLOCK(mp); } /* * Keep MNT_ACLS flag if it is stored in superblock. */ if ((fs->fs_flags & FS_ACLS) != 0) { /* XXX: Set too late ? */ MNT_ILOCK(mp); mp->mnt_flag |= MNT_ACLS; MNT_IUNLOCK(mp); } if ((fs->fs_flags & FS_NFS4ACLS) != 0) { /* XXX: Set too late ? 
*/ MNT_ILOCK(mp); mp->mnt_flag |= MNT_NFS4ACLS; MNT_IUNLOCK(mp); } /* * If this is a request from fsck to clean up the filesystem, * then allow the specified pid to proceed. */ if (fsckpid > 0) { if (ump->um_fsckpid != 0) { vfs_mount_error(mp, "Active checker already running on %s", fs->fs_fsmnt); return (EINVAL); } KASSERT(MOUNTEDSOFTDEP(mp) == 0, ("soft updates enabled on read-only file system")); g_topology_lock(); /* * Request write access. */ error = g_access(ump->um_cp, 0, 1, 0); g_topology_unlock(); if (error) { vfs_mount_error(mp, "Checker activation failed on %s", fs->fs_fsmnt); return (error); } ump->um_fsckpid = fsckpid; if (fs->fs_snapinum[0] != 0) ffs_snapshot_mount(mp); fs->fs_mtime = time_second; fs->fs_fmod = 1; fs->fs_clean = 0; (void) ffs_sbupdate(ump, MNT_WAIT, 0); } /* * If this is a snapshot request, take the snapshot. */ if (mp->mnt_flag & MNT_SNAPSHOT) return (ffs_snapshot(mp, fspec)); /* * Must not call namei() while owning busy ref. */ vfs_unbusy(mp); } /* * Not an update, or updating the name: look up the name * and verify that it refers to a sensible disk device. */ NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td); error = namei(&ndp); if ((mp->mnt_flag & MNT_UPDATE) != 0) { /* * Unmount does not start if MNT_UPDATE is set. Mount * update busies mp before setting MNT_UPDATE. We * must be able to retain our busy ref succesfully, * without sleep. */ error1 = vfs_busy(mp, MBF_NOWAIT); MPASS(error1 == 0); } if (error != 0) return (error); NDFREE(&ndp, NDF_ONLY_PNBUF); devvp = ndp.ni_vp; if (!vn_isdisk(devvp, &error)) { vput(devvp); return (error); } /* * If mount by non-root, then verify that user has necessary * permissions on the device. */ accmode = VREAD; if ((mp->mnt_flag & MNT_RDONLY) == 0) accmode |= VWRITE; error = VOP_ACCESS(devvp, accmode, td->td_ucred, td); if (error) error = priv_check(td, PRIV_VFS_MOUNT_PERM); if (error) { vput(devvp); return (error); } if (mp->mnt_flag & MNT_UPDATE) { /* * Update only * * If it's not the same vnode, or at least the same device * then it's not correct. */ if (devvp->v_rdev != ump->um_devvp->v_rdev) error = EINVAL; /* needs translation */ vput(devvp); if (error) return (error); } else { /* * New mount * * We need the name for the mount point (also used for * "last mounted on") copied in. If an error occurs, * the mount point is discarded by the upper level code. * Note that vfs_mount_alloc() populates f_mntonname for us. */ if ((error = ffs_mountfs(devvp, mp, td)) != 0) { vrele(devvp); return (error); } if (fsckpid > 0) { KASSERT(MOUNTEDSOFTDEP(mp) == 0, ("soft updates enabled on read-only file system")); ump = VFSTOUFS(mp); fs = ump->um_fs; g_topology_lock(); /* * Request write access. */ error = g_access(ump->um_cp, 0, 1, 0); g_topology_unlock(); if (error) { printf("WARNING: %s: Checker activation " "failed\n", fs->fs_fsmnt); } else { ump->um_fsckpid = fsckpid; if (fs->fs_snapinum[0] != 0) ffs_snapshot_mount(mp); fs->fs_mtime = time_second; fs->fs_clean = 0; (void) ffs_sbupdate(ump, MNT_WAIT, 0); } } } vfs_mountedfrom(mp, fspec); return (0); } /* * Compatibility with old mount system call. 
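 *
 * The old-style ufs_args structure is copied in from userland, its
 * export settings are converted with vfs_oexport_conv(), and the result
 * is resubmitted through kernel_mount() as "from" and "export" options.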
*/ static int ffs_cmount(struct mntarg *ma, void *data, uint64_t flags) { struct ufs_args args; struct export_args exp; int error; if (data == NULL) return (EINVAL); error = copyin(data, &args, sizeof args); if (error) return (error); vfs_oexport_conv(&args.export, &exp); ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN); ma = mount_arg(ma, "export", &exp, sizeof(exp)); error = kernel_mount(ma, flags); return (error); } /* * Reload all incore data for a filesystem (used after running fsck on * the root filesystem and finding things to fix). If the 'force' flag * is 0, the filesystem must be mounted read-only. * * Things to do to update the mount: * 1) invalidate all cached meta-data. * 2) re-read superblock from disk. * 3) re-read summary information from disk. * 4) invalidate all inactive vnodes. * 5) clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags, allowing secondary * writers, if requested. * 6) invalidate all cached file data. * 7) re-read inode data for all active vnodes. */ int ffs_reload(struct mount *mp, struct thread *td, int flags) { struct vnode *vp, *mvp, *devvp; struct inode *ip; void *space; struct buf *bp; struct fs *fs, *newfs; struct ufsmount *ump; ufs2_daddr_t sblockloc; int i, blks, error; u_long size; int32_t *lp; ump = VFSTOUFS(mp); MNT_ILOCK(mp); if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) { MNT_IUNLOCK(mp); return (EINVAL); } MNT_IUNLOCK(mp); /* * Step 1: invalidate all cached meta-data. */ devvp = VFSTOUFS(mp)->um_devvp; vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); if (vinvalbuf(devvp, 0, 0, 0) != 0) panic("ffs_reload: dirty1"); VOP_UNLOCK(devvp, 0); /* * Step 2: re-read superblock from disk. */ fs = VFSTOUFS(mp)->um_fs; if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize, NOCRED, &bp)) != 0) return (error); newfs = (struct fs *)bp->b_data; if ((newfs->fs_magic != FS_UFS1_MAGIC && newfs->fs_magic != FS_UFS2_MAGIC) || newfs->fs_bsize > MAXBSIZE || newfs->fs_bsize < sizeof(struct fs)) { brelse(bp); return (EIO); /* XXX needs translation */ } /* * Copy pointer fields back into superblock before copying in XXX * new superblock. These should really be in the ufsmount. XXX * Note that important parameters (eg fs_ncg) are unchanged. */ newfs->fs_csp = fs->fs_csp; newfs->fs_maxcluster = fs->fs_maxcluster; newfs->fs_contigdirs = fs->fs_contigdirs; newfs->fs_active = fs->fs_active; newfs->fs_ronly = fs->fs_ronly; sblockloc = fs->fs_sblockloc; bcopy(newfs, fs, (u_int)fs->fs_sbsize); brelse(bp); mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen; ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc); UFS_LOCK(ump); if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { printf("WARNING: %s: reload pending error: blocks %jd " "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes); fs->fs_pendingblocks = 0; fs->fs_pendinginodes = 0; } UFS_UNLOCK(ump); /* * Step 3: re-read summary information from disk. */ size = fs->fs_cssize; blks = howmany(size, fs->fs_fsize); if (fs->fs_contigsumsize > 0) size += fs->fs_ncg * sizeof(int32_t); size += fs->fs_ncg * sizeof(u_int8_t); free(fs->fs_csp, M_UFSMNT); space = malloc(size, M_UFSMNT, M_WAITOK); fs->fs_csp = space; for (i = 0; i < blks; i += fs->fs_frag) { size = fs->fs_bsize; if (i + fs->fs_frag > blks) size = (blks - i) * fs->fs_fsize; error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size, NOCRED, &bp); if (error) return (error); bcopy(bp->b_data, space, (u_int)size); space = (char *)space + size; brelse(bp); } /* * We no longer know anything about clusters per cylinder group. 
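 *
 * As in ffs_sbget(), each entry of the reloaded fs_maxcluster array is
 * therefore reset to fs_contigsumsize and fs_contigdirs is zeroed; both
 * are expected to be recomputed as the cylinder groups are used again.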
*/ if (fs->fs_contigsumsize > 0) { fs->fs_maxcluster = lp = space; for (i = 0; i < fs->fs_ncg; i++) *lp++ = fs->fs_contigsumsize; space = lp; } size = fs->fs_ncg * sizeof(u_int8_t); fs->fs_contigdirs = (u_int8_t *)space; bzero(fs->fs_contigdirs, size); if ((flags & FFSR_UNSUSPEND) != 0) { MNT_ILOCK(mp); mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2); wakeup(&mp->mnt_flag); MNT_IUNLOCK(mp); } loop: MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { /* * Skip syncer vnode. */ if (vp->v_type == VNON) { VI_UNLOCK(vp); continue; } /* * Step 4: invalidate all cached file data. */ if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) { MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); goto loop; } if (vinvalbuf(vp, 0, 0, 0)) panic("ffs_reload: dirty2"); /* * Step 5: re-read inode data for all active vnodes. */ ip = VTOI(vp); error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int)fs->fs_bsize, NOCRED, &bp); if (error) { - VOP_UNLOCK(vp, 0); - vrele(vp); + vput(vp); MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); return (error); } - ffs_load_inode(bp, ip, fs, ip->i_number); + if ((error = ffs_load_inode(bp, ip, fs, ip->i_number)) != 0) { + brelse(bp); + vput(vp); + MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); + return (error); + } ip->i_effnlink = ip->i_nlink; brelse(bp); - VOP_UNLOCK(vp, 0); - vrele(vp); + vput(vp); } return (0); } /* * Common code for mount and mountroot */ static int ffs_mountfs(devvp, mp, td) struct vnode *devvp; struct mount *mp; struct thread *td; { struct ufsmount *ump; struct fs *fs; struct cdev *dev; int error, i, len, ronly; struct ucred *cred; struct g_consumer *cp; struct mount *nmp; int candelete; fs = NULL; ump = NULL; cred = td ? td->td_ucred : NOCRED; ronly = (mp->mnt_flag & MNT_RDONLY) != 0; KASSERT(devvp->v_type == VCHR, ("reclaimed devvp")); dev = devvp->v_rdev; if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0, (uintptr_t)mp) == 0) { VOP_UNLOCK(devvp, 0); return (EBUSY); } g_topology_lock(); error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1); g_topology_unlock(); if (error != 0) { atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0); VOP_UNLOCK(devvp, 0); return (error); } dev_ref(dev); devvp->v_bufobj.bo_ops = &ffs_ops; VOP_UNLOCK(devvp, 0); if (dev->si_iosize_max != 0) mp->mnt_iosize_max = dev->si_iosize_max; if (mp->mnt_iosize_max > MAXPHYS) mp->mnt_iosize_max = MAXPHYS; if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) { error = EINVAL; vfs_mount_error(mp, "Invalid sectorsize %d for superblock size %d", cp->provider->sectorsize, SBLOCKSIZE); goto out; } /* fetch the superblock and summary information */ if ((error = ffs_sbget(devvp, &fs, -1, M_UFSMNT, ffs_use_bread)) != 0) goto out; fs->fs_fmod = 0; /* if we ran on a kernel without metadata check hashes, disable them */ if ((fs->fs_flags & FS_METACKHASH) == 0) fs->fs_metackhash = 0; /* none of these types of check-hashes are maintained by this kernel */ fs->fs_metackhash &= ~(CK_INODE | CK_INDIR | CK_DIR); /* no support for any undefined flags */ fs->fs_flags &= FS_SUPPORTED; fs->fs_flags &= ~FS_UNCLEAN; if (fs->fs_clean == 0) { fs->fs_flags |= FS_UNCLEAN; if (ronly || (mp->mnt_flag & MNT_FORCE) || ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 && (fs->fs_flags & FS_DOSOFTDEP))) { printf("WARNING: %s was not properly dismounted\n", fs->fs_fsmnt); } else { vfs_mount_error(mp, "R/W mount of %s denied. %s%s", fs->fs_fsmnt, "Filesystem is not clean - run fsck.", (fs->fs_flags & FS_SUJ) == 0 ? 
"" : " Forced mount will invalidate journal contents"); error = EPERM; goto out; } if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) && (mp->mnt_flag & MNT_FORCE)) { printf("WARNING: %s: lost blocks %jd files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes); fs->fs_pendingblocks = 0; fs->fs_pendinginodes = 0; } } if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { printf("WARNING: %s: mount pending error: blocks %jd " "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes); fs->fs_pendingblocks = 0; fs->fs_pendinginodes = 0; } if ((fs->fs_flags & FS_GJOURNAL) != 0) { #ifdef UFS_GJOURNAL /* * Get journal provider name. */ len = 1024; mp->mnt_gjprovider = malloc((u_long)len, M_UFSMNT, M_WAITOK); if (g_io_getattr("GJOURNAL::provider", cp, &len, mp->mnt_gjprovider) == 0) { mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, len, M_UFSMNT, M_WAITOK); MNT_ILOCK(mp); mp->mnt_flag |= MNT_GJOURNAL; MNT_IUNLOCK(mp); } else { printf("WARNING: %s: GJOURNAL flag on fs " "but no gjournal provider below\n", mp->mnt_stat.f_mntonname); free(mp->mnt_gjprovider, M_UFSMNT); mp->mnt_gjprovider = NULL; } #else printf("WARNING: %s: GJOURNAL flag on fs but no " "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname); #endif } else { mp->mnt_gjprovider = NULL; } ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO); ump->um_cp = cp; ump->um_bo = &devvp->v_bufobj; ump->um_fs = fs; if (fs->fs_magic == FS_UFS1_MAGIC) { ump->um_fstype = UFS1; ump->um_balloc = ffs_balloc_ufs1; } else { ump->um_fstype = UFS2; ump->um_balloc = ffs_balloc_ufs2; } ump->um_blkatoff = ffs_blkatoff; ump->um_truncate = ffs_truncate; ump->um_update = ffs_update; ump->um_valloc = ffs_valloc; ump->um_vfree = ffs_vfree; ump->um_ifree = ffs_ifree; ump->um_rdonly = ffs_rdonly; ump->um_snapgone = ffs_snapgone; mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF); ffs_oldfscompat_read(fs, ump, fs->fs_sblockloc); fs->fs_ronly = ronly; fs->fs_active = NULL; mp->mnt_data = ump; mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0]; mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1]; nmp = NULL; if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 || (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) { if (nmp) vfs_rel(nmp); vfs_getnewfsid(mp); } mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen; MNT_ILOCK(mp); mp->mnt_flag |= MNT_LOCAL; MNT_IUNLOCK(mp); if ((fs->fs_flags & FS_MULTILABEL) != 0) { #ifdef MAC MNT_ILOCK(mp); mp->mnt_flag |= MNT_MULTILABEL; MNT_IUNLOCK(mp); #else printf("WARNING: %s: multilabel flag on fs but " "no MAC support\n", mp->mnt_stat.f_mntonname); #endif } if ((fs->fs_flags & FS_ACLS) != 0) { #ifdef UFS_ACL MNT_ILOCK(mp); if (mp->mnt_flag & MNT_NFS4ACLS) printf("WARNING: %s: ACLs flag on fs conflicts with " "\"nfsv4acls\" mount option; option ignored\n", mp->mnt_stat.f_mntonname); mp->mnt_flag &= ~MNT_NFS4ACLS; mp->mnt_flag |= MNT_ACLS; MNT_IUNLOCK(mp); #else printf("WARNING: %s: ACLs flag on fs but no ACLs support\n", mp->mnt_stat.f_mntonname); #endif } if ((fs->fs_flags & FS_NFS4ACLS) != 0) { #ifdef UFS_ACL MNT_ILOCK(mp); if (mp->mnt_flag & MNT_ACLS) printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts " "with \"acls\" mount option; option ignored\n", mp->mnt_stat.f_mntonname); mp->mnt_flag &= ~MNT_ACLS; mp->mnt_flag |= MNT_NFS4ACLS; MNT_IUNLOCK(mp); #else printf("WARNING: %s: NFSv4 ACLs flag on fs but no " "ACLs support\n", mp->mnt_stat.f_mntonname); #endif } if ((fs->fs_flags & FS_TRIM) != 0) { len = sizeof(int); if (g_io_getattr("GEOM::candelete", cp, &len, &candelete) == 0) { if (candelete) ump->um_flags |= 
UM_CANDELETE; else printf("WARNING: %s: TRIM flag on fs but disk " "does not support TRIM\n", mp->mnt_stat.f_mntonname); } else { printf("WARNING: %s: TRIM flag on fs but disk does " "not confirm that it supports TRIM\n", mp->mnt_stat.f_mntonname); } if (((ump->um_flags) & UM_CANDELETE) != 0) { ump->um_trim_tq = taskqueue_create("trim", M_WAITOK, taskqueue_thread_enqueue, &ump->um_trim_tq); taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS, "%s trim", mp->mnt_stat.f_mntonname); ump->um_trimhash = hashinit(MAXTRIMIO, M_TRIM, &ump->um_trimlisthashsize); } } ump->um_mountp = mp; ump->um_dev = dev; ump->um_devvp = devvp; ump->um_nindir = fs->fs_nindir; ump->um_bptrtodb = fs->fs_fsbtodb; ump->um_seqinc = fs->fs_frag; for (i = 0; i < MAXQUOTAS; i++) ump->um_quotas[i] = NULLVP; #ifdef UFS_EXTATTR ufs_extattr_uepm_init(&ump->um_extattr); #endif /* * Set FS local "last mounted on" information (NULL pad) */ bzero(fs->fs_fsmnt, MAXMNTLEN); strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN); mp->mnt_stat.f_iosize = fs->fs_bsize; if (mp->mnt_flag & MNT_ROOTFS) { /* * Root mount; update timestamp in mount structure. * this will be used by the common root mount code * to update the system clock. */ mp->mnt_time = fs->fs_time; } if (ronly == 0) { fs->fs_mtime = time_second; if ((fs->fs_flags & FS_DOSOFTDEP) && (error = softdep_mount(devvp, mp, fs, cred)) != 0) { ffs_flushfiles(mp, FORCECLOSE, td); goto out; } if (fs->fs_snapinum[0] != 0) ffs_snapshot_mount(mp); fs->fs_fmod = 1; fs->fs_clean = 0; (void) ffs_sbupdate(ump, MNT_WAIT, 0); } /* * Initialize filesystem state information in mount struct. */ MNT_ILOCK(mp); mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED | MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE; MNT_IUNLOCK(mp); #ifdef UFS_EXTATTR #ifdef UFS_EXTATTR_AUTOSTART /* * * Auto-starting does the following: * - check for /.attribute in the fs, and extattr_start if so * - for each file in .attribute, enable that file with * an attribute of the same name. * Not clear how to report errors -- probably eat them. * This would all happen while the filesystem was busy/not * available, so would effectively be "atomic". */ (void) ufs_extattr_autostart(mp, td); #endif /* !UFS_EXTATTR_AUTOSTART */ #endif /* !UFS_EXTATTR */ return (0); out: if (fs != NULL) { free(fs->fs_csp, M_UFSMNT); free(fs, M_UFSMNT); } if (cp != NULL) { g_topology_lock(); g_vfs_close(cp); g_topology_unlock(); } if (ump) { mtx_destroy(UFS_MTX(ump)); if (mp->mnt_gjprovider != NULL) { free(mp->mnt_gjprovider, M_UFSMNT); mp->mnt_gjprovider = NULL; } free(ump, M_UFSMNT); mp->mnt_data = NULL; } atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0); dev_rel(dev); return (error); } /* * A read function for use by filesystem-layer routines. */ static int ffs_use_bread(void *devfd, off_t loc, void **bufp, int size) { struct buf *bp; int error; KASSERT(*bufp == NULL, ("ffs_use_bread: non-NULL *bufp %p\n", *bufp)); *bufp = malloc(size, M_UFSMNT, M_WAITOK); if ((error = bread((struct vnode *)devfd, btodb(loc), size, NOCRED, &bp)) != 0) return (error); bcopy(bp->b_data, *bufp, size); bp->b_flags |= B_INVAL | B_NOCACHE; brelse(bp); return (0); } #include static int bigcgs = 0; SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, ""); /* * Sanity checks for loading old filesystem superblocks. * See ffs_oldfscompat_write below for unwound actions. * * XXX - Parts get retired eventually. * Unfortunately new bits get added. 
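 *
 * In outline: move fs_flags to its new location and record fs_sblockloc
 * if the FS_FLAGS_UPDATED conversion has not been done yet, copy the old
 * narrow UFS1 time/size/csum fields into their wider replacements, clamp
 * fs_maxfilesize for UFS1, and supply defaults for fs_avgfilesize and
 * fs_avgfpdir on very old filesystems.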
*/ static void ffs_oldfscompat_read(fs, ump, sblockloc) struct fs *fs; struct ufsmount *ump; ufs2_daddr_t sblockloc; { off_t maxfilesize; /* * If not yet done, update fs_flags location and value of fs_sblockloc. */ if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) { fs->fs_flags = fs->fs_old_flags; fs->fs_old_flags |= FS_FLAGS_UPDATED; fs->fs_sblockloc = sblockloc; } /* * If not yet done, update UFS1 superblock with new wider fields. */ if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) { fs->fs_maxbsize = fs->fs_bsize; fs->fs_time = fs->fs_old_time; fs->fs_size = fs->fs_old_size; fs->fs_dsize = fs->fs_old_dsize; fs->fs_csaddr = fs->fs_old_csaddr; fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir; fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree; fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree; fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree; } if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_old_inodefmt < FS_44INODEFMT) { fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1; fs->fs_qbmask = ~fs->fs_bmask; fs->fs_qfmask = ~fs->fs_fmask; } if (fs->fs_magic == FS_UFS1_MAGIC) { ump->um_savedmaxfilesize = fs->fs_maxfilesize; maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1; if (fs->fs_maxfilesize > maxfilesize) fs->fs_maxfilesize = maxfilesize; } /* Compatibility for old filesystems */ if (fs->fs_avgfilesize <= 0) fs->fs_avgfilesize = AVFILESIZ; if (fs->fs_avgfpdir <= 0) fs->fs_avgfpdir = AFPDIR; if (bigcgs) { fs->fs_save_cgsize = fs->fs_cgsize; fs->fs_cgsize = fs->fs_bsize; } } /* * Unwinding superblock updates for old filesystems. * See ffs_oldfscompat_read above for details. * * XXX - Parts get retired eventually. * Unfortunately new bits get added. */ void ffs_oldfscompat_write(fs, ump) struct fs *fs; struct ufsmount *ump; { /* * Copy back UFS2 updated fields that UFS1 inspects. 
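 *
 * This reverses the widening done in ffs_oldfscompat_read(): the 64-bit
 * time and csum totals are copied back into the old 32-bit fields and
 * fs_maxfilesize is restored from um_savedmaxfilesize.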
*/ if (fs->fs_magic == FS_UFS1_MAGIC) { fs->fs_old_time = fs->fs_time; fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir; fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree; fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree; fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree; fs->fs_maxfilesize = ump->um_savedmaxfilesize; } if (bigcgs) { fs->fs_cgsize = fs->fs_save_cgsize; fs->fs_save_cgsize = 0; } } /* * unmount system call */ static int ffs_unmount(mp, mntflags) struct mount *mp; int mntflags; { struct thread *td; struct ufsmount *ump = VFSTOUFS(mp); struct fs *fs; int error, flags, susp; #ifdef UFS_EXTATTR int e_restart; #endif flags = 0; td = curthread; fs = ump->um_fs; susp = 0; if (mntflags & MNT_FORCE) { flags |= FORCECLOSE; susp = fs->fs_ronly == 0; } #ifdef UFS_EXTATTR if ((error = ufs_extattr_stop(mp, td))) { if (error != EOPNOTSUPP) printf("WARNING: unmount %s: ufs_extattr_stop " "returned errno %d\n", mp->mnt_stat.f_mntonname, error); e_restart = 0; } else { ufs_extattr_uepm_destroy(&ump->um_extattr); e_restart = 1; } #endif if (susp) { error = vfs_write_suspend_umnt(mp); if (error != 0) goto fail1; } if (MOUNTEDSOFTDEP(mp)) error = softdep_flushfiles(mp, flags, td); else error = ffs_flushfiles(mp, flags, td); if (error != 0 && error != ENXIO) goto fail; UFS_LOCK(ump); if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { printf("WARNING: unmount %s: pending error: blocks %jd " "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, fs->fs_pendinginodes); fs->fs_pendingblocks = 0; fs->fs_pendinginodes = 0; } UFS_UNLOCK(ump); if (MOUNTEDSOFTDEP(mp)) softdep_unmount(mp); if (fs->fs_ronly == 0 || ump->um_fsckpid > 0) { fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1; error = ffs_sbupdate(ump, MNT_WAIT, 0); if (error && error != ENXIO) { fs->fs_clean = 0; goto fail; } } if (susp) vfs_write_resume(mp, VR_START_WRITE); if (ump->um_trim_tq != NULL) { while (ump->um_trim_inflight != 0) pause("ufsutr", hz); taskqueue_drain_all(ump->um_trim_tq); taskqueue_free(ump->um_trim_tq); free (ump->um_trimhash, M_TRIM); } g_topology_lock(); if (ump->um_fsckpid > 0) { /* * Return to normal read-only mode. */ error = g_access(ump->um_cp, 0, -1, 0); ump->um_fsckpid = 0; } g_vfs_close(ump->um_cp); g_topology_unlock(); atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0); vrele(ump->um_devvp); dev_rel(ump->um_dev); mtx_destroy(UFS_MTX(ump)); if (mp->mnt_gjprovider != NULL) { free(mp->mnt_gjprovider, M_UFSMNT); mp->mnt_gjprovider = NULL; } free(fs->fs_csp, M_UFSMNT); free(fs, M_UFSMNT); free(ump, M_UFSMNT); mp->mnt_data = NULL; MNT_ILOCK(mp); mp->mnt_flag &= ~MNT_LOCAL; MNT_IUNLOCK(mp); if (td->td_su == mp) { td->td_su = NULL; vfs_rel(mp); } return (error); fail: if (susp) vfs_write_resume(mp, VR_START_WRITE); fail1: #ifdef UFS_EXTATTR if (e_restart) { ufs_extattr_uepm_init(&ump->um_extattr); #ifdef UFS_EXTATTR_AUTOSTART (void) ufs_extattr_autostart(mp, td); #endif } #endif return (error); } /* * Flush out all the files in a filesystem. 
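 *
 * The order below matters: quotas are turned off first (under QUOTA),
 * unmounting snapshots adds FORCECLOSE for the final pass, system
 * vnodes are kept if quotaoff failed so the remaining dquots can still
 * be synced, and the device vnode is fsync'ed last to push the
 * filesystem metadata.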
*/ int ffs_flushfiles(mp, flags, td) struct mount *mp; int flags; struct thread *td; { struct ufsmount *ump; int qerror, error; ump = VFSTOUFS(mp); qerror = 0; #ifdef QUOTA if (mp->mnt_flag & MNT_QUOTA) { int i; error = vflush(mp, 0, SKIPSYSTEM|flags, td); if (error) return (error); for (i = 0; i < MAXQUOTAS; i++) { error = quotaoff(td, mp, i); if (error != 0) { if ((flags & EARLYFLUSH) == 0) return (error); else qerror = error; } } /* * Here we fall through to vflush again to ensure that * we have gotten rid of all the system vnodes, unless * quotas must not be closed. */ } #endif ASSERT_VOP_LOCKED(ump->um_devvp, "ffs_flushfiles"); if (ump->um_devvp->v_vflag & VV_COPYONWRITE) { if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0) return (error); ffs_snapshot_unmount(mp); flags |= FORCECLOSE; /* * Here we fall through to vflush again to ensure * that we have gotten rid of all the system vnodes. */ } /* * Do not close system files if quotas were not closed, to be * able to sync the remaining dquots. The freeblks softupdate * workitems might hold a reference on a dquot, preventing * quotaoff() from completing. Next round of * softdep_flushworklist() iteration should process the * blockers, allowing the next run of quotaoff() to finally * flush held dquots. * * Otherwise, flush all the files. */ if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0) return (error); /* * Flush filesystem metadata. */ vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY); error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td); VOP_UNLOCK(ump->um_devvp, 0); return (error); } /* * Get filesystem statistics. */ static int ffs_statfs(mp, sbp) struct mount *mp; struct statfs *sbp; { struct ufsmount *ump; struct fs *fs; ump = VFSTOUFS(mp); fs = ump->um_fs; if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC) panic("ffs_statfs"); sbp->f_version = STATFS_VERSION; sbp->f_bsize = fs->fs_fsize; sbp->f_iosize = fs->fs_bsize; sbp->f_blocks = fs->fs_dsize; UFS_LOCK(ump); sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag + fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks); sbp->f_bavail = freespace(fs, fs->fs_minfree) + dbtofsb(fs, fs->fs_pendingblocks); sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO; sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes; UFS_UNLOCK(ump); sbp->f_namemax = UFS_MAXNAMLEN; return (0); } static bool sync_doupdate(struct inode *ip) { return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) != 0); } /* * For a lazy sync, we only care about access times, quotas and the * superblock. Other filesystem changes are already converted to * cylinder group blocks or inode blocks updates and are written to * disk by syncer. */ static int ffs_sync_lazy(mp) struct mount *mp; { struct vnode *mvp, *vp; struct inode *ip; struct thread *td; int allerror, error; allerror = 0; td = curthread; if ((mp->mnt_flag & MNT_NOATIME) != 0) goto qupdate; MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) { if (vp->v_type == VNON) { VI_UNLOCK(vp); continue; } ip = VTOI(vp); /* * The IN_ACCESS flag is converted to IN_MODIFIED by * ufs_close() and ufs_getattr() by the calls to * ufs_itimes_locked(), without subsequent UFS_UPDATE(). * Test also all the other timestamp flags too, to pick up * any other cases that could be missed. 
*/ if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) { VI_UNLOCK(vp); continue; } if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, td)) != 0) continue; if (sync_doupdate(ip)) error = ffs_update(vp, 0); if (error != 0) allerror = error; vput(vp); } qupdate: #ifdef QUOTA qsync(mp); #endif if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 && (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0) allerror = error; return (allerror); } /* * Go through the disk queues to initiate sandbagged IO; * go through the inodes to write those that have been modified; * initiate the writing of the super block if it has been modified. * * Note: we are always called with the filesystem marked busy using * vfs_busy(). */ static int ffs_sync(mp, waitfor) struct mount *mp; int waitfor; { struct vnode *mvp, *vp, *devvp; struct thread *td; struct inode *ip; struct ufsmount *ump = VFSTOUFS(mp); struct fs *fs; int error, count, lockreq, allerror = 0; int suspend; int suspended; int secondary_writes; int secondary_accwrites; int softdep_deps; int softdep_accdeps; struct bufobj *bo; suspend = 0; suspended = 0; td = curthread; fs = ump->um_fs; if (fs->fs_fmod != 0 && fs->fs_ronly != 0 && ump->um_fsckpid == 0) panic("%s: ffs_sync: modification on read-only filesystem", fs->fs_fsmnt); if (waitfor == MNT_LAZY) { if (!rebooting) return (ffs_sync_lazy(mp)); waitfor = MNT_NOWAIT; } /* * Write back each (modified) inode. */ lockreq = LK_EXCLUSIVE | LK_NOWAIT; if (waitfor == MNT_SUSPEND) { suspend = 1; waitfor = MNT_WAIT; } if (waitfor == MNT_WAIT) lockreq = LK_EXCLUSIVE; lockreq |= LK_INTERLOCK | LK_SLEEPFAIL; loop: /* Grab snapshot of secondary write counts */ MNT_ILOCK(mp); secondary_writes = mp->mnt_secondary_writes; secondary_accwrites = mp->mnt_secondary_accwrites; MNT_IUNLOCK(mp); /* Grab snapshot of softdep dependency counts */ softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps); MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { /* * Depend on the vnode interlock to keep things stable enough * for a quick test. Since there might be hundreds of * thousands of vnodes, we cannot afford even a subroutine * call unless there's a good chance that we have work to do. */ if (vp->v_type == VNON) { VI_UNLOCK(vp); continue; } ip = VTOI(vp); if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 && vp->v_bufobj.bo_dirty.bv_cnt == 0) { VI_UNLOCK(vp); continue; } if ((error = vget(vp, lockreq, td)) != 0) { if (error == ENOENT || error == ENOLCK) { MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); goto loop; } continue; } if ((error = ffs_syncvnode(vp, waitfor, 0)) != 0) allerror = error; vput(vp); } /* * Force stale filesystem control information to be flushed. 
*/ if (waitfor == MNT_WAIT || rebooting) { if ((error = softdep_flushworklist(ump->um_mountp, &count, td))) allerror = error; /* Flushed work items may create new vnodes to clean */ if (allerror == 0 && count) goto loop; } #ifdef QUOTA qsync(mp); #endif devvp = ump->um_devvp; bo = &devvp->v_bufobj; BO_LOCK(bo); if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { BO_UNLOCK(bo); vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); error = VOP_FSYNC(devvp, waitfor, td); VOP_UNLOCK(devvp, 0); if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN)) error = ffs_sbupdate(ump, waitfor, 0); if (error != 0) allerror = error; if (allerror == 0 && waitfor == MNT_WAIT) goto loop; } else if (suspend != 0) { if (softdep_check_suspend(mp, devvp, softdep_deps, softdep_accdeps, secondary_writes, secondary_accwrites) != 0) { MNT_IUNLOCK(mp); goto loop; /* More work needed */ } mtx_assert(MNT_MTX(mp), MA_OWNED); mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED; MNT_IUNLOCK(mp); suspended = 1; } else BO_UNLOCK(bo); /* * Write back modified superblock. */ if (fs->fs_fmod != 0 && (error = ffs_sbupdate(ump, waitfor, suspended)) != 0) allerror = error; return (allerror); } int ffs_vget(mp, ino, flags, vpp) struct mount *mp; ino_t ino; int flags; struct vnode **vpp; { return (ffs_vgetf(mp, ino, flags, vpp, 0)); } int ffs_vgetf(mp, ino, flags, vpp, ffs_flags) struct mount *mp; ino_t ino; int flags; struct vnode **vpp; int ffs_flags; { struct fs *fs; struct inode *ip; struct ufsmount *ump; struct buf *bp; struct vnode *vp; int error; error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL); if (error || *vpp != NULL) return (error); /* * We must promote to an exclusive lock for vnode creation. This * can happen if lookup is passed LOCKSHARED. */ if ((flags & LK_TYPE_MASK) == LK_SHARED) { flags &= ~LK_TYPE_MASK; flags |= LK_EXCLUSIVE; } /* * We do not lock vnode creation as it is believed to be too * expensive for such rare case as simultaneous creation of vnode * for same ino by different processes. We just allow them to race * and check later to decide who wins. Let the race begin! */ ump = VFSTOUFS(mp); fs = ump->um_fs; ip = uma_zalloc(uma_inode, M_WAITOK | M_ZERO); /* Allocate a new vnode/inode. */ error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ? &ffs_vnodeops1 : &ffs_vnodeops2, &vp); if (error) { *vpp = NULL; uma_zfree(uma_inode, ip); return (error); } /* * FFS supports recursive locking. */ lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL); VN_LOCK_AREC(vp); vp->v_data = ip; vp->v_bufobj.bo_bsize = fs->fs_bsize; ip->i_vnode = vp; ip->i_ump = ump; ip->i_number = ino; ip->i_ea_refs = 0; ip->i_nextclustercg = -1; ip->i_flag = fs->fs_magic == FS_UFS1_MAGIC ? 0 : IN_UFS2; #ifdef QUOTA { int i; for (i = 0; i < MAXQUOTAS; i++) ip->i_dquot[i] = NODQUOT; } #endif if (ffs_flags & FFSV_FORCEINSMQ) vp->v_vflag |= VV_FORCEINSMQ; error = insmntque(vp, mp); if (error != 0) { uma_zfree(uma_inode, ip); *vpp = NULL; return (error); } vp->v_vflag &= ~VV_FORCEINSMQ; error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL); if (error || *vpp != NULL) return (error); /* Read in the disk contents for the inode, copy into the inode. */ error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)), (int)fs->fs_bsize, NOCRED, &bp); if (error) { /* * The inode does not contain anything useful, so it would * be misleading to leave it on its hash chain. With mode * still zero, it will be unlinked and returned to the free * list by vput(). 
*/ brelse(bp); vput(vp); *vpp = NULL; return (error); } if (I_IS_UFS1(ip)) ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK); else ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK); - ffs_load_inode(bp, ip, fs, ino); + if ((error = ffs_load_inode(bp, ip, fs, ino)) != 0) { + bqrelse(bp); + vput(vp); + *vpp = NULL; + return (error); + } if (DOINGSOFTDEP(vp)) softdep_load_inodeblock(ip); else ip->i_effnlink = ip->i_nlink; bqrelse(bp); /* * Initialize the vnode from the inode, check for aliases. * Note that the underlying vnode may have changed. */ error = ufs_vinit(mp, I_IS_UFS1(ip) ? &ffs_fifoops1 : &ffs_fifoops2, &vp); if (error) { vput(vp); *vpp = NULL; return (error); } /* * Finish inode initialization. */ if (vp->v_type != VFIFO) { /* FFS supports shared locking for all files except fifos. */ VN_LOCK_ASHARE(vp); } /* * Set up a generation number for this inode if it does not * already have one. This should only happen on old filesystems. */ if (ip->i_gen == 0) { while (ip->i_gen == 0) ip->i_gen = arc4random(); if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { ip->i_flag |= IN_MODIFIED; DIP_SET(ip, i_gen, ip->i_gen); } } #ifdef MAC if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) { /* * If this vnode is already allocated, and we're running * multi-label, attempt to perform a label association * from the extended attributes on the inode. */ error = mac_vnode_associate_extattr(mp, vp); if (error) { /* ufs_inactive will release ip->i_devvp ref. */ vput(vp); *vpp = NULL; return (error); } } #endif *vpp = vp; return (0); } /* * File handle to vnode * * Have to be really careful about stale file handles: * - check that the inode number is valid * - for UFS2 check that the inode number is initialized * - call ffs_vget() to get the locked inode * - check for an unallocated inode (i_mode == 0) * - check that the given client host has export rights and return * those rights via. exflagsp and credanonp */ static int ffs_fhtovp(mp, fhp, flags, vpp) struct mount *mp; struct fid *fhp; int flags; struct vnode **vpp; { struct ufid *ufhp; struct ufsmount *ump; struct fs *fs; struct cg *cgp; struct buf *bp; ino_t ino; u_int cg; int error; ufhp = (struct ufid *)fhp; ino = ufhp->ufid_ino; ump = VFSTOUFS(mp); fs = ump->um_fs; if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg) return (ESTALE); /* * Need to check if inode is initialized because UFS2 does lazy * initialization and nfs_fhtovp can offer arbitrary inode numbers. */ if (fs->fs_magic != FS_UFS2_MAGIC) return (ufs_fhtovp(mp, ufhp, flags, vpp)); cg = ino_to_cg(fs, ino); if ((error = ffs_getcg(fs, ump->um_devvp, cg, &bp, &cgp)) != 0) return (error); if (ino >= cg * fs->fs_ipg + cgp->cg_initediblk) { brelse(bp); return (ESTALE); } brelse(bp); return (ufs_fhtovp(mp, ufhp, flags, vpp)); } /* * Initialize the filesystem. */ static int ffs_init(vfsp) struct vfsconf *vfsp; { ffs_susp_initialize(); softdep_initialize(); return (ufs_init(vfsp)); } /* * Undo the work of ffs_init(). */ static int ffs_uninit(vfsp) struct vfsconf *vfsp; { int ret; ret = ufs_uninit(vfsp); softdep_uninitialize(); ffs_susp_uninitialize(); return (ret); } /* * Structure used to pass information from ffs_sbupdate to its * helper routine ffs_use_bwrite. */ struct devfd { struct ufsmount *ump; struct buf *sbbp; int waitfor; int suspended; int error; }; /* * Write a superblock and associated information back to disk. 
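 *
 * The ump/waitfor/suspended triple is packed into a struct devfd and
 * passed to ffs_sbput() with ffs_use_bwrite() as the output routine;
 * the superblock's own buffer doubles as the lock that serializes
 * concurrent ffs_sbupdate() calls.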
*/ int ffs_sbupdate(ump, waitfor, suspended) struct ufsmount *ump; int waitfor; int suspended; { struct fs *fs; struct buf *sbbp; struct devfd devfd; fs = ump->um_fs; if (fs->fs_ronly == 1 && (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) != (MNT_RDONLY | MNT_UPDATE) && ump->um_fsckpid == 0) panic("ffs_sbupdate: write read-only filesystem"); /* * We use the superblock's buf to serialize calls to ffs_sbupdate(). */ sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc), (int)fs->fs_sbsize, 0, 0, 0); /* * Initialize info needed for write function. */ devfd.ump = ump; devfd.sbbp = sbbp; devfd.waitfor = waitfor; devfd.suspended = suspended; devfd.error = 0; return (ffs_sbput(&devfd, fs, fs->fs_sblockloc, ffs_use_bwrite)); } /* * Write function for use by filesystem-layer routines. */ static int ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size) { struct devfd *devfdp; struct ufsmount *ump; struct buf *bp; struct fs *fs; int error; devfdp = devfd; ump = devfdp->ump; fs = ump->um_fs; /* * Writing the superblock summary information. */ if (loc != fs->fs_sblockloc) { bp = getblk(ump->um_devvp, btodb(loc), size, 0, 0, 0); bcopy(buf, bp->b_data, (u_int)size); if (devfdp->suspended) bp->b_flags |= B_VALIDSUSPWRT; if (devfdp->waitfor != MNT_WAIT) bawrite(bp); else if ((error = bwrite(bp)) != 0) devfdp->error = error; return (0); } /* * Writing the superblock itself. We need to do special checks for it. */ bp = devfdp->sbbp; if (devfdp->error != 0) { brelse(bp); return (devfdp->error); } if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 && (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) { printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n", fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1); fs->fs_sblockloc = SBLOCK_UFS1; } if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 && (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) { printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n", fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2); fs->fs_sblockloc = SBLOCK_UFS2; } if (MOUNTEDSOFTDEP(ump->um_mountp)) softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp); bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize); ffs_oldfscompat_write((struct fs *)bp->b_data, ump); if (devfdp->suspended) bp->b_flags |= B_VALIDSUSPWRT; if (devfdp->waitfor != MNT_WAIT) bawrite(bp); else if ((error = bwrite(bp)) != 0) devfdp->error = error; return (devfdp->error); } static int ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp, int attrnamespace, const char *attrname) { #ifdef UFS_EXTATTR return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace, attrname)); #else return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname)); #endif } static void ffs_ifree(struct ufsmount *ump, struct inode *ip) { if (ump->um_fstype == UFS1 && ip->i_din1 != NULL) uma_zfree(uma_ufs1, ip->i_din1); else if (ip->i_din2 != NULL) uma_zfree(uma_ufs2, ip->i_din2); uma_zfree(uma_inode, ip); } static int dobkgrdwrite = 1; SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0, "Do background writes (honoring the BV_BKGRDWRITE flag)?"); /* * Complete a background write started from bwrite. */ static void ffs_backgroundwritedone(struct buf *bp) { struct bufobj *bufobj; struct buf *origbp; /* * Find the original buffer that we are writing. 
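/*
 * Illustrative sketch (standalone userland C, not part of this patch):
 * ffs_use_bwrite() above quietly corrects fs_sblockloc when an old,
 * never-updated superblock carries a location that does not match its UFS
 * version.  The sketch reproduces just that decision; the magic numbers and
 * standard locations mirror what sys/ufs/ffs/fs.h defines, but treat them
 * here as assumptions of the demo rather than authoritative values.
 */
#include <stdint.h>
#include <stdio.h>

#define	DEMO_UFS1_MAGIC		0x011954
#define	DEMO_UFS2_MAGIC		0x19540119
#define	DEMO_SBLOCK_UFS1	8192
#define	DEMO_SBLOCK_UFS2	65536
#define	DEMO_FLAGS_UPDATED	0x80	/* stand-in for FS_FLAGS_UPDATED */

struct demo_sb {
	uint32_t magic;
	int64_t	 sblockloc;
	uint32_t old_flags;
};

static void
demo_fix_sblockloc(struct demo_sb *fs)
{
	int64_t want;

	if (fs->magic == DEMO_UFS1_MAGIC)
		want = DEMO_SBLOCK_UFS1;
	else if (fs->magic == DEMO_UFS2_MAGIC)
		want = DEMO_SBLOCK_UFS2;
	else
		return;
	/* Only "old" superblocks that were never updated get corrected. */
	if (fs->sblockloc != want &&
	    (fs->old_flags & DEMO_FLAGS_UPDATED) == 0) {
		printf("correcting sblockloc from %jd to %jd\n",
		    (intmax_t)fs->sblockloc, (intmax_t)want);
		fs->sblockloc = want;
	}
}

int
main(void)
{
	struct demo_sb fs = { DEMO_UFS1_MAGIC, 65536, 0 };

	demo_fix_sblockloc(&fs);	/* prints a correction to 8192 */
	return (0);
}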
*/ bufobj = bp->b_bufobj; BO_LOCK(bufobj); if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL) panic("backgroundwritedone: lost buffer"); /* * We should mark the cylinder group buffer origbp as * dirty, to not loose the failed write. */ if ((bp->b_ioflags & BIO_ERROR) != 0) origbp->b_vflags |= BV_BKGRDERR; BO_UNLOCK(bufobj); /* * Process dependencies then return any unfinished ones. */ if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0) buf_complete(bp); #ifdef SOFTUPDATES if (!LIST_EMPTY(&bp->b_dep)) softdep_move_dependencies(bp, origbp); #endif /* * This buffer is marked B_NOCACHE so when it is released * by biodone it will be tossed. */ bp->b_flags |= B_NOCACHE; bp->b_flags &= ~B_CACHE; pbrelvp(bp); /* * Prevent brelse() from trying to keep and re-dirtying bp on * errors. It causes b_bufobj dereference in * bdirty()/reassignbuf(), and b_bufobj was cleared in * pbrelvp() above. */ if ((bp->b_ioflags & BIO_ERROR) != 0) bp->b_flags |= B_INVAL; bufdone(bp); BO_LOCK(bufobj); /* * Clear the BV_BKGRDINPROG flag in the original buffer * and awaken it if it is waiting for the write to complete. * If BV_BKGRDINPROG is not set in the original buffer it must * have been released and re-instantiated - which is not legal. */ KASSERT((origbp->b_vflags & BV_BKGRDINPROG), ("backgroundwritedone: lost buffer2")); origbp->b_vflags &= ~BV_BKGRDINPROG; if (origbp->b_vflags & BV_BKGRDWAIT) { origbp->b_vflags &= ~BV_BKGRDWAIT; wakeup(&origbp->b_xflags); } BO_UNLOCK(bufobj); } /* * Write, release buffer on completion. (Done by iodone * if async). Do not bother writing anything if the buffer * is invalid. * * Note that we set B_CACHE here, indicating that buffer is * fully valid and thus cacheable. This is true even of NFS * now so we set it generally. This could be set either here * or in biodone() since the I/O is synchronous. We put it * here. */ static int ffs_bufwrite(struct buf *bp) { struct buf *newbp; struct cg *cgp; CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); if (bp->b_flags & B_INVAL) { brelse(bp); return (0); } if (!BUF_ISLOCKED(bp)) panic("bufwrite: buffer is not busy???"); /* * If a background write is already in progress, delay * writing this block if it is asynchronous. Otherwise * wait for the background write to complete. */ BO_LOCK(bp->b_bufobj); if (bp->b_vflags & BV_BKGRDINPROG) { if (bp->b_flags & B_ASYNC) { BO_UNLOCK(bp->b_bufobj); bdwrite(bp); return (0); } bp->b_vflags |= BV_BKGRDWAIT; msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO, "bwrbg", 0); if (bp->b_vflags & BV_BKGRDINPROG) panic("bufwrite: still writing"); } bp->b_vflags &= ~BV_BKGRDERR; BO_UNLOCK(bp->b_bufobj); /* * If this buffer is marked for background writing and we * do not have to wait for it, make a copy and write the * copy so as to leave this buffer ready for further use. * * This optimization eats a lot of memory. If we have a page * or buffer shortfall we can't do it. 
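/*
 * Illustrative sketch (standalone userland C, not part of this patch): the
 * BV_BKGRDINPROG / BV_BKGRDWAIT interplay above is a "wait until the
 * in-progress flag clears" handshake built on msleep()/wakeup().  Below is
 * a minimal pthread analogue (build with -lpthread); the flag names are
 * local stand-ins, not the kernel's.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define	DEMO_BKGRDINPROG	0x1
#define	DEMO_BKGRDWAIT		0x2

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t demo_cv = PTHREAD_COND_INITIALIZER;
static unsigned demo_vflags = DEMO_BKGRDINPROG;	/* background write running */

static void *
demo_background_write(void *arg)
{
	(void)arg;
	usleep(100 * 1000);			/* pretend to write the copy */
	pthread_mutex_lock(&demo_lock);
	demo_vflags &= ~DEMO_BKGRDINPROG;
	if (demo_vflags & DEMO_BKGRDWAIT) {	/* someone is sleeping */
		demo_vflags &= ~DEMO_BKGRDWAIT;
		pthread_cond_broadcast(&demo_cv);	/* like wakeup() */
	}
	pthread_mutex_unlock(&demo_lock);
	return (NULL);
}

int
main(void)
{
	pthread_t td;

	pthread_create(&td, NULL, demo_background_write, NULL);
	pthread_mutex_lock(&demo_lock);
	while (demo_vflags & DEMO_BKGRDINPROG) {
		demo_vflags |= DEMO_BKGRDWAIT;
		pthread_cond_wait(&demo_cv, &demo_lock);	/* like msleep() */
	}
	pthread_mutex_unlock(&demo_lock);
	printf("foreground write may proceed\n");
	pthread_join(td, NULL);
	return (0);
}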
*/ if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) && (bp->b_flags & B_ASYNC) && !vm_page_count_severe() && !buf_dirty_count_severe()) { KASSERT(bp->b_iodone == NULL, ("bufwrite: needs chained iodone (%p)", bp->b_iodone)); /* get a new block */ newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD); if (newbp == NULL) goto normal_write; KASSERT(buf_mapped(bp), ("Unmapped cg")); memcpy(newbp->b_data, bp->b_data, bp->b_bufsize); BO_LOCK(bp->b_bufobj); bp->b_vflags |= BV_BKGRDINPROG; BO_UNLOCK(bp->b_bufobj); newbp->b_xflags |= (bp->b_xflags & BX_FSPRIV) | BX_BKGRDMARKER; newbp->b_lblkno = bp->b_lblkno; newbp->b_blkno = bp->b_blkno; newbp->b_offset = bp->b_offset; newbp->b_iodone = ffs_backgroundwritedone; newbp->b_flags |= B_ASYNC; newbp->b_flags &= ~B_INVAL; pbgetvp(bp->b_vp, newbp); #ifdef SOFTUPDATES /* * Move over the dependencies. If there are rollbacks, * leave the parent buffer dirtied as it will need to * be written again. */ if (LIST_EMPTY(&bp->b_dep) || softdep_move_dependencies(bp, newbp) == 0) bundirty(bp); #else bundirty(bp); #endif /* * Initiate write on the copy, release the original. The * BKGRDINPROG flag prevents it from going away until * the background write completes. We have to recalculate * its check hash in case the buffer gets freed and then * reconstituted from the buffer cache during a later read. */ if ((bp->b_xflags & BX_CYLGRP) != 0) { cgp = (struct cg *)bp->b_data; cgp->cg_ckhash = 0; cgp->cg_ckhash = calculate_crc32c(~0L, bp->b_data, bp->b_bcount); } bqrelse(bp); bp = newbp; } else /* Mark the buffer clean */ bundirty(bp); /* Let the normal bufwrite do the rest for us */ normal_write: /* * If we are writing a cylinder group, update its time. */ if ((bp->b_xflags & BX_CYLGRP) != 0) { cgp = (struct cg *)bp->b_data; cgp->cg_old_time = cgp->cg_time = time_second; } return (bufwrite(bp)); } static void ffs_geom_strategy(struct bufobj *bo, struct buf *bp) { struct vnode *vp; struct buf *tbp; int error, nocopy; vp = bo2vnode(bo); if (bp->b_iocmd == BIO_WRITE) { if ((bp->b_flags & B_VALIDSUSPWRT) == 0 && bp->b_vp != NULL && bp->b_vp->v_mount != NULL && (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0) panic("ffs_geom_strategy: bad I/O"); nocopy = bp->b_flags & B_NOCOPY; bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY); if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 && vp->v_rdev->si_snapdata != NULL) { if ((bp->b_flags & B_CLUSTER) != 0) { runningbufwakeup(bp); TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head, b_cluster.cluster_entry) { error = ffs_copyonwrite(vp, tbp); if (error != 0 && error != EOPNOTSUPP) { bp->b_error = error; bp->b_ioflags |= BIO_ERROR; bufdone(bp); return; } } bp->b_runningbufspace = bp->b_bufsize; atomic_add_long(&runningbufspace, bp->b_runningbufspace); } else { error = ffs_copyonwrite(vp, bp); if (error != 0 && error != EOPNOTSUPP) { bp->b_error = error; bp->b_ioflags |= BIO_ERROR; bufdone(bp); return; } } } #ifdef SOFTUPDATES if ((bp->b_flags & B_CLUSTER) != 0) { TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head, b_cluster.cluster_entry) { if (!LIST_EMPTY(&tbp->b_dep)) buf_start(tbp); } } else { if (!LIST_EMPTY(&bp->b_dep)) buf_start(bp); } #endif /* * Check for metadata that needs check-hashes and update them. 
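/*
 * Illustrative sketch (standalone userland C, not part of this patch):
 * before handing the copy to the background write, ffs_bufwrite() above
 * zeroes cg_ckhash and recomputes the CRC32C over the whole cylinder group
 * buffer, so the embedded hash never covers itself.  The sketch shows that
 * zero-then-hash pattern on a made-up structure using a plain bitwise
 * CRC32C (Castagnoli polynomial); it is not the kernel's calculate_crc32c()
 * and the output convention may differ, only the pattern is the point.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_cg {
	uint32_t magic;
	uint32_t ckhash;		/* embedded check hash, like cg_ckhash */
	uint8_t	 data[56];
};

static uint32_t
demo_crc32c(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len-- > 0) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0x82F63B78U : crc >> 1;
	}
	return (crc);
}

static void
demo_seal(struct demo_cg *cg)
{
	cg->ckhash = 0;			/* exclude the field from its own hash */
	cg->ckhash = demo_crc32c(~0U, cg, sizeof(*cg));
}

static int
demo_verify(struct demo_cg *cg)
{
	uint32_t saved = cg->ckhash, calc;

	cg->ckhash = 0;
	calc = demo_crc32c(~0U, cg, sizeof(*cg));
	cg->ckhash = saved;
	return (calc == saved);
}

int
main(void)
{
	struct demo_cg cg;

	memset(&cg, 0, sizeof(cg));
	cg.magic = 0x090255;
	demo_seal(&cg);
	printf("verify: %s\n", demo_verify(&cg) ? "ok" : "bad");
	cg.data[3] ^= 1;			/* corrupt one bit */
	printf("after corruption: %s\n", demo_verify(&cg) ? "ok" : "bad");
	return (0);
}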
*/ switch (bp->b_xflags & BX_FSPRIV) { case BX_CYLGRP: ((struct cg *)bp->b_data)->cg_ckhash = 0; ((struct cg *)bp->b_data)->cg_ckhash = calculate_crc32c(~0L, bp->b_data, bp->b_bcount); break; case BX_SUPERBLOCK: case BX_INODE: case BX_INDIR: case BX_DIR: printf("Check-hash write is unimplemented!!!\n"); break; case 0: break; default: printf("multiple buffer types 0x%b\n", (u_int)(bp->b_xflags & BX_FSPRIV), PRINT_UFS_BUF_XFLAGS); break; } } g_vfs_strategy(bo, bp); } int ffs_own_mount(const struct mount *mp) { if (mp->mnt_op == &ufs_vfsops) return (1); return (0); } #ifdef DDB #ifdef SOFTUPDATES /* defined in ffs_softdep.c */ extern void db_print_ffs(struct ufsmount *ump); DB_SHOW_COMMAND(ffs, db_show_ffs) { struct mount *mp; struct ufsmount *ump; if (have_addr) { ump = VFSTOUFS((struct mount *)addr); db_print_ffs(ump); return; } TAILQ_FOREACH(mp, &mountlist, mnt_list) { if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name)) db_print_ffs(VFSTOUFS(mp)); } } #endif /* SOFTUPDATES */ #endif /* DDB */ Index: projects/clang700-import/sys/vm/uma_core.c =================================================================== --- projects/clang700-import/sys/vm/uma_core.c (revision 340426) +++ projects/clang700-import/sys/vm/uma_core.c (revision 340427) @@ -1,4265 +1,4308 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson * Copyright (c) 2004, 2005 Bosko Milekic * Copyright (c) 2004-2006 Robert N. M. Watson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * uma_core.c Implementation of the Universal Memory allocator * * This allocator is intended to replace the multitude of similar object caches * in the standard FreeBSD kernel. The intent is to be flexible as well as * efficient. A primary design goal is to return unused memory to the rest of * the system. This will make the system as a whole more flexible due to the * ability to move memory to subsystems which most need it instead of leaving * pools of reserved memory unused. * * The basic ideas stem from similar slab/zone based allocators whose algorithms * are well known. 
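/*
 * Illustrative sketch (standalone userland C, not part of this patch): the
 * switch on (bp->b_xflags & BX_FSPRIV) above relies on each buffer-type
 * flag being a distinct bit, so any combination of two or more falls
 * through to the "multiple buffer types" default.  Below is the usual
 * single-bit test, (x & (x - 1)) == 0, with made-up flag values that are
 * not the kernel's BX_* constants.
 */
#include <stdint.h>
#include <stdio.h>

#define	DEMO_CYLGRP	0x01
#define	DEMO_SUPERBLOCK	0x02
#define	DEMO_INODE	0x04
#define	DEMO_FSPRIV	(DEMO_CYLGRP | DEMO_SUPERBLOCK | DEMO_INODE)

static const char *
demo_classify(uint32_t xflags)
{
	uint32_t type = xflags & DEMO_FSPRIV;

	if (type == 0)
		return ("no private buffer type");
	if ((type & (type - 1)) != 0)
		return ("multiple buffer types (likely a bug)");
	switch (type) {
	case DEMO_CYLGRP:	return ("cylinder group");
	case DEMO_SUPERBLOCK:	return ("superblock");
	case DEMO_INODE:	return ("inode block");
	default:		return ("unknown");
	}
}

int
main(void)
{
	printf("%s\n", demo_classify(DEMO_CYLGRP));
	printf("%s\n", demo_classify(DEMO_CYLGRP | DEMO_INODE));
	printf("%s\n", demo_classify(0));
	return (0);
}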
* */ /* * TODO: * - Improve memory usage for large allocations * - Investigate cache size adjustments */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_param.h" #include "opt_vm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEBUG_MEMGUARD #include #endif /* * This is the zone and keg from which all zones are spawned. */ static uma_zone_t kegs; static uma_zone_t zones; /* This is the zone from which all offpage uma_slab_ts are allocated. */ static uma_zone_t slabzone; /* * The initial hash tables come out of this zone so they can be allocated * prior to malloc coming up. */ static uma_zone_t hashzone; /* The boot-time adjusted value for cache line alignment. */ int uma_align_cache = 64 - 1; static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets"); /* * Are we allowed to allocate buckets? */ static int bucketdisable = 1; /* Linked list of all kegs in the system */ static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs); /* Linked list of all cache-only zones in the system */ static LIST_HEAD(,uma_zone) uma_cachezones = LIST_HEAD_INITIALIZER(uma_cachezones); /* This RW lock protects the keg list */ static struct rwlock_padalign __exclusive_cache_line uma_rwlock; /* * Pointer and counter to pool of pages, that is preallocated at * startup to bootstrap UMA. */ static char *bootmem; static int boot_pages; static struct sx uma_drain_lock; /* kmem soft limit. */ static unsigned long uma_kmem_limit = LONG_MAX; static volatile unsigned long uma_kmem_total; /* Is the VM done starting up? */ static enum { BOOT_COLD = 0, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS, BOOT_RUNNING } booted = BOOT_COLD; /* * This is the handle used to schedule events that need to happen * outside of the allocation fast path. */ static struct callout uma_callout; #define UMA_TIMEOUT 20 /* Seconds for callout interval. */ /* * This structure is passed as the zone ctor arg so that I don't have to create * a special allocation function just for zones. */ struct uma_zctor_args { const char *name; size_t size; uma_ctor ctor; uma_dtor dtor; uma_init uminit; uma_fini fini; uma_import import; uma_release release; void *arg; uma_keg_t keg; int align; uint32_t flags; }; struct uma_kctor_args { uma_zone_t zone; size_t size; uma_init uminit; uma_fini fini; int align; uint32_t flags; }; struct uma_bucket_zone { uma_zone_t ubz_zone; char *ubz_name; int ubz_entries; /* Number of items it can hold. */ int ubz_maxsize; /* Maximum allocation size per-item. */ }; /* * Compute the actual number of bucket entries to pack them in power * of two sizes for more efficient space utilization. 
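/*
 * Illustrative sketch (standalone userland C, not part of this patch): the
 * BUCKET_SIZE(n) macro defined just below converts a target allocation of n
 * pointer-sized slots into the number of item slots left once the bucket
 * header is carved out, so each bucket zone's total size stays a "nice"
 * multiple of the pointer size.  The header layout below is a stand-in; the
 * real struct uma_bucket may differ.
 */
#include <stdio.h>

struct demo_bucket {			/* stand-in for struct uma_bucket */
	struct demo_bucket *link_le_next;
	struct demo_bucket **link_le_prev;
	short	cnt;
	short	entries;
	/* item pointers follow */
};

#define	DEMO_BUCKET_SIZE(n)						\
	(((sizeof(void *) * (n)) - sizeof(struct demo_bucket)) / sizeof(void *))

int
main(void)
{
	const int sizes[] = { 4, 6, 8, 12, 16, 32, 64, 128, 256 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%3d pointer slots -> %zu usable item slots "
		    "(%zu byte allocation)\n", sizes[i],
		    DEMO_BUCKET_SIZE(sizes[i]),
		    sizeof(void *) * sizes[i]);
	return (0);
}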
*/ #define BUCKET_SIZE(n) \ (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *)) #define BUCKET_MAX BUCKET_SIZE(256) struct uma_bucket_zone bucket_zones[] = { { NULL, "4 Bucket", BUCKET_SIZE(4), 4096 }, { NULL, "6 Bucket", BUCKET_SIZE(6), 3072 }, { NULL, "8 Bucket", BUCKET_SIZE(8), 2048 }, { NULL, "12 Bucket", BUCKET_SIZE(12), 1536 }, { NULL, "16 Bucket", BUCKET_SIZE(16), 1024 }, { NULL, "32 Bucket", BUCKET_SIZE(32), 512 }, { NULL, "64 Bucket", BUCKET_SIZE(64), 256 }, { NULL, "128 Bucket", BUCKET_SIZE(128), 128 }, { NULL, "256 Bucket", BUCKET_SIZE(256), 64 }, { NULL, NULL, 0} }; /* * Flags and enumerations to be passed to internal functions. */ enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI }; #define UMA_ANYDOMAIN -1 /* Special value for domain search. */ /* Prototypes.. */ int uma_startup_count(int); void uma_startup(void *, int); void uma_startup1(void); void uma_startup2(void); static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int); static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int); static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int); static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int); static void page_free(void *, vm_size_t, uint8_t); static void pcpu_page_free(void *, vm_size_t, uint8_t); static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int); static void cache_drain(uma_zone_t); static void bucket_drain(uma_zone_t, uma_bucket_t); static void bucket_cache_drain(uma_zone_t zone); static int keg_ctor(void *, int, void *, int); static void keg_dtor(void *, int, void *); static int zone_ctor(void *, int, void *, int); static void zone_dtor(void *, int, void *); static int zero_init(void *, int, int); static void keg_small_init(uma_keg_t keg); static void keg_large_init(uma_keg_t keg); static void zone_foreach(void (*zfunc)(uma_zone_t)); static void zone_timeout(uma_zone_t zone); static int hash_alloc(struct uma_hash *); static int hash_expand(struct uma_hash *, struct uma_hash *); static void hash_free(struct uma_hash *hash); static void uma_timeout(void *); static void uma_startup3(void); static void *zone_alloc_item(uma_zone_t, void *, int, int); static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip); static void bucket_enable(void); static void bucket_init(void); static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int); static void bucket_free(uma_zone_t zone, uma_bucket_t, void *); static void bucket_zone_drain(void); static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int); static uma_slab_t zone_fetch_slab(uma_zone_t, uma_keg_t, int, int); static uma_slab_t zone_fetch_slab_multi(uma_zone_t, uma_keg_t, int, int); static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab); static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item); static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini, int align, uint32_t flags); static int zone_import(uma_zone_t, void **, int, int, int); static void zone_release(uma_zone_t, void **, int); static void uma_zero_item(void *, uma_zone_t); void uma_print_zone(uma_zone_t); void uma_print_stats(void); static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS); static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS); #ifdef INVARIANTS static bool uma_dbg_kskip(uma_keg_t keg, void *mem); static bool uma_dbg_zskip(uma_zone_t zone, void *mem); static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item); static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void 
*item); static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0, "Memory allocation debugging"); static u_int dbg_divisor = 1; SYSCTL_UINT(_vm_debug, OID_AUTO, divisor, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0, "Debug & thrash every this item in memory allocator"); static counter_u64_t uma_dbg_cnt = EARLY_COUNTER; static counter_u64_t uma_skip_cnt = EARLY_COUNTER; SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD, &uma_dbg_cnt, "memory items debugged"); SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD, &uma_skip_cnt, "memory items skipped, not debugged"); #endif SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL); SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT, 0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones"); SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT, 0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats"); static int zone_warnings = 1; SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0, "Warn when UMA zones becomes full"); /* Adjust bytes under management by UMA. */ static inline void uma_total_dec(unsigned long size) { atomic_subtract_long(&uma_kmem_total, size); } static inline void uma_total_inc(unsigned long size) { if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit) uma_reclaim_wakeup(); } /* * This routine checks to see whether or not it's safe to enable buckets. */ static void bucket_enable(void) { bucketdisable = vm_page_count_min(); } /* * Initialize bucket_zones, the array of zones of buckets of various sizes. * * For each zone, calculate the memory required for each bucket, consisting * of the header and an array of pointers. */ static void bucket_init(void) { struct uma_bucket_zone *ubz; int size; for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) { size = roundup(sizeof(struct uma_bucket), sizeof(void *)); size += sizeof(void *) * ubz->ubz_entries; ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size, NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET | UMA_ZONE_NUMA); } } /* * Given a desired number of entries for a bucket, return the zone from which * to allocate the bucket. */ static struct uma_bucket_zone * bucket_zone_lookup(int entries) { struct uma_bucket_zone *ubz; for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) if (ubz->ubz_entries >= entries) return (ubz); ubz--; return (ubz); } static int bucket_select(int size) { struct uma_bucket_zone *ubz; ubz = &bucket_zones[0]; if (size > ubz->ubz_maxsize) return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1); for (; ubz->ubz_entries != 0; ubz++) if (ubz->ubz_maxsize < size) break; ubz--; return (ubz->ubz_entries); } static uma_bucket_t bucket_alloc(uma_zone_t zone, void *udata, int flags) { struct uma_bucket_zone *ubz; uma_bucket_t bucket; /* * This is to stop us from allocating per cpu buckets while we're * running out of vm.boot_pages. Otherwise, we would exhaust the * boot pages. This also prevents us from allocating buckets in * low memory situations. */ if (bucketdisable) return (NULL); /* * To limit bucket recursion we store the original zone flags * in a cookie passed via zalloc_arg/zfree_arg. This allows the * NOVM flag to persist even through deep recursions. We also * store ZFLAG_BUCKET once we have recursed attempting to allocate * a bucket for a bucket zone so we do not allow infinite bucket * recursion. 
This cookie will even persist to frees of unused * buckets via the allocation path or bucket allocations in the * free path. */ if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0) udata = (void *)(uintptr_t)zone->uz_flags; else { if ((uintptr_t)udata & UMA_ZFLAG_BUCKET) return (NULL); udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET); } if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY) flags |= M_NOVM; ubz = bucket_zone_lookup(zone->uz_count); if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0) ubz++; bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags); if (bucket) { #ifdef INVARIANTS bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries); #endif bucket->ub_cnt = 0; bucket->ub_entries = ubz->ubz_entries; } return (bucket); } static void bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata) { struct uma_bucket_zone *ubz; KASSERT(bucket->ub_cnt == 0, ("bucket_free: Freeing a non free bucket.")); if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0) udata = (void *)(uintptr_t)zone->uz_flags; ubz = bucket_zone_lookup(bucket->ub_entries); uma_zfree_arg(ubz->ubz_zone, bucket, udata); } static void bucket_zone_drain(void) { struct uma_bucket_zone *ubz; for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) zone_drain(ubz->ubz_zone); } +static uma_bucket_t +zone_try_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom, const bool ws) +{ + uma_bucket_t bucket; + + ZONE_LOCK_ASSERT(zone); + + if ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) { + MPASS(zdom->uzd_nitems >= bucket->ub_cnt); + LIST_REMOVE(bucket, ub_link); + zdom->uzd_nitems -= bucket->ub_cnt; + if (ws && zdom->uzd_imin > zdom->uzd_nitems) + zdom->uzd_imin = zdom->uzd_nitems; + } + return (bucket); +} + static void +zone_put_bucket(uma_zone_t zone, uma_zone_domain_t zdom, uma_bucket_t bucket, + const bool ws) +{ + + ZONE_LOCK_ASSERT(zone); + + LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link); + zdom->uzd_nitems += bucket->ub_cnt; + if (ws && zdom->uzd_imax < zdom->uzd_nitems) + zdom->uzd_imax = zdom->uzd_nitems; +} + +static void zone_log_warning(uma_zone_t zone) { static const struct timeval warninterval = { 300, 0 }; if (!zone_warnings || zone->uz_warning == NULL) return; if (ratecheck(&zone->uz_ratecheck, &warninterval)) printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning); } static inline void zone_maxaction(uma_zone_t zone) { if (zone->uz_maxaction.ta_func != NULL) taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction); } static void zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t)) { uma_klink_t klink; LIST_FOREACH(klink, &zone->uz_kegs, kl_link) kegfn(klink->kl_keg); } /* * Routine called by timeout which is used to fire off some time interval * based calculations. (stats, hash size, etc.) * * Arguments: * arg Unused * * Returns: * Nothing */ static void uma_timeout(void *unused) { bucket_enable(); zone_foreach(zone_timeout); /* Reschedule this event */ callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); } /* + * Update the working set size estimate for the zone's bucket cache. + * The constants chosen here are somewhat arbitrary. With an update period of + * 20s (UMA_TIMEOUT), this estimate is dominated by zone activity over the + * last 100s. + */ +static void +zone_domain_update_wss(uma_zone_domain_t zdom) +{ + long wss; + + MPASS(zdom->uzd_imax >= zdom->uzd_imin); + wss = zdom->uzd_imax - zdom->uzd_imin; + zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems; + zdom->uzd_wss = (3 * wss + 2 * zdom->uzd_wss) / 5; +} + +/* * Routine to perform timeout driven calculations. 
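/*
 * Illustrative sketch (standalone userland C, not part of this patch): the
 * new zone_try_fetch_bucket()/zone_put_bucket() helpers above keep a
 * per-domain item count plus low/high watermarks (uzd_imin/uzd_imax), and
 * zone_domain_update_wss() below folds each period's swing into a smoothed
 * working-set estimate via (3 * wss + 2 * wss_prev) / 5.  The sketch mimics
 * that bookkeeping with plain integers; "ws" decides whether an operation
 * should move the watermarks, just as in the patch.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_zdom {
	long nitems;		/* items in cached buckets (uzd_nitems) */
	long imin;		/* period low watermark (uzd_imin) */
	long imax;		/* period high watermark (uzd_imax) */
	long wss;		/* smoothed working-set estimate (uzd_wss) */
};

static void
demo_put_bucket(struct demo_zdom *z, long cnt, bool ws)
{
	z->nitems += cnt;
	if (ws && z->imax < z->nitems)
		z->imax = z->nitems;
}

static void
demo_fetch_bucket(struct demo_zdom *z, long cnt, bool ws)
{
	z->nitems -= cnt;
	if (ws && z->imin > z->nitems)
		z->imin = z->nitems;
}

/* Run once per timeout period, like zone_domain_update_wss(). */
static void
demo_update_wss(struct demo_zdom *z)
{
	long swing = z->imax - z->imin;

	z->imax = z->imin = z->nitems;
	z->wss = (3 * swing + 2 * z->wss) / 5;
}

int
main(void)
{
	struct demo_zdom z = { 0, 0, 0, 0 };

	/* Period 1: a busy zone cycles 128-item buckets in and out. */
	for (int i = 0; i < 4; i++) {
		demo_put_bucket(&z, 128, true);
		demo_fetch_bucket(&z, 128, true);
	}
	demo_update_wss(&z);
	printf("busy period:  wss = %ld\n", z.wss);

	/* Later periods: the zone goes idle and the estimate decays. */
	for (int i = 0; i < 5; i++) {
		demo_update_wss(&z);
		printf("idle period:  wss = %ld\n", z.wss);
	}
	return (0);
}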
This expands the * hashes and does per cpu statistics aggregation. * * Returns nothing. */ static void keg_timeout(uma_keg_t keg) { KEG_LOCK(keg); /* * Expand the keg hash table. * * This is done if the number of slabs is larger than the hash size. * What I'm trying to do here is completely reduce collisions. This * may be a little aggressive. Should I allow for two collisions max? */ if (keg->uk_flags & UMA_ZONE_HASH && keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) { struct uma_hash newhash; struct uma_hash oldhash; int ret; /* * This is so involved because allocating and freeing * while the keg lock is held will lead to deadlock. * I have to do everything in stages and check for * races. */ newhash = keg->uk_hash; KEG_UNLOCK(keg); ret = hash_alloc(&newhash); KEG_LOCK(keg); if (ret) { if (hash_expand(&keg->uk_hash, &newhash)) { oldhash = keg->uk_hash; keg->uk_hash = newhash; } else oldhash = newhash; KEG_UNLOCK(keg); hash_free(&oldhash); return; } } KEG_UNLOCK(keg); } static void zone_timeout(uma_zone_t zone) { + int i; zone_foreach_keg(zone, &keg_timeout); + + ZONE_LOCK(zone); + for (i = 0; i < vm_ndomains; i++) + zone_domain_update_wss(&zone->uz_domain[i]); + ZONE_UNLOCK(zone); } /* * Allocate and zero fill the next sized hash table from the appropriate * backing store. * * Arguments: * hash A new hash structure with the old hash size in uh_hashsize * * Returns: * 1 on success and 0 on failure. */ static int hash_alloc(struct uma_hash *hash) { int oldsize; int alloc; oldsize = hash->uh_hashsize; /* We're just going to go to a power of two greater */ if (oldsize) { hash->uh_hashsize = oldsize * 2; alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize; hash->uh_slab_hash = (struct slabhead *)malloc(alloc, M_UMAHASH, M_NOWAIT); } else { alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT; hash->uh_slab_hash = zone_alloc_item(hashzone, NULL, UMA_ANYDOMAIN, M_WAITOK); hash->uh_hashsize = UMA_HASH_SIZE_INIT; } if (hash->uh_slab_hash) { bzero(hash->uh_slab_hash, alloc); hash->uh_hashmask = hash->uh_hashsize - 1; return (1); } return (0); } /* * Expands the hash table for HASH zones. This is done from zone_timeout * to reduce collisions. This must not be done in the regular allocation * path, otherwise, we can recurse on the vm while allocating pages. * * Arguments: * oldhash The hash you want to expand * newhash The hash structure for the new table * * Returns: * Nothing * * Discussion: */ static int hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash) { uma_slab_t slab; int hval; int i; if (!newhash->uh_slab_hash) return (0); if (oldhash->uh_hashsize >= newhash->uh_hashsize) return (0); /* * I need to investigate hash algorithms for resizing without a * full rehash. */ for (i = 0; i < oldhash->uh_hashsize; i++) while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) { slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]); SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink); hval = UMA_HASH(newhash, slab->us_data); SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval], slab, us_hlink); } return (1); } /* * Free the hash bucket to the appropriate backing store. 
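/*
 * Illustrative sketch (standalone userland C, not part of this patch):
 * hash_alloc() and hash_expand() above grow a chained hash table by
 * allocating a table of twice the size and re-bucketing every entry under
 * the larger mask.  A compact userland version of the same idea, hashing
 * plain integers instead of slab addresses:
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_ent {
	struct demo_ent *next;
	unsigned long key;
};

struct demo_hash {
	struct demo_ent **heads;
	unsigned long hashsize;		/* always a power of two */
	unsigned long hashmask;
};

static int
demo_hash_alloc(struct demo_hash *h, unsigned long size)
{
	h->heads = calloc(size, sizeof(*h->heads));
	if (h->heads == NULL)
		return (0);
	h->hashsize = size;
	h->hashmask = size - 1;
	return (1);
}

/* Move every entry from oldh into newh, like hash_expand(). */
static void
demo_hash_expand(struct demo_hash *oldh, struct demo_hash *newh)
{
	struct demo_ent *e;

	for (unsigned long i = 0; i < oldh->hashsize; i++)
		while ((e = oldh->heads[i]) != NULL) {
			oldh->heads[i] = e->next;
			e->next = newh->heads[e->key & newh->hashmask];
			newh->heads[e->key & newh->hashmask] = e;
		}
}

int
main(void)
{
	struct demo_hash oldh, newh;
	struct demo_ent ents[8];

	demo_hash_alloc(&oldh, 4);
	for (unsigned long i = 0; i < 8; i++) {
		ents[i].key = i * 13;
		ents[i].next = oldh.heads[ents[i].key & oldh.hashmask];
		oldh.heads[ents[i].key & oldh.hashmask] = &ents[i];
	}
	demo_hash_alloc(&newh, oldh.hashsize * 2);	/* doubling step */
	demo_hash_expand(&oldh, &newh);
	for (unsigned long i = 0; i < newh.hashsize; i++) {
		unsigned long n = 0;

		for (struct demo_ent *e = newh.heads[i]; e != NULL; e = e->next)
			n++;
		printf("chain %lu: %lu entries\n", i, n);
	}
	free(oldh.heads);
	free(newh.heads);
	return (0);
}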
* * Arguments: * slab_hash The hash bucket we're freeing * hashsize The number of entries in that hash bucket * * Returns: * Nothing */ static void hash_free(struct uma_hash *hash) { if (hash->uh_slab_hash == NULL) return; if (hash->uh_hashsize == UMA_HASH_SIZE_INIT) zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE); else free(hash->uh_slab_hash, M_UMAHASH); } /* * Frees all outstanding items in a bucket * * Arguments: * zone The zone to free to, must be unlocked. * bucket The free/alloc bucket with items, cpu queue must be locked. * * Returns: * Nothing */ static void bucket_drain(uma_zone_t zone, uma_bucket_t bucket) { int i; if (bucket == NULL) return; if (zone->uz_fini) for (i = 0; i < bucket->ub_cnt; i++) zone->uz_fini(bucket->ub_bucket[i], zone->uz_size); zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt); bucket->ub_cnt = 0; } /* * Drains the per cpu caches for a zone. * * NOTE: This may only be called while the zone is being turn down, and not * during normal operation. This is necessary in order that we do not have * to migrate CPUs to drain the per-CPU caches. * * Arguments: * zone The zone to drain, must be unlocked. * * Returns: * Nothing */ static void cache_drain(uma_zone_t zone) { uma_cache_t cache; int cpu; /* * XXX: It is safe to not lock the per-CPU caches, because we're * tearing down the zone anyway. I.e., there will be no further use * of the caches at this point. * * XXX: It would good to be able to assert that the zone is being * torn down to prevent improper use of cache_drain(). * * XXX: We lock the zone before passing into bucket_cache_drain() as * it is used elsewhere. Should the tear-down path be made special * there in some form? */ CPU_FOREACH(cpu) { cache = &zone->uz_cpu[cpu]; bucket_drain(zone, cache->uc_allocbucket); bucket_drain(zone, cache->uc_freebucket); if (cache->uc_allocbucket != NULL) bucket_free(zone, cache->uc_allocbucket, NULL); if (cache->uc_freebucket != NULL) bucket_free(zone, cache->uc_freebucket, NULL); cache->uc_allocbucket = cache->uc_freebucket = NULL; } ZONE_LOCK(zone); bucket_cache_drain(zone); ZONE_UNLOCK(zone); } static void cache_shrink(uma_zone_t zone) { if (zone->uz_flags & UMA_ZFLAG_INTERNAL) return; ZONE_LOCK(zone); zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2; ZONE_UNLOCK(zone); } static void cache_drain_safe_cpu(uma_zone_t zone) { uma_cache_t cache; uma_bucket_t b1, b2; int domain; if (zone->uz_flags & UMA_ZFLAG_INTERNAL) return; b1 = b2 = NULL; ZONE_LOCK(zone); critical_enter(); if (zone->uz_flags & UMA_ZONE_NUMA) domain = PCPU_GET(domain); else domain = 0; cache = &zone->uz_cpu[curcpu]; if (cache->uc_allocbucket) { if (cache->uc_allocbucket->ub_cnt != 0) - LIST_INSERT_HEAD(&zone->uz_domain[domain].uzd_buckets, - cache->uc_allocbucket, ub_link); + zone_put_bucket(zone, &zone->uz_domain[domain], + cache->uc_allocbucket, false); else b1 = cache->uc_allocbucket; cache->uc_allocbucket = NULL; } if (cache->uc_freebucket) { if (cache->uc_freebucket->ub_cnt != 0) - LIST_INSERT_HEAD(&zone->uz_domain[domain].uzd_buckets, - cache->uc_freebucket, ub_link); + zone_put_bucket(zone, &zone->uz_domain[domain], + cache->uc_freebucket, false); else b2 = cache->uc_freebucket; cache->uc_freebucket = NULL; } critical_exit(); ZONE_UNLOCK(zone); if (b1) bucket_free(zone, b1, NULL); if (b2) bucket_free(zone, b2, NULL); } /* * Safely drain per-CPU caches of a zone(s) to alloc bucket. 
* This is an expensive call because it needs to bind to all CPUs * one by one and enter a critical section on each of them in order * to safely access their cache buckets. * Zone lock must not be held on call this function. */ static void cache_drain_safe(uma_zone_t zone) { int cpu; /* * Polite bucket sizes shrinking was not enouth, shrink aggressively. */ if (zone) cache_shrink(zone); else zone_foreach(cache_shrink); CPU_FOREACH(cpu) { thread_lock(curthread); sched_bind(curthread, cpu); thread_unlock(curthread); if (zone) cache_drain_safe_cpu(zone); else zone_foreach(cache_drain_safe_cpu); } thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); } /* * Drain the cached buckets from a zone. Expects a locked zone on entry. */ static void bucket_cache_drain(uma_zone_t zone) { uma_zone_domain_t zdom; uma_bucket_t bucket; int i; /* * Drain the bucket queues and free the buckets. */ for (i = 0; i < vm_ndomains; i++) { zdom = &zone->uz_domain[i]; - while ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) { - LIST_REMOVE(bucket, ub_link); + while ((bucket = zone_try_fetch_bucket(zone, zdom, false)) != + NULL) { ZONE_UNLOCK(zone); bucket_drain(zone, bucket); bucket_free(zone, bucket, NULL); ZONE_LOCK(zone); } } /* * Shrink further bucket sizes. Price of single zone lock collision * is probably lower then price of global cache drain. */ if (zone->uz_count > zone->uz_count_min) zone->uz_count--; } static void keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start) { uint8_t *mem; int i; uint8_t flags; CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes", keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera); mem = slab->us_data; flags = slab->us_flags; i = start; if (keg->uk_fini != NULL) { for (i--; i > -1; i--) #ifdef INVARIANTS /* * trash_fini implies that dtor was trash_dtor. trash_fini * would check that memory hasn't been modified since free, * which executed trash_dtor. * That's why we need to run uma_dbg_kskip() check here, * albeit we don't make skip check for other init/fini * invocations. */ if (!uma_dbg_kskip(keg, slab->us_data + (keg->uk_rsize * i)) || keg->uk_fini != trash_fini) #endif keg->uk_fini(slab->us_data + (keg->uk_rsize * i), keg->uk_size); } if (keg->uk_flags & UMA_ZONE_OFFPAGE) zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE); keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags); uma_total_dec(PAGE_SIZE * keg->uk_ppera); } /* * Frees pages from a keg back to the system. This is done on demand from * the pageout daemon. * * Returns nothing. */ static void keg_drain(uma_keg_t keg) { struct slabhead freeslabs = { 0 }; uma_domain_t dom; uma_slab_t slab, tmp; int i; /* * We don't want to take pages from statically allocated kegs at this * time */ if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL) return; CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u", keg->uk_name, keg, keg->uk_free); KEG_LOCK(keg); if (keg->uk_free == 0) goto finished; for (i = 0; i < vm_ndomains; i++) { dom = &keg->uk_domain[i]; LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) { /* We have nowhere to free these to. 
*/ if (slab->us_flags & UMA_SLAB_BOOT) continue; LIST_REMOVE(slab, us_link); keg->uk_pages -= keg->uk_ppera; keg->uk_free -= keg->uk_ipers; if (keg->uk_flags & UMA_ZONE_HASH) UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data); SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink); } } finished: KEG_UNLOCK(keg); while ((slab = SLIST_FIRST(&freeslabs)) != NULL) { SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink); keg_free_slab(keg, slab, keg->uk_ipers); } } static void zone_drain_wait(uma_zone_t zone, int waitok) { /* * Set draining to interlock with zone_dtor() so we can release our * locks as we go. Only dtor() should do a WAITOK call since it * is the only call that knows the structure will still be available * when it wakes up. */ ZONE_LOCK(zone); while (zone->uz_flags & UMA_ZFLAG_DRAINING) { if (waitok == M_NOWAIT) goto out; msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1); } zone->uz_flags |= UMA_ZFLAG_DRAINING; bucket_cache_drain(zone); ZONE_UNLOCK(zone); /* * The DRAINING flag protects us from being freed while * we're running. Normally the uma_rwlock would protect us but we * must be able to release and acquire the right lock for each keg. */ zone_foreach_keg(zone, &keg_drain); ZONE_LOCK(zone); zone->uz_flags &= ~UMA_ZFLAG_DRAINING; wakeup(zone); out: ZONE_UNLOCK(zone); } void zone_drain(uma_zone_t zone) { zone_drain_wait(zone, M_NOWAIT); } /* * Allocate a new slab for a keg. This does not insert the slab onto a list. * If the allocation was successful, the keg lock will be held upon return, * otherwise the keg will be left unlocked. * * Arguments: * wait Shall we wait? * * Returns: * The slab that was allocated or NULL if there is no memory and the * caller specified M_NOWAIT. */ static uma_slab_t keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int wait) { uma_alloc allocf; uma_slab_t slab; unsigned long size; uint8_t *mem; uint8_t flags; int i; KASSERT(domain >= 0 && domain < vm_ndomains, ("keg_alloc_slab: domain %d out of range", domain)); mtx_assert(&keg->uk_lock, MA_OWNED); allocf = keg->uk_allocf; KEG_UNLOCK(keg); slab = NULL; mem = NULL; if (keg->uk_flags & UMA_ZONE_OFFPAGE) { slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, wait); if (slab == NULL) goto out; } /* * This reproduces the old vm_zone behavior of zero filling pages the * first time they are added to a zone. * * Malloced items are zeroed in uma_zalloc. */ if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0) wait |= M_ZERO; else wait &= ~M_ZERO; if (keg->uk_flags & UMA_ZONE_NODUMP) wait |= M_NODUMP; /* zone is passed for legacy reasons. 
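/*
 * Illustrative sketch (standalone userland C, not part of this patch):
 * keg_drain() above unlinks all freeable slabs onto a local list while
 * holding the keg lock, then drops the lock before doing the comparatively
 * expensive frees.  A small pthread-based userland version of that
 * "collect under the lock, free after dropping it" pattern (single-threaded
 * here for brevity; the point is the lock scope).  Build with -lpthread.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_slab {
	struct demo_slab *next;
	int boot;			/* like UMA_SLAB_BOOT: never freed */
};

static pthread_mutex_t demo_keg_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_slab *demo_free_slabs;	/* protected by demo_keg_lock */

static void
demo_drain(void)
{
	struct demo_slab *collected = NULL, *s, *next;

	pthread_mutex_lock(&demo_keg_lock);
	for (s = demo_free_slabs, demo_free_slabs = NULL; s != NULL; s = next) {
		next = s->next;
		if (s->boot) {			/* keep boot-time slabs */
			s->next = demo_free_slabs;
			demo_free_slabs = s;
			continue;
		}
		s->next = collected;
		collected = s;
	}
	pthread_mutex_unlock(&demo_keg_lock);

	/* The expensive part runs with the lock dropped. */
	for (s = collected; s != NULL; s = next) {
		next = s->next;
		free(s);
	}
}

int
main(void)
{
	for (int i = 0; i < 5; i++) {
		struct demo_slab *s = calloc(1, sizeof(*s));

		s->boot = (i == 0);
		s->next = demo_free_slabs;
		demo_free_slabs = s;
	}
	demo_drain();
	printf("kept %s boot slab\n", demo_free_slabs != NULL ? "the" : "no");
	free(demo_free_slabs);
	return (0);
}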
*/ size = keg->uk_ppera * PAGE_SIZE; mem = allocf(zone, size, domain, &flags, wait); if (mem == NULL) { if (keg->uk_flags & UMA_ZONE_OFFPAGE) zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE); slab = NULL; goto out; } uma_total_inc(size); /* Point the slab into the allocated memory */ if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) slab = (uma_slab_t )(mem + keg->uk_pgoff); if (keg->uk_flags & UMA_ZONE_VTOSLAB) for (i = 0; i < keg->uk_ppera; i++) vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab); slab->us_keg = keg; slab->us_data = mem; slab->us_freecount = keg->uk_ipers; slab->us_flags = flags; slab->us_domain = domain; BIT_FILL(SLAB_SETSIZE, &slab->us_free); #ifdef INVARIANTS BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree); #endif if (keg->uk_init != NULL) { for (i = 0; i < keg->uk_ipers; i++) if (keg->uk_init(slab->us_data + (keg->uk_rsize * i), keg->uk_size, wait) != 0) break; if (i != keg->uk_ipers) { keg_free_slab(keg, slab, i); slab = NULL; goto out; } } KEG_LOCK(keg); CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)", slab, keg->uk_name, keg); if (keg->uk_flags & UMA_ZONE_HASH) UMA_HASH_INSERT(&keg->uk_hash, slab, mem); keg->uk_pages += keg->uk_ppera; keg->uk_free += keg->uk_ipers; out: return (slab); } /* * This function is intended to be used early on in place of page_alloc() so * that we may use the boot time page cache to satisfy allocations before * the VM is ready. */ static void * startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag, int wait) { uma_keg_t keg; void *mem; int pages; keg = zone_first_keg(zone); /* * If we are in BOOT_BUCKETS or higher, than switch to real * allocator. Zones with page sized slabs switch at BOOT_PAGEALLOC. */ switch (booted) { case BOOT_COLD: case BOOT_STRAPPED: break; case BOOT_PAGEALLOC: if (keg->uk_ppera > 1) break; case BOOT_BUCKETS: case BOOT_RUNNING: #ifdef UMA_MD_SMALL_ALLOC keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc; #else keg->uk_allocf = page_alloc; #endif return keg->uk_allocf(zone, bytes, domain, pflag, wait); } /* * Check our small startup cache to see if it has pages remaining. */ pages = howmany(bytes, PAGE_SIZE); KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__)); if (pages > boot_pages) panic("UMA zone \"%s\": Increase vm.boot_pages", zone->uz_name); #ifdef DIAGNOSTIC printf("%s from \"%s\", %d boot pages left\n", __func__, zone->uz_name, boot_pages); #endif mem = bootmem; boot_pages -= pages; bootmem += pages * PAGE_SIZE; *pflag = UMA_SLAB_BOOT; return (mem); } /* * Allocates a number of pages from the system * * Arguments: * bytes The number of bytes requested * wait Shall we wait? * * Returns: * A pointer to the alloced memory or possibly * NULL if M_NOWAIT is set. 
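/*
 * Illustrative sketch (standalone userland C, not part of this patch):
 * before the VM is up, startup_alloc() above hands out page-aligned chunks
 * by bumping a pointer through the preallocated boot-pages arena and
 * decrementing the remaining-page count.  Here is a userland bump allocator
 * over a static arena using the same howmany()-style rounding; sizes and
 * names are demo stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define	DEMO_PAGE_SIZE	4096
#define	DEMO_BOOT_PAGES	8
#define	DEMO_HOWMANY(x, y)	(((x) + ((y) - 1)) / (y))

static uint8_t demo_arena[DEMO_BOOT_PAGES * DEMO_PAGE_SIZE];
static uint8_t *demo_bootmem = demo_arena;
static int demo_boot_pages = DEMO_BOOT_PAGES;

static void *
demo_startup_alloc(size_t bytes)
{
	void *mem;
	int pages;

	pages = DEMO_HOWMANY(bytes, DEMO_PAGE_SIZE);
	if (pages > demo_boot_pages)
		return (NULL);		/* the kernel would panic here */
	mem = demo_bootmem;
	demo_boot_pages -= pages;
	demo_bootmem += (size_t)pages * DEMO_PAGE_SIZE;
	return (mem);
}

int
main(void)
{
	void *a = demo_startup_alloc(100);			/* 1 page */
	void *b = demo_startup_alloc(3 * DEMO_PAGE_SIZE + 1);	/* 4 pages */

	printf("a=%p b=%p, %d boot pages left\n", a, b, demo_boot_pages);
	printf("oversized request %s\n",
	    demo_startup_alloc(16 * DEMO_PAGE_SIZE) == NULL ?
	    "rejected" : "served");
	return (0);
}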
*/ static void * page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag, int wait) { void *p; /* Returned page */ *pflag = UMA_SLAB_KERNEL; p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait); return (p); } static void * pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag, int wait) { struct pglist alloctail; vm_offset_t addr, zkva; int cpu, flags; vm_page_t p, p_next; #ifdef NUMA struct pcpu *pc; #endif MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE); TAILQ_INIT(&alloctail); flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ | malloc2vm_flags(wait); *pflag = UMA_SLAB_KERNEL; for (cpu = 0; cpu <= mp_maxid; cpu++) { if (CPU_ABSENT(cpu)) { p = vm_page_alloc(NULL, 0, flags); } else { #ifndef NUMA p = vm_page_alloc(NULL, 0, flags); #else pc = pcpu_find(cpu); p = vm_page_alloc_domain(NULL, 0, pc->pc_domain, flags); if (__predict_false(p == NULL)) p = vm_page_alloc(NULL, 0, flags); #endif } if (__predict_false(p == NULL)) goto fail; TAILQ_INSERT_TAIL(&alloctail, p, listq); } if ((addr = kva_alloc(bytes)) == 0) goto fail; zkva = addr; TAILQ_FOREACH(p, &alloctail, listq) { pmap_qenter(zkva, &p, 1); zkva += PAGE_SIZE; } return ((void*)addr); fail: TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) { vm_page_unwire(p, PQ_NONE); vm_page_free(p); } return (NULL); } /* * Allocates a number of pages from within an object * * Arguments: * bytes The number of bytes requested * wait Shall we wait? * * Returns: * A pointer to the alloced memory or possibly * NULL if M_NOWAIT is set. */ static void * noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags, int wait) { TAILQ_HEAD(, vm_page) alloctail; u_long npages; vm_offset_t retkva, zkva; vm_page_t p, p_next; uma_keg_t keg; TAILQ_INIT(&alloctail); keg = zone_first_keg(zone); npages = howmany(bytes, PAGE_SIZE); while (npages > 0) { p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ | ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK : VM_ALLOC_NOWAIT)); if (p != NULL) { /* * Since the page does not belong to an object, its * listq is unused. */ TAILQ_INSERT_TAIL(&alloctail, p, listq); npages--; continue; } /* * Page allocation failed, free intermediate pages and * exit. 
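/*
 * Illustrative sketch (standalone userland C, not part of this patch):
 * pcpu_page_alloc() above (and noobj_alloc() nearby) queue each page they
 * obtain on a local list and, as soon as any allocation in the sequence
 * fails, walk that list back and release everything before returning NULL.
 * A userland version of the same all-or-nothing pattern using malloc();
 * the names are stand-ins, not kernel interfaces.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_page {
	struct demo_page *next;
	char payload[64];
};

/* Allocate 'n' pages; either return all of them or none of them. */
static struct demo_page *
demo_alloc_all(int n, int fail_at)
{
	struct demo_page *head = NULL, *p;

	for (int i = 0; i < n; i++) {
		/* 'fail_at' simulates an allocation failure mid-sequence. */
		p = (i == fail_at) ? NULL : malloc(sizeof(*p));
		if (p == NULL)
			goto fail;
		p->next = head;
		head = p;
	}
	return (head);
fail:
	while ((p = head) != NULL) {	/* unwind the partial work */
		head = p->next;
		free(p);
	}
	return (NULL);
}

int
main(void)
{
	struct demo_page *all = demo_alloc_all(4, -1);

	printf("full run: %s\n", all != NULL ? "got all pages" : "failed");
	printf("failure mid-way: %s\n",
	    demo_alloc_all(4, 2) != NULL ? "unexpected" : "failed cleanly");
	while (all != NULL) {			/* tidy up the demo */
		struct demo_page *next = all->next;

		free(all);
		all = next;
	}
	return (0);
}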
*/ TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) { vm_page_unwire(p, PQ_NONE); vm_page_free(p); } return (NULL); } *flags = UMA_SLAB_PRIV; zkva = keg->uk_kva + atomic_fetchadd_long(&keg->uk_offset, round_page(bytes)); retkva = zkva; TAILQ_FOREACH(p, &alloctail, listq) { pmap_qenter(zkva, &p, 1); zkva += PAGE_SIZE; } return ((void *)retkva); } /* * Frees a number of pages to the system * * Arguments: * mem A pointer to the memory to be freed * size The size of the memory being freed * flags The original p->us_flags field * * Returns: * Nothing */ static void page_free(void *mem, vm_size_t size, uint8_t flags) { if ((flags & UMA_SLAB_KERNEL) == 0) panic("UMA: page_free used with invalid flags %x", flags); kmem_free((vm_offset_t)mem, size); } /* * Frees pcpu zone allocations * * Arguments: * mem A pointer to the memory to be freed * size The size of the memory being freed * flags The original p->us_flags field * * Returns: * Nothing */ static void pcpu_page_free(void *mem, vm_size_t size, uint8_t flags) { vm_offset_t sva, curva; vm_paddr_t paddr; vm_page_t m; MPASS(size == (mp_maxid+1)*PAGE_SIZE); sva = (vm_offset_t)mem; for (curva = sva; curva < sva + size; curva += PAGE_SIZE) { paddr = pmap_kextract(curva); m = PHYS_TO_VM_PAGE(paddr); vm_page_unwire(m, PQ_NONE); vm_page_free(m); } pmap_qremove(sva, size >> PAGE_SHIFT); kva_free(sva, size); } /* * Zero fill initializer * * Arguments/Returns follow uma_init specifications */ static int zero_init(void *mem, int size, int flags) { bzero(mem, size); return (0); } /* * Finish creating a small uma keg. This calculates ipers, and the keg size. * * Arguments * keg The zone we should initialize * * Returns * Nothing */ static void keg_small_init(uma_keg_t keg) { u_int rsize; u_int memused; u_int wastedspace; u_int shsize; u_int slabsize; if (keg->uk_flags & UMA_ZONE_PCPU) { u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU; slabsize = UMA_PCPU_ALLOC_SIZE; keg->uk_ppera = ncpus; } else { slabsize = UMA_SLAB_SIZE; keg->uk_ppera = 1; } /* * Calculate the size of each allocation (rsize) according to * alignment. If the requested size is smaller than we have * allocation bits for we round it up. */ rsize = keg->uk_size; if (rsize < slabsize / SLAB_SETSIZE) rsize = slabsize / SLAB_SETSIZE; if (rsize & keg->uk_align) rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1); keg->uk_rsize = rsize; KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 || keg->uk_rsize < UMA_PCPU_ALLOC_SIZE, ("%s: size %u too large", __func__, keg->uk_rsize)); if (keg->uk_flags & UMA_ZONE_OFFPAGE) shsize = 0; else shsize = sizeof(struct uma_slab); if (rsize <= slabsize - shsize) keg->uk_ipers = (slabsize - shsize) / rsize; else { /* Handle special case when we have 1 item per slab, so * alignment requirement can be relaxed. */ KASSERT(keg->uk_size <= slabsize - shsize, ("%s: size %u greater than slab", __func__, keg->uk_size)); keg->uk_ipers = 1; } KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE, ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers)); memused = keg->uk_ipers * rsize + shsize; wastedspace = slabsize - memused; /* * We can't do OFFPAGE if we're internal or if we've been * asked to not go to the VM for buckets. If we do this we * may end up going to the VM for slabs which we do not * want to do if we're UMA_ZFLAG_CACHEONLY as a result * of UMA_ZONE_VM, which clearly forbids it. */ if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) || (keg->uk_flags & UMA_ZFLAG_CACHEONLY)) return; /* * See if using an OFFPAGE slab will limit our waste. 
Only do * this if it permits more items per-slab. * * XXX We could try growing slabsize to limit max waste as well. * Historically this was not done because the VM could not * efficiently handle contiguous allocations. */ if ((wastedspace >= slabsize / UMA_MAX_WASTE) && (keg->uk_ipers < (slabsize / keg->uk_rsize))) { keg->uk_ipers = slabsize / keg->uk_rsize; KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE, ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers)); CTR6(KTR_UMA, "UMA decided we need offpage slab headers for " "keg: %s(%p), calculated wastedspace = %d, " "maximum wasted space allowed = %d, " "calculated ipers = %d, " "new wasted space = %d\n", keg->uk_name, keg, wastedspace, slabsize / UMA_MAX_WASTE, keg->uk_ipers, slabsize - keg->uk_ipers * keg->uk_rsize); keg->uk_flags |= UMA_ZONE_OFFPAGE; } if ((keg->uk_flags & UMA_ZONE_OFFPAGE) && (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0) keg->uk_flags |= UMA_ZONE_HASH; } /* * Finish creating a large (> UMA_SLAB_SIZE) uma kegs. Just give in and do * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be * more complicated. * * Arguments * keg The keg we should initialize * * Returns * Nothing */ static void keg_large_init(uma_keg_t keg) { u_int shsize; KASSERT(keg != NULL, ("Keg is null in keg_large_init")); KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0, ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg")); KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0, ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__)); keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE); keg->uk_ipers = 1; keg->uk_rsize = keg->uk_size; /* Check whether we have enough space to not do OFFPAGE. */ if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) { shsize = sizeof(struct uma_slab); if (shsize & UMA_ALIGN_PTR) shsize = (shsize & ~UMA_ALIGN_PTR) + (UMA_ALIGN_PTR + 1); if (PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < shsize) { /* * We can't do OFFPAGE if we're internal, in which case * we need an extra page per allocation to contain the * slab header. */ if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0) keg->uk_flags |= UMA_ZONE_OFFPAGE; else keg->uk_ppera++; } } if ((keg->uk_flags & UMA_ZONE_OFFPAGE) && (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0) keg->uk_flags |= UMA_ZONE_HASH; } static void keg_cachespread_init(uma_keg_t keg) { int alignsize; int trailer; int pages; int rsize; KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0, ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__)); alignsize = keg->uk_align + 1; rsize = keg->uk_size; /* * We want one item to start on every align boundary in a page. To * do this we will span pages. We will also extend the item by the * size of align if it is an even multiple of align. Otherwise, it * would fall on the same boundary every time. */ if (rsize & keg->uk_align) rsize = (rsize & ~keg->uk_align) + alignsize; if ((rsize & alignsize) == 0) rsize += alignsize; trailer = rsize - keg->uk_size; pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE; pages = MIN(pages, (128 * 1024) / PAGE_SIZE); keg->uk_rsize = rsize; keg->uk_ppera = pages; keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize; keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB; KASSERT(keg->uk_ipers <= SLAB_SETSIZE, ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__, keg->uk_ipers)); } /* * Keg header ctor. This initializes all fields, locks, etc. And inserts * the keg onto the global keg list. 
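/*
 * Illustrative sketch (standalone userland C, not part of this patch):
 * keg_small_init() above boils down to a little arithmetic: round the item
 * size up to the keg's alignment, see how many items fit beside the in-page
 * slab header, and go OFFPAGE when the leftover waste crosses a threshold
 * and moving the header out actually buys another item.  The slab size,
 * header size, and waste divisor below are stand-ins, not the kernel's
 * exact constants.
 */
#include <stdio.h>

#define	DEMO_SLAB_SIZE		4096
#define	DEMO_SLAB_HEADER	32	/* pretend sizeof(struct uma_slab) */
#define	DEMO_MAX_WASTE		10	/* tolerate at most 1/10 slab wasted */

struct demo_layout {
	unsigned rsize;		/* rounded item size */
	unsigned ipers;		/* items per slab */
	unsigned wasted;	/* unused bytes per slab */
	int	 offpage;	/* header kept outside the slab? */
};

static struct demo_layout
demo_small_init(unsigned size, unsigned align_mask)
{
	struct demo_layout l = { size, 0, 0, 0 };

	if (l.rsize & align_mask)	/* round up to the alignment */
		l.rsize = (l.rsize & ~align_mask) + (align_mask + 1);
	l.ipers = (DEMO_SLAB_SIZE - DEMO_SLAB_HEADER) / l.rsize;
	l.wasted = DEMO_SLAB_SIZE - (l.ipers * l.rsize + DEMO_SLAB_HEADER);
	if (l.wasted >= DEMO_SLAB_SIZE / DEMO_MAX_WASTE &&
	    l.ipers < DEMO_SLAB_SIZE / l.rsize) {
		l.offpage = 1;	/* header moves offpage, one more item fits */
		l.ipers = DEMO_SLAB_SIZE / l.rsize;
		l.wasted = DEMO_SLAB_SIZE - l.ipers * l.rsize;
	}
	return (l);
}

int
main(void)
{
	const unsigned sizes[] = { 24, 96, 250, 1020, 2000 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		struct demo_layout l = demo_small_init(sizes[i], 7);

		printf("size %4u -> rsize %4u, %3u per slab, %4u wasted%s\n",
		    sizes[i], l.rsize, l.ipers, l.wasted,
		    l.offpage ? " (offpage)" : "");
	}
	return (0);
}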
* * Arguments/Returns follow uma_ctor specifications * udata Actually uma_kctor_args */ static int keg_ctor(void *mem, int size, void *udata, int flags) { struct uma_kctor_args *arg = udata; uma_keg_t keg = mem; uma_zone_t zone; bzero(keg, size); keg->uk_size = arg->size; keg->uk_init = arg->uminit; keg->uk_fini = arg->fini; keg->uk_align = arg->align; keg->uk_free = 0; keg->uk_reserve = 0; keg->uk_pages = 0; keg->uk_flags = arg->flags; keg->uk_slabzone = NULL; /* * We use a global round-robin policy by default. Zones with * UMA_ZONE_NUMA set will use first-touch instead, in which case the * iterator is never run. */ keg->uk_dr.dr_policy = DOMAINSET_RR(); keg->uk_dr.dr_iter = 0; /* * The master zone is passed to us at keg-creation time. */ zone = arg->zone; keg->uk_name = zone->uz_name; if (arg->flags & UMA_ZONE_VM) keg->uk_flags |= UMA_ZFLAG_CACHEONLY; if (arg->flags & UMA_ZONE_ZINIT) keg->uk_init = zero_init; if (arg->flags & UMA_ZONE_MALLOC) keg->uk_flags |= UMA_ZONE_VTOSLAB; if (arg->flags & UMA_ZONE_PCPU) #ifdef SMP keg->uk_flags |= UMA_ZONE_OFFPAGE; #else keg->uk_flags &= ~UMA_ZONE_PCPU; #endif if (keg->uk_flags & UMA_ZONE_CACHESPREAD) { keg_cachespread_init(keg); } else { if (keg->uk_size > UMA_SLAB_SPACE) keg_large_init(keg); else keg_small_init(keg); } if (keg->uk_flags & UMA_ZONE_OFFPAGE) keg->uk_slabzone = slabzone; /* * If we haven't booted yet we need allocations to go through the * startup cache until the vm is ready. */ if (booted < BOOT_PAGEALLOC) keg->uk_allocf = startup_alloc; #ifdef UMA_MD_SMALL_ALLOC else if (keg->uk_ppera == 1) keg->uk_allocf = uma_small_alloc; #endif else if (keg->uk_flags & UMA_ZONE_PCPU) keg->uk_allocf = pcpu_page_alloc; else keg->uk_allocf = page_alloc; #ifdef UMA_MD_SMALL_ALLOC if (keg->uk_ppera == 1) keg->uk_freef = uma_small_free; else #endif if (keg->uk_flags & UMA_ZONE_PCPU) keg->uk_freef = pcpu_page_free; else keg->uk_freef = page_free; /* * Initialize keg's lock */ KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS)); /* * If we're putting the slab header in the actual page we need to * figure out where in each page it goes. This calculates a right * justified offset into the memory on an ALIGN_PTR boundary. */ if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) { u_int totsize; /* Size of the slab struct and free list */ totsize = sizeof(struct uma_slab); if (totsize & UMA_ALIGN_PTR) totsize = (totsize & ~UMA_ALIGN_PTR) + (UMA_ALIGN_PTR + 1); keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize; /* * The only way the following is possible is if with our * UMA_ALIGN_PTR adjustments we are now bigger than * UMA_SLAB_SIZE. I haven't checked whether this is * mathematically possible for all cases, so we make * sure here anyway. */ totsize = keg->uk_pgoff + sizeof(struct uma_slab); if (totsize > PAGE_SIZE * keg->uk_ppera) { printf("zone %s ipers %d rsize %d size %d\n", zone->uz_name, keg->uk_ipers, keg->uk_rsize, keg->uk_size); panic("UMA slab won't fit."); } } if (keg->uk_flags & UMA_ZONE_HASH) hash_alloc(&keg->uk_hash); CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n", keg, zone->uz_name, zone, (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free, keg->uk_free); LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link); rw_wlock(&uma_rwlock); LIST_INSERT_HEAD(&uma_kegs, keg, uk_link); rw_wunlock(&uma_rwlock); return (0); } /* * Zone header ctor. This initializes all fields, locks, etc. 
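/*
 * Illustrative sketch (standalone userland C, not part of this patch): when
 * the slab header lives inside the slab, keg_ctor() above computes uk_pgoff
 * as a right-justified, pointer-aligned offset so the header sits at the
 * very end of the slab's pages, then sanity-checks that it still fits.  The
 * sketch redoes that arithmetic with a stand-in header size and mask.
 */
#include <stdio.h>

#define	DEMO_PAGE_SIZE	4096
#define	DEMO_ALIGN_PTR	(sizeof(void *) - 1)	/* alignment mask */

static unsigned
demo_pgoff(unsigned header_size, unsigned pages_per_slab)
{
	unsigned totsize = header_size;

	if (totsize & DEMO_ALIGN_PTR)	/* round header up to pointer size */
		totsize = (totsize & ~DEMO_ALIGN_PTR) + (DEMO_ALIGN_PTR + 1);
	return (pages_per_slab * DEMO_PAGE_SIZE - totsize);
}

int
main(void)
{
	unsigned pgoff = demo_pgoff(92, 1);	/* 92-byte pretend header */

	printf("header placed at offset %u of %u\n", pgoff, DEMO_PAGE_SIZE);
	/* Same sanity check as keg_ctor(): the header must still fit. */
	printf("fits: %s\n",
	    pgoff + 92 <= DEMO_PAGE_SIZE ? "yes" : "no (would panic)");
	return (0);
}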
* * Arguments/Returns follow uma_ctor specifications * udata Actually uma_zctor_args */ static int zone_ctor(void *mem, int size, void *udata, int flags) { struct uma_zctor_args *arg = udata; uma_zone_t zone = mem; uma_zone_t z; uma_keg_t keg; bzero(zone, size); zone->uz_name = arg->name; zone->uz_ctor = arg->ctor; zone->uz_dtor = arg->dtor; zone->uz_slab = zone_fetch_slab; zone->uz_init = NULL; zone->uz_fini = NULL; zone->uz_allocs = 0; zone->uz_frees = 0; zone->uz_fails = 0; zone->uz_sleeps = 0; zone->uz_count = 0; zone->uz_count_min = 0; zone->uz_flags = 0; zone->uz_warning = NULL; /* The domain structures follow the cpu structures. */ zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus]; timevalclear(&zone->uz_ratecheck); keg = arg->keg; ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS)); /* * This is a pure cache zone, no kegs. */ if (arg->import) { if (arg->flags & UMA_ZONE_VM) arg->flags |= UMA_ZFLAG_CACHEONLY; zone->uz_flags = arg->flags; zone->uz_size = arg->size; zone->uz_import = arg->import; zone->uz_release = arg->release; zone->uz_arg = arg->arg; zone->uz_lockptr = &zone->uz_lock; rw_wlock(&uma_rwlock); LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link); rw_wunlock(&uma_rwlock); goto out; } /* * Use the regular zone/keg/slab allocator. */ zone->uz_import = (uma_import)zone_import; zone->uz_release = (uma_release)zone_release; zone->uz_arg = zone; if (arg->flags & UMA_ZONE_SECONDARY) { KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg")); zone->uz_init = arg->uminit; zone->uz_fini = arg->fini; zone->uz_lockptr = &keg->uk_lock; zone->uz_flags |= UMA_ZONE_SECONDARY; rw_wlock(&uma_rwlock); ZONE_LOCK(zone); LIST_FOREACH(z, &keg->uk_zones, uz_link) { if (LIST_NEXT(z, uz_link) == NULL) { LIST_INSERT_AFTER(z, zone, uz_link); break; } } ZONE_UNLOCK(zone); rw_wunlock(&uma_rwlock); } else if (keg == NULL) { if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini, arg->align, arg->flags)) == NULL) return (ENOMEM); } else { struct uma_kctor_args karg; int error; /* We should only be here from uma_startup() */ karg.size = arg->size; karg.uminit = arg->uminit; karg.fini = arg->fini; karg.align = arg->align; karg.flags = arg->flags; karg.zone = zone; error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg, flags); if (error) return (error); } /* * Link in the first keg. */ zone->uz_klink.kl_keg = keg; LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link); zone->uz_lockptr = &keg->uk_lock; zone->uz_size = keg->uk_size; zone->uz_flags |= (keg->uk_flags & (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT)); /* * Some internal zones don't have room allocated for the per cpu * caches. If we're internal, bail out here. */ if (keg->uk_flags & UMA_ZFLAG_INTERNAL) { KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0, ("Secondary zone requested UMA_ZFLAG_INTERNAL")); return (0); } out: KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) != (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET), ("Invalid zone flag combination")); if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0) zone->uz_count = BUCKET_MAX; else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0) zone->uz_count = 0; else zone->uz_count = bucket_select(zone->uz_size); zone->uz_count_min = zone->uz_count; return (0); } /* * Keg header dtor. This frees all data, destroys locks, frees the hash * table and removes the keg from the global list. 
* * Arguments/Returns follow uma_dtor specifications * udata unused */ static void keg_dtor(void *arg, int size, void *udata) { uma_keg_t keg; keg = (uma_keg_t)arg; KEG_LOCK(keg); if (keg->uk_free != 0) { printf("Freed UMA keg (%s) was not empty (%d items). " " Lost %d pages of memory.\n", keg->uk_name ? keg->uk_name : "", keg->uk_free, keg->uk_pages); } KEG_UNLOCK(keg); hash_free(&keg->uk_hash); KEG_LOCK_FINI(keg); } /* * Zone header dtor. * * Arguments/Returns follow uma_dtor specifications * udata unused */ static void zone_dtor(void *arg, int size, void *udata) { uma_klink_t klink; uma_zone_t zone; uma_keg_t keg; zone = (uma_zone_t)arg; keg = zone_first_keg(zone); if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL)) cache_drain(zone); rw_wlock(&uma_rwlock); LIST_REMOVE(zone, uz_link); rw_wunlock(&uma_rwlock); /* * XXX there are some races here where * the zone can be drained but zone lock * released and then refilled before we * remove it... we dont care for now */ zone_drain_wait(zone, M_WAITOK); /* * Unlink all of our kegs. */ while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) { klink->kl_keg = NULL; LIST_REMOVE(klink, kl_link); if (klink == &zone->uz_klink) continue; free(klink, M_TEMP); } /* * We only destroy kegs from non secondary zones. */ if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0) { rw_wlock(&uma_rwlock); LIST_REMOVE(keg, uk_link); rw_wunlock(&uma_rwlock); zone_free_item(kegs, keg, NULL, SKIP_NONE); } ZONE_LOCK_FINI(zone); } /* * Traverses every zone in the system and calls a callback * * Arguments: * zfunc A pointer to a function which accepts a zone * as an argument. * * Returns: * Nothing */ static void zone_foreach(void (*zfunc)(uma_zone_t)) { uma_keg_t keg; uma_zone_t zone; rw_rlock(&uma_rwlock); LIST_FOREACH(keg, &uma_kegs, uk_link) { LIST_FOREACH(zone, &keg->uk_zones, uz_link) zfunc(zone); } rw_runlock(&uma_rwlock); } /* * Count how many pages do we need to bootstrap. VM supplies * its need in early zones in the argument, we add up our zones, * which consist of: UMA Slabs, UMA Hash and 9 Bucket zones. The * zone of zones and zone of kegs are accounted separately. */ #define UMA_BOOT_ZONES 11 /* Zone of zones and zone of kegs have arbitrary alignment. */ #define UMA_BOOT_ALIGN 32 static int zsize, ksize; int uma_startup_count(int vm_zones) { int zones, pages; ksize = sizeof(struct uma_keg) + (sizeof(struct uma_domain) * vm_ndomains); zsize = sizeof(struct uma_zone) + (sizeof(struct uma_cache) * (mp_maxid + 1)) + (sizeof(struct uma_zone_domain) * vm_ndomains); /* * Memory for the zone of kegs and its keg, * and for zone of zones. */ pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 + roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE); #ifdef UMA_MD_SMALL_ALLOC zones = UMA_BOOT_ZONES; #else zones = UMA_BOOT_ZONES + vm_zones; vm_zones = 0; #endif /* Memory for the rest of startup zones, UMA and VM, ... */ if (zsize > UMA_SLAB_SPACE) pages += (zones + vm_zones) * howmany(roundup2(zsize, UMA_BOOT_ALIGN), UMA_SLAB_SIZE); else if (roundup2(zsize, UMA_BOOT_ALIGN) > UMA_SLAB_SPACE) pages += zones; else pages += howmany(zones, UMA_SLAB_SPACE / roundup2(zsize, UMA_BOOT_ALIGN)); /* ... and their kegs. Note that zone of zones allocates a keg! */ pages += howmany(zones + 1, UMA_SLAB_SPACE / roundup2(ksize, UMA_BOOT_ALIGN)); /* * Most of startup zones are not going to be offpages, that's * why we use UMA_SLAB_SPACE instead of UMA_SLAB_SIZE in all * calculations. Some large bucket zones will be offpage, and * thus will allocate hashes. 
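/*
 * Editor's sketch of the bootstrap-page arithmetic in uma_startup_count()
 * above: how many pages it takes to carve N aligned zone structures out of
 * the usable space of each slab.  The constants are examples, the macros
 * are restated locally, and the three slab-fit cases of the real function
 * are condensed to two.
 */
#include <stdio.h>

#define PAGE_SZ		4096
#define SLAB_HDR	96			/* pretend in-page slab header size */
#define SLAB_SPACE	(PAGE_SZ - SLAB_HDR)	/* like UMA_SLAB_SPACE */
#define BOOT_ALIGN	32			/* like UMA_BOOT_ALIGN */

#define howmany(x, y)	(((x) + ((y) - 1)) / (y))
#define roundup2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))  /* y: power of two */

int
main(void)
{
	int zones = 11;		/* like UMA_BOOT_ZONES */
	int zsize = 1200;	/* pretend zone structure size */
	int pages;

	if (roundup2(zsize, BOOT_ALIGN) > SLAB_SPACE) {
		/* Each structure needs its own page(s). */
		pages = zones * howmany(roundup2(zsize, BOOT_ALIGN), PAGE_SZ);
	} else {
		/* Several aligned structures fit per slab. */
		pages = howmany(zones, SLAB_SPACE / roundup2(zsize, BOOT_ALIGN));
	}

	printf("%d zones of %d bytes -> %d boot page(s)\n", zones, zsize, pages);
	return (0);
}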
We take conservative approach * and assume that all zones may allocate hash. This may give * us some positive inaccuracy, usually an extra single page. */ pages += howmany(zones, UMA_SLAB_SPACE / (sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT)); return (pages); } void uma_startup(void *mem, int npages) { struct uma_zctor_args args; uma_keg_t masterkeg; uintptr_t m; #ifdef DIAGNOSTIC printf("Entering %s with %d boot pages configured\n", __func__, npages); #endif rw_init(&uma_rwlock, "UMA lock"); /* Use bootpages memory for the zone of zones and zone of kegs. */ m = (uintptr_t)mem; zones = (uma_zone_t)m; m += roundup(zsize, CACHE_LINE_SIZE); kegs = (uma_zone_t)m; m += roundup(zsize, CACHE_LINE_SIZE); masterkeg = (uma_keg_t)m; m += roundup(ksize, CACHE_LINE_SIZE); m = roundup(m, PAGE_SIZE); npages -= (m - (uintptr_t)mem) / PAGE_SIZE; mem = (void *)m; /* "manually" create the initial zone */ memset(&args, 0, sizeof(args)); args.name = "UMA Kegs"; args.size = ksize; args.ctor = keg_ctor; args.dtor = keg_dtor; args.uminit = zero_init; args.fini = NULL; args.keg = masterkeg; args.align = UMA_BOOT_ALIGN - 1; args.flags = UMA_ZFLAG_INTERNAL; zone_ctor(kegs, zsize, &args, M_WAITOK); bootmem = mem; boot_pages = npages; args.name = "UMA Zones"; args.size = zsize; args.ctor = zone_ctor; args.dtor = zone_dtor; args.uminit = zero_init; args.fini = NULL; args.keg = NULL; args.align = UMA_BOOT_ALIGN - 1; args.flags = UMA_ZFLAG_INTERNAL; zone_ctor(zones, zsize, &args, M_WAITOK); /* Now make a zone for slab headers */ slabzone = uma_zcreate("UMA Slabs", sizeof(struct uma_slab), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); hashzone = uma_zcreate("UMA Hash", sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT, NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); bucket_init(); booted = BOOT_STRAPPED; } void uma_startup1(void) { #ifdef DIAGNOSTIC printf("Entering %s with %d boot pages left\n", __func__, boot_pages); #endif booted = BOOT_PAGEALLOC; } void uma_startup2(void) { #ifdef DIAGNOSTIC printf("Entering %s with %d boot pages left\n", __func__, boot_pages); #endif booted = BOOT_BUCKETS; sx_init(&uma_drain_lock, "umadrain"); bucket_enable(); } /* * Initialize our callout handle * */ static void uma_startup3(void) { #ifdef INVARIANTS TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor); uma_dbg_cnt = counter_u64_alloc(M_WAITOK); uma_skip_cnt = counter_u64_alloc(M_WAITOK); #endif callout_init(&uma_callout, 1); callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); booted = BOOT_RUNNING; } static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini, int align, uint32_t flags) { struct uma_kctor_args args; args.size = size; args.uminit = uminit; args.fini = fini; args.align = (align == UMA_ALIGN_CACHE) ? 
uma_align_cache : align; args.flags = flags; args.zone = zone; return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK)); } /* Public functions */ /* See uma.h */ void uma_set_align(int align) { if (align != UMA_ALIGN_CACHE) uma_align_cache = align; } /* See uma.h */ uma_zone_t uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor, uma_init uminit, uma_fini fini, int align, uint32_t flags) { struct uma_zctor_args args; uma_zone_t res; bool locked; KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"", align, name)); /* This stuff is essential for the zone ctor */ memset(&args, 0, sizeof(args)); args.name = name; args.size = size; args.ctor = ctor; args.dtor = dtor; args.uminit = uminit; args.fini = fini; #ifdef INVARIANTS /* * If a zone is being created with an empty constructor and * destructor, pass UMA constructor/destructor which checks for * memory use after free. */ if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) && ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) { args.ctor = trash_ctor; args.dtor = trash_dtor; args.uminit = trash_init; args.fini = trash_fini; } #endif args.align = align; args.flags = flags; args.keg = NULL; if (booted < BOOT_BUCKETS) { locked = false; } else { sx_slock(&uma_drain_lock); locked = true; } res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK); if (locked) sx_sunlock(&uma_drain_lock); return (res); } /* See uma.h */ uma_zone_t uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor, uma_init zinit, uma_fini zfini, uma_zone_t master) { struct uma_zctor_args args; uma_keg_t keg; uma_zone_t res; bool locked; keg = zone_first_keg(master); memset(&args, 0, sizeof(args)); args.name = name; args.size = keg->uk_size; args.ctor = ctor; args.dtor = dtor; args.uminit = zinit; args.fini = zfini; args.align = keg->uk_align; args.flags = keg->uk_flags | UMA_ZONE_SECONDARY; args.keg = keg; if (booted < BOOT_BUCKETS) { locked = false; } else { sx_slock(&uma_drain_lock); locked = true; } /* XXX Attaches only one keg of potentially many. */ res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK); if (locked) sx_sunlock(&uma_drain_lock); return (res); } /* See uma.h */ uma_zone_t uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor, uma_init zinit, uma_fini zfini, uma_import zimport, uma_release zrelease, void *arg, int flags) { struct uma_zctor_args args; memset(&args, 0, sizeof(args)); args.name = name; args.size = size; args.ctor = ctor; args.dtor = dtor; args.uminit = zinit; args.fini = zfini; args.import = zimport; args.release = zrelease; args.arg = arg; args.align = 0; args.flags = flags; return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK)); } static void zone_lock_pair(uma_zone_t a, uma_zone_t b) { if (a < b) { ZONE_LOCK(a); mtx_lock_flags(b->uz_lockptr, MTX_DUPOK); } else { ZONE_LOCK(b); mtx_lock_flags(a->uz_lockptr, MTX_DUPOK); } } static void zone_unlock_pair(uma_zone_t a, uma_zone_t b) { ZONE_UNLOCK(a); ZONE_UNLOCK(b); } int uma_zsecond_add(uma_zone_t zone, uma_zone_t master) { uma_klink_t klink; uma_klink_t kl; int error; error = 0; klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO); zone_lock_pair(zone, master); /* * zone must use vtoslab() to resolve objects and must already be * a secondary. */ if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) { error = EINVAL; goto out; } /* * The new master must also use vtoslab(). 
*/ if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) { error = EINVAL; goto out; } /* * The underlying object must be the same size. rsize * may be different. */ if (master->uz_size != zone->uz_size) { error = E2BIG; goto out; } /* * Put it at the end of the list. */ klink->kl_keg = zone_first_keg(master); LIST_FOREACH(kl, &zone->uz_kegs, kl_link) { if (LIST_NEXT(kl, kl_link) == NULL) { LIST_INSERT_AFTER(kl, klink, kl_link); break; } } klink = NULL; zone->uz_flags |= UMA_ZFLAG_MULTI; zone->uz_slab = zone_fetch_slab_multi; out: zone_unlock_pair(zone, master); if (klink != NULL) free(klink, M_TEMP); return (error); } /* See uma.h */ void uma_zdestroy(uma_zone_t zone) { sx_slock(&uma_drain_lock); zone_free_item(zones, zone, NULL, SKIP_NONE); sx_sunlock(&uma_drain_lock); } void uma_zwait(uma_zone_t zone) { void *item; item = uma_zalloc_arg(zone, NULL, M_WAITOK); uma_zfree(zone, item); } void * uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags) { void *item; #ifdef SMP int i; MPASS(zone->uz_flags & UMA_ZONE_PCPU); #endif item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO); if (item != NULL && (flags & M_ZERO)) { #ifdef SMP for (i = 0; i <= mp_maxid; i++) bzero(zpcpu_get_cpu(item, i), zone->uz_size); #else bzero(item, zone->uz_size); #endif } return (item); } /* * A stub while both regular and pcpu cases are identical. */ void uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *udata) { #ifdef SMP MPASS(zone->uz_flags & UMA_ZONE_PCPU); #endif uma_zfree_arg(zone, item, udata); } /* See uma.h */ void * uma_zalloc_arg(uma_zone_t zone, void *udata, int flags) { uma_zone_domain_t zdom; uma_bucket_t bucket; uma_cache_t cache; void *item; int cpu, domain, lockfail; #ifdef INVARIANTS bool skipdbg; #endif /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); /* This is the fast path allocation */ CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d", curthread, zone->uz_name, zone, flags); if (flags & M_WAITOK) { WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "uma_zalloc_arg: zone \"%s\"", zone->uz_name); } KASSERT((flags & M_EXEC) == 0, ("uma_zalloc_arg: called with M_EXEC")); KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), ("uma_zalloc_arg: called with spinlock or critical section held")); if (zone->uz_flags & UMA_ZONE_PCPU) KASSERT((flags & M_ZERO) == 0, ("allocating from a pcpu zone " "with M_ZERO passed")); #ifdef DEBUG_MEMGUARD if (memguard_cmp_zone(zone)) { item = memguard_alloc(zone->uz_size, flags); if (item != NULL) { if (zone->uz_init != NULL && zone->uz_init(item, zone->uz_size, flags) != 0) return (NULL); if (zone->uz_ctor != NULL && zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { zone->uz_fini(item, zone->uz_size); return (NULL); } return (item); } /* This is unfortunate but should not be fatal. */ } #endif /* * If possible, allocate from the per-CPU cache. There are two * requirements for safe access to the per-CPU cache: (1) the thread * accessing the cache must not be preempted or yield during access, * and (2) the thread must not migrate CPUs without switching which * cache it accesses. We rely on a critical section to prevent * preemption and migration. We release the critical section in * order to acquire the zone mutex if we are unable to allocate from * the current cache; when we re-acquire the critical section, we * must detect and handle migration if it has occurred. 
*/ zalloc_restart: critical_enter(); cpu = curcpu; cache = &zone->uz_cpu[cpu]; zalloc_start: bucket = cache->uc_allocbucket; if (bucket != NULL && bucket->ub_cnt > 0) { bucket->ub_cnt--; item = bucket->ub_bucket[bucket->ub_cnt]; #ifdef INVARIANTS bucket->ub_bucket[bucket->ub_cnt] = NULL; #endif KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled.")); cache->uc_allocs++; critical_exit(); #ifdef INVARIANTS skipdbg = uma_dbg_zskip(zone, item); #endif if (zone->uz_ctor != NULL && #ifdef INVARIANTS (!skipdbg || zone->uz_ctor != trash_ctor || zone->uz_dtor != trash_dtor) && #endif zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { atomic_add_long(&zone->uz_fails, 1); zone_free_item(zone, item, udata, SKIP_DTOR); return (NULL); } #ifdef INVARIANTS if (!skipdbg) uma_dbg_alloc(zone, NULL, item); #endif if (flags & M_ZERO) uma_zero_item(item, zone); return (item); } /* * We have run out of items in our alloc bucket. * See if we can switch with our free bucket. */ bucket = cache->uc_freebucket; if (bucket != NULL && bucket->ub_cnt > 0) { CTR2(KTR_UMA, "uma_zalloc: zone %s(%p) swapping empty with alloc", zone->uz_name, zone); cache->uc_freebucket = cache->uc_allocbucket; cache->uc_allocbucket = bucket; goto zalloc_start; } /* * Discard any empty allocation bucket while we hold no locks. */ bucket = cache->uc_allocbucket; cache->uc_allocbucket = NULL; critical_exit(); if (bucket != NULL) bucket_free(zone, bucket, udata); if (zone->uz_flags & UMA_ZONE_NUMA) { domain = PCPU_GET(domain); if (VM_DOMAIN_EMPTY(domain)) domain = UMA_ANYDOMAIN; } else domain = UMA_ANYDOMAIN; /* Short-circuit for zones without buckets and low memory. */ if (zone->uz_count == 0 || bucketdisable) goto zalloc_item; /* * Attempt to retrieve the item from the per-CPU cache has failed, so * we must go back to the zone. This requires the zone lock, so we * must drop the critical section, then re-acquire it when we go back * to the cache. Since the critical section is released, we may be * preempted or migrate. As such, make sure not to maintain any * thread-local state specific to the cache from prior to releasing * the critical section. */ lockfail = 0; if (ZONE_TRYLOCK(zone) == 0) { /* Record contention to size the buckets. */ ZONE_LOCK(zone); lockfail = 1; } critical_enter(); cpu = curcpu; cache = &zone->uz_cpu[cpu]; /* See if we lost the race to fill the cache. */ if (cache->uc_allocbucket != NULL) { ZONE_UNLOCK(zone); goto zalloc_start; } /* * Check the zone's cache of buckets. */ if (domain == UMA_ANYDOMAIN) zdom = &zone->uz_domain[0]; else zdom = &zone->uz_domain[domain]; - if ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) { + if ((bucket = zone_try_fetch_bucket(zone, zdom, true)) != NULL) { KASSERT(bucket->ub_cnt != 0, ("uma_zalloc_arg: Returning an empty bucket.")); - - LIST_REMOVE(bucket, ub_link); cache->uc_allocbucket = bucket; ZONE_UNLOCK(zone); goto zalloc_start; } /* We are no longer associated with this CPU. */ critical_exit(); /* * We bump the uz count when the cache size is insufficient to * handle the working set. */ if (lockfail && zone->uz_count < BUCKET_MAX) zone->uz_count++; ZONE_UNLOCK(zone); /* * Now lets just fill a bucket and put it on the free list. If that * works we'll restart the allocation from the beginning and it * will use the just filled bucket. 
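/*
 * Editor's sketch of the fast path just above: a single-threaded toy of the
 * two-bucket per-CPU cache.  Allocations pop from the alloc bucket and, when
 * it runs dry, the alloc and free buckets are swapped before falling back to
 * the zone.  The kernel guards this with a critical section; this toy has no
 * concurrency, and every name in it is invented for the example.
 */
#include <stddef.h>
#include <stdio.h>

#define TOY_BUCKET_ENTRIES 4

struct toy_bucket {
	int	cnt;				/* items currently in the bucket */
	void	*items[TOY_BUCKET_ENTRIES];
};

struct toy_cache {
	struct toy_bucket *allocbucket;
	struct toy_bucket *freebucket;
};

static void *
toy_cache_alloc(struct toy_cache *cache)
{
	struct toy_bucket *b;

	b = cache->allocbucket;
	if (b != NULL && b->cnt > 0)
		return (b->items[--b->cnt]);	/* fast path: pop from alloc bucket */

	/* Alloc bucket is empty; swap with the free bucket if it has items. */
	b = cache->freebucket;
	if (b != NULL && b->cnt > 0) {
		cache->freebucket = cache->allocbucket;
		cache->allocbucket = b;
		return (b->items[--b->cnt]);
	}
	return (NULL);				/* caller must take the zone lock */
}

int
main(void)
{
	static int objs[3];
	struct toy_bucket full = { 3, { &objs[0], &objs[1], &objs[2] } };
	struct toy_bucket empty = { 0, { NULL } };
	struct toy_cache cache = { &empty, &full };
	void *p;

	while ((p = toy_cache_alloc(&cache)) != NULL)
		printf("allocated %p\n", p);
	return (0);
}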
*/ bucket = zone_alloc_bucket(zone, udata, domain, flags); CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p", zone->uz_name, zone, bucket); if (bucket != NULL) { ZONE_LOCK(zone); critical_enter(); cpu = curcpu; cache = &zone->uz_cpu[cpu]; + /* * See if we lost the race or were migrated. Cache the * initialized bucket to make this less likely or claim * the memory directly. */ if (cache->uc_allocbucket == NULL && ((zone->uz_flags & UMA_ZONE_NUMA) == 0 || domain == PCPU_GET(domain))) { cache->uc_allocbucket = bucket; + zdom->uzd_imax += bucket->ub_cnt; } else if ((zone->uz_flags & UMA_ZONE_NOBUCKETCACHE) != 0) { critical_exit(); ZONE_UNLOCK(zone); bucket_drain(zone, bucket); bucket_free(zone, bucket, udata); goto zalloc_restart; } else - LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link); + zone_put_bucket(zone, zdom, bucket, false); ZONE_UNLOCK(zone); goto zalloc_start; } /* * We may not be able to get a bucket so return an actual item. */ zalloc_item: item = zone_alloc_item(zone, udata, domain, flags); return (item); } void * uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags) { /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); /* This is the fast path allocation */ CTR5(KTR_UMA, "uma_zalloc_domain thread %x zone %s(%p) domain %d flags %d", curthread, zone->uz_name, zone, domain, flags); if (flags & M_WAITOK) { WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "uma_zalloc_domain: zone \"%s\"", zone->uz_name); } KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), ("uma_zalloc_domain: called with spinlock or critical section held")); return (zone_alloc_item(zone, udata, domain, flags)); } /* * Find a slab with some space. Prefer slabs that are partially used over those * that are totally full. This helps to reduce fragmentation. * * If 'rr' is 1, search all domains starting from 'domain'. Otherwise check * only 'domain'. */ static uma_slab_t keg_first_slab(uma_keg_t keg, int domain, bool rr) { uma_domain_t dom; uma_slab_t slab; int start; KASSERT(domain >= 0 && domain < vm_ndomains, ("keg_first_slab: domain %d out of range", domain)); slab = NULL; start = domain; do { dom = &keg->uk_domain[domain]; if (!LIST_EMPTY(&dom->ud_part_slab)) return (LIST_FIRST(&dom->ud_part_slab)); if (!LIST_EMPTY(&dom->ud_free_slab)) { slab = LIST_FIRST(&dom->ud_free_slab); LIST_REMOVE(slab, us_link); LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link); return (slab); } if (rr) domain = (domain + 1) % vm_ndomains; } while (domain != start); return (NULL); } static uma_slab_t keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags) { uint32_t reserve; mtx_assert(&keg->uk_lock, MA_OWNED); reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve; if (keg->uk_free <= reserve) return (NULL); return (keg_first_slab(keg, domain, rr)); } static uma_slab_t keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags) { struct vm_domainset_iter di; uma_domain_t dom; uma_slab_t slab; int aflags, domain; bool rr; restart: mtx_assert(&keg->uk_lock, MA_OWNED); /* * Use the keg's policy if upper layers haven't already specified a * domain (as happens with first-touch zones). * * To avoid races we run the iterator with the keg lock held, but that * means that we cannot allow the vm_domainset layer to sleep. Thus, * clear M_WAITOK and handle low memory conditions locally. 
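/*
 * Editor's sketch of the round-robin domain walk in keg_first_slab() above:
 * starting from a preferred domain, visit each memory domain in turn and
 * stop at the first one that can still supply a slab.  The per-domain
 * partial/free slab lists are reduced to a simple count here, so the
 * partial-before-free preference of the real code is not shown.
 */
#include <stdio.h>

#define NDOMAINS 4

static int
first_domain_with_slab(const int free_slabs[NDOMAINS], int start)
{
	int domain = start;

	do {
		if (free_slabs[domain] > 0)
			return (domain);	/* allocate from this domain */
		domain = (domain + 1) % NDOMAINS;
	} while (domain != start);
	return (-1);				/* every domain is exhausted */
}

int
main(void)
{
	int free_slabs[NDOMAINS] = { 0, 0, 3, 1 };

	printf("preferred domain 1 -> use domain %d\n",
	    first_domain_with_slab(free_slabs, 1));
	return (0);
}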
*/ rr = rdomain == UMA_ANYDOMAIN; if (rr) { aflags = (flags & ~M_WAITOK) | M_NOWAIT; vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain, &aflags); } else { aflags = flags; domain = rdomain; } for (;;) { slab = keg_fetch_free_slab(keg, domain, rr, flags); if (slab != NULL) { MPASS(slab->us_keg == keg); return (slab); } /* * M_NOVM means don't ask at all! */ if (flags & M_NOVM) break; if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) { keg->uk_flags |= UMA_ZFLAG_FULL; /* * If this is not a multi-zone, set the FULL bit. * Otherwise slab_multi() takes care of it. */ if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) { zone->uz_flags |= UMA_ZFLAG_FULL; zone_log_warning(zone); zone_maxaction(zone); } if (flags & M_NOWAIT) return (NULL); zone->uz_sleeps++; msleep(keg, &keg->uk_lock, PVM, "keglimit", 0); continue; } slab = keg_alloc_slab(keg, zone, domain, aflags); /* * If we got a slab here it's safe to mark it partially used * and return. We assume that the caller is going to remove * at least one item. */ if (slab) { MPASS(slab->us_keg == keg); dom = &keg->uk_domain[slab->us_domain]; LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link); return (slab); } KEG_LOCK(keg); if (rr && vm_domainset_iter_policy(&di, &domain) != 0) { if ((flags & M_WAITOK) != 0) { KEG_UNLOCK(keg); vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask); KEG_LOCK(keg); goto restart; } break; } } /* * We might not have been able to get a slab but another cpu * could have while we were unlocked. Check again before we * fail. */ if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL) { MPASS(slab->us_keg == keg); return (slab); } return (NULL); } static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int domain, int flags) { uma_slab_t slab; if (keg == NULL) { keg = zone_first_keg(zone); KEG_LOCK(keg); } for (;;) { slab = keg_fetch_slab(keg, zone, domain, flags); if (slab) return (slab); if (flags & (M_NOWAIT | M_NOVM)) break; } KEG_UNLOCK(keg); return (NULL); } /* * uma_zone_fetch_slab_multi: Fetches a slab from one available keg. Returns * with the keg locked. On NULL no lock is held. * * The last pointer is used to seed the search. It is not required. */ static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int domain, int rflags) { uma_klink_t klink; uma_slab_t slab; uma_keg_t keg; int flags; int empty; int full; /* * Don't wait on the first pass. This will skip limit tests * as well. We don't want to block if we can find a provider * without blocking. */ flags = (rflags & ~M_WAITOK) | M_NOWAIT; /* * Use the last slab allocated as a hint for where to start * the search. */ if (last != NULL) { slab = keg_fetch_slab(last, zone, domain, flags); if (slab) return (slab); KEG_UNLOCK(last); } /* * Loop until we have a slab incase of transient failures * while M_WAITOK is specified. I'm not sure this is 100% * required but we've done it for so long now. */ for (;;) { empty = 0; full = 0; /* * Search the available kegs for slabs. Be careful to hold the * correct lock while calling into the keg layer. */ LIST_FOREACH(klink, &zone->uz_kegs, kl_link) { keg = klink->kl_keg; KEG_LOCK(keg); if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) { slab = keg_fetch_slab(keg, zone, domain, flags); if (slab) return (slab); } if (keg->uk_flags & UMA_ZFLAG_FULL) full++; else empty++; KEG_UNLOCK(keg); } if (rflags & (M_NOWAIT | M_NOVM)) break; flags = rflags; /* * All kegs are full. XXX We can't atomically check all kegs * and sleep so just sleep for a short period and retry. 
*/ if (full && !empty) { ZONE_LOCK(zone); zone->uz_flags |= UMA_ZFLAG_FULL; zone->uz_sleeps++; zone_log_warning(zone); zone_maxaction(zone); msleep(zone, zone->uz_lockptr, PVM, "zonelimit", hz/100); zone->uz_flags &= ~UMA_ZFLAG_FULL; ZONE_UNLOCK(zone); continue; } } return (NULL); } static void * slab_alloc_item(uma_keg_t keg, uma_slab_t slab) { uma_domain_t dom; void *item; uint8_t freei; MPASS(keg == slab->us_keg); mtx_assert(&keg->uk_lock, MA_OWNED); freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1; BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free); item = slab->us_data + (keg->uk_rsize * freei); slab->us_freecount--; keg->uk_free--; /* Move this slab to the full list */ if (slab->us_freecount == 0) { LIST_REMOVE(slab, us_link); dom = &keg->uk_domain[slab->us_domain]; LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link); } return (item); } static int zone_import(uma_zone_t zone, void **bucket, int max, int domain, int flags) { uma_slab_t slab; uma_keg_t keg; #ifdef NUMA int stripe; #endif int i; slab = NULL; keg = NULL; /* Try to keep the buckets totally full */ for (i = 0; i < max; ) { if ((slab = zone->uz_slab(zone, keg, domain, flags)) == NULL) break; keg = slab->us_keg; #ifdef NUMA stripe = howmany(max, vm_ndomains); #endif while (slab->us_freecount && i < max) { bucket[i++] = slab_alloc_item(keg, slab); if (keg->uk_free <= keg->uk_reserve) break; #ifdef NUMA /* * If the zone is striped we pick a new slab for every * N allocations. Eliminating this conditional will * instead pick a new domain for each bucket rather * than stripe within each bucket. The current option * produces more fragmentation and requires more cpu * time but yields better distribution. */ if ((zone->uz_flags & UMA_ZONE_NUMA) == 0 && vm_ndomains > 1 && --stripe == 0) break; #endif } /* Don't block if we allocated any successfully. */ flags &= ~M_WAITOK; flags |= M_NOWAIT; } if (slab != NULL) KEG_UNLOCK(keg); return i; } static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags) { uma_bucket_t bucket; int max; CTR1(KTR_UMA, "zone_alloc:_bucket domain %d)", domain); /* Don't wait for buckets, preserve caller's NOVM setting. */ bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM)); if (bucket == NULL) return (NULL); max = MIN(bucket->ub_entries, zone->uz_count); bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket, max, domain, flags); /* * Initialize the memory if necessary. */ if (bucket->ub_cnt != 0 && zone->uz_init != NULL) { int i; for (i = 0; i < bucket->ub_cnt; i++) if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size, flags) != 0) break; /* * If we couldn't initialize the whole bucket, put the * rest back onto the freelist. */ if (i != bucket->ub_cnt) { zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i], bucket->ub_cnt - i); #ifdef INVARIANTS bzero(&bucket->ub_bucket[i], sizeof(void *) * (bucket->ub_cnt - i)); #endif bucket->ub_cnt = i; } } if (bucket->ub_cnt == 0) { bucket_free(zone, bucket, udata); atomic_add_long(&zone->uz_fails, 1); return (NULL); } return (bucket); } /* * Allocates a single item from a zone. * * Arguments * zone The zone to alloc for. * udata The data to be passed to the constructor. * domain The domain to allocate from or UMA_ANYDOMAIN. * flags M_WAITOK, M_NOWAIT, M_ZERO. 
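/*
 * Editor's sketch of the striping described in zone_import() above: when a
 * round-robin zone fills a bucket of "max" items, the bucket is split into
 * runs of howmany(max, ndomains) items so that each run comes from a
 * different domain.  The real code achieves this by breaking out of the
 * slab loop and letting the keg's domain iterator advance; this toy just
 * records which domain each slot would come from.
 */
#include <stdio.h>

#define howmany(x, y)	(((x) + ((y) - 1)) / (y))

int
main(void)
{
	int ndomains = 3, max = 8;
	int stripe = howmany(max, ndomains);	/* items drawn per domain */
	int domain = 0, run = stripe;
	int bucket[8];

	for (int i = 0; i < max; i++) {
		bucket[i] = domain;		/* pretend item from "domain" */
		if (--run == 0) {
			domain = (domain + 1) % ndomains;
			run = stripe;
		}
	}
	for (int i = 0; i < max; i++)
		printf("slot %d filled from domain %d\n", i, bucket[i]);
	return (0);
}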
* * Returns * NULL if there is no memory and M_NOWAIT is set * An item if successful */ static void * zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags) { void *item; #ifdef INVARIANTS bool skipdbg; #endif item = NULL; if (domain != UMA_ANYDOMAIN) { /* avoid allocs targeting empty domains */ if (VM_DOMAIN_EMPTY(domain)) domain = UMA_ANYDOMAIN; } if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1) goto fail; atomic_add_long(&zone->uz_allocs, 1); #ifdef INVARIANTS skipdbg = uma_dbg_zskip(zone, item); #endif /* * We have to call both the zone's init (not the keg's init) * and the zone's ctor. This is because the item is going from * a keg slab directly to the user, and the user is expecting it * to be both zone-init'd as well as zone-ctor'd. */ if (zone->uz_init != NULL) { if (zone->uz_init(item, zone->uz_size, flags) != 0) { zone_free_item(zone, item, udata, SKIP_FINI); goto fail; } } if (zone->uz_ctor != NULL && #ifdef INVARIANTS (!skipdbg || zone->uz_ctor != trash_ctor || zone->uz_dtor != trash_dtor) && #endif zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { zone_free_item(zone, item, udata, SKIP_DTOR); goto fail; } #ifdef INVARIANTS if (!skipdbg) uma_dbg_alloc(zone, NULL, item); #endif if (flags & M_ZERO) uma_zero_item(item, zone); CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item, zone->uz_name, zone); return (item); fail: CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)", zone->uz_name, zone); atomic_add_long(&zone->uz_fails, 1); return (NULL); } /* See uma.h */ void uma_zfree_arg(uma_zone_t zone, void *item, void *udata) { uma_cache_t cache; uma_bucket_t bucket; uma_zone_domain_t zdom; int cpu, domain, lockfail; #ifdef INVARIANTS bool skipdbg; #endif /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread, zone->uz_name); KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), ("uma_zfree_arg: called with spinlock or critical section held")); /* uma_zfree(..., NULL) does nothing, to match free(9). */ if (item == NULL) return; #ifdef DEBUG_MEMGUARD if (is_memguard_addr(item)) { if (zone->uz_dtor != NULL) zone->uz_dtor(item, zone->uz_size, udata); if (zone->uz_fini != NULL) zone->uz_fini(item, zone->uz_size); memguard_free(item); return; } #endif #ifdef INVARIANTS skipdbg = uma_dbg_zskip(zone, item); if (skipdbg == false) { if (zone->uz_flags & UMA_ZONE_MALLOC) uma_dbg_free(zone, udata, item); else uma_dbg_free(zone, NULL, item); } if (zone->uz_dtor != NULL && (!skipdbg || zone->uz_dtor != trash_dtor || zone->uz_ctor != trash_ctor)) #else if (zone->uz_dtor != NULL) #endif zone->uz_dtor(item, zone->uz_size, udata); /* * The race here is acceptable. If we miss it we'll just have to wait * a little longer for the limits to be reset. */ if (zone->uz_flags & UMA_ZFLAG_FULL) goto zfree_item; /* * If possible, free to the per-CPU cache. There are two * requirements for safe access to the per-CPU cache: (1) the thread * accessing the cache must not be preempted or yield during access, * and (2) the thread must not migrate CPUs without switching which * cache it accesses. We rely on a critical section to prevent * preemption and migration. We release the critical section in * order to acquire the zone mutex if we are unable to free to the * current cache; when we re-acquire the critical section, we must * detect and handle migration if it has occurred. 
*/ zfree_restart: critical_enter(); cpu = curcpu; cache = &zone->uz_cpu[cpu]; zfree_start: /* * Try to free into the allocbucket first to give LIFO ordering * for cache-hot datastructures. Spill over into the freebucket * if necessary. Alloc will swap them if one runs dry. */ bucket = cache->uc_allocbucket; if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries) bucket = cache->uc_freebucket; if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) { KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL, ("uma_zfree: Freeing to non free bucket index.")); bucket->ub_bucket[bucket->ub_cnt] = item; bucket->ub_cnt++; cache->uc_frees++; critical_exit(); return; } /* * We must go back the zone, which requires acquiring the zone lock, * which in turn means we must release and re-acquire the critical * section. Since the critical section is released, we may be * preempted or migrate. As such, make sure not to maintain any * thread-local state specific to the cache from prior to releasing * the critical section. */ critical_exit(); if (zone->uz_count == 0 || bucketdisable) goto zfree_item; lockfail = 0; if (ZONE_TRYLOCK(zone) == 0) { /* Record contention to size the buckets. */ ZONE_LOCK(zone); lockfail = 1; } critical_enter(); cpu = curcpu; cache = &zone->uz_cpu[cpu]; bucket = cache->uc_freebucket; if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) { ZONE_UNLOCK(zone); goto zfree_start; } cache->uc_freebucket = NULL; /* We are no longer associated with this CPU. */ critical_exit(); if ((zone->uz_flags & UMA_ZONE_NUMA) != 0) { domain = PCPU_GET(domain); if (VM_DOMAIN_EMPTY(domain)) domain = UMA_ANYDOMAIN; } else domain = 0; zdom = &zone->uz_domain[0]; /* Can we throw this on the zone full list? */ if (bucket != NULL) { CTR3(KTR_UMA, "uma_zfree: zone %s(%p) putting bucket %p on free list", zone->uz_name, zone, bucket); /* ub_cnt is pointing to the last free item */ KASSERT(bucket->ub_cnt != 0, ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n")); if ((zone->uz_flags & UMA_ZONE_NOBUCKETCACHE) != 0) { ZONE_UNLOCK(zone); bucket_drain(zone, bucket); bucket_free(zone, bucket, udata); goto zfree_restart; } else - LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link); + zone_put_bucket(zone, zdom, bucket, true); } /* * We bump the uz count when the cache size is insufficient to * handle the working set. */ if (lockfail && zone->uz_count < BUCKET_MAX) zone->uz_count++; ZONE_UNLOCK(zone); bucket = bucket_alloc(zone, udata, M_NOWAIT); CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p", zone->uz_name, zone, bucket); if (bucket) { critical_enter(); cpu = curcpu; cache = &zone->uz_cpu[cpu]; if (cache->uc_freebucket == NULL && ((zone->uz_flags & UMA_ZONE_NUMA) == 0 || domain == PCPU_GET(domain))) { cache->uc_freebucket = bucket; goto zfree_start; } /* * We lost the race, start over. We have to drop our * critical section to free the bucket. */ critical_exit(); bucket_free(zone, bucket, udata); goto zfree_restart; } /* * If nothing else caught this, we'll just do an internal free. 
*/ zfree_item: zone_free_item(zone, item, udata, SKIP_DTOR); return; } void uma_zfree_domain(uma_zone_t zone, void *item, void *udata) { /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread, zone->uz_name); KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), ("uma_zfree_domain: called with spinlock or critical section held")); /* uma_zfree(..., NULL) does nothing, to match free(9). */ if (item == NULL) return; zone_free_item(zone, item, udata, SKIP_NONE); } static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item) { uma_domain_t dom; uint8_t freei; mtx_assert(&keg->uk_lock, MA_OWNED); MPASS(keg == slab->us_keg); dom = &keg->uk_domain[slab->us_domain]; /* Do we need to remove from any lists? */ if (slab->us_freecount+1 == keg->uk_ipers) { LIST_REMOVE(slab, us_link); LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link); } else if (slab->us_freecount == 0) { LIST_REMOVE(slab, us_link); LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link); } /* Slab management. */ freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize; BIT_SET(SLAB_SETSIZE, freei, &slab->us_free); slab->us_freecount++; /* Keg statistics. */ keg->uk_free++; } static void zone_release(uma_zone_t zone, void **bucket, int cnt) { void *item; uma_slab_t slab; uma_keg_t keg; uint8_t *mem; int clearfull; int i; clearfull = 0; keg = zone_first_keg(zone); KEG_LOCK(keg); for (i = 0; i < cnt; i++) { item = bucket[i]; if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) { mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)); if (zone->uz_flags & UMA_ZONE_HASH) { slab = hash_sfind(&keg->uk_hash, mem); } else { mem += keg->uk_pgoff; slab = (uma_slab_t)mem; } } else { slab = vtoslab((vm_offset_t)item); if (slab->us_keg != keg) { KEG_UNLOCK(keg); keg = slab->us_keg; KEG_LOCK(keg); } } slab_free_item(keg, slab, item); if (keg->uk_flags & UMA_ZFLAG_FULL) { if (keg->uk_pages < keg->uk_maxpages) { keg->uk_flags &= ~UMA_ZFLAG_FULL; clearfull = 1; } /* * We can handle one more allocation. Since we're * clearing ZFLAG_FULL, wake up all procs blocked * on pages. This should be uncommon, so keeping this * simple for now (rather than adding count of blocked * threads etc). */ wakeup(keg); } } KEG_UNLOCK(keg); if (clearfull) { ZONE_LOCK(zone); zone->uz_flags &= ~UMA_ZFLAG_FULL; wakeup(zone); ZONE_UNLOCK(zone); } } /* * Frees a single item to any zone. 
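/*
 * Editor's sketch of the bookkeeping in slab_free_item() above: returning
 * an item marks its slot free in the slab's bitmap, bumps the free count,
 * and migrates the slab between the full, partial and fully-free lists.
 * The lists are reduced to an enum and the bitset to an unsigned long;
 * both are inventions of this example.
 */
#include <assert.h>
#include <stdio.h>

enum slab_list { LIST_FULL, LIST_PART, LIST_FREE };

struct toy_slab {
	unsigned long	freemap;	/* bit set => slot is free */
	int		freecount;
	int		ipers;		/* items per slab */
	enum slab_list	list;
};

static void
toy_slab_free(struct toy_slab *s, int slot)
{
	assert((s->freemap & (1UL << slot)) == 0);	/* catch double free */

	if (s->freecount + 1 == s->ipers)
		s->list = LIST_FREE;	/* the last outstanding item came back */
	else if (s->freecount == 0)
		s->list = LIST_PART;	/* slab was full, now partially used */

	s->freemap |= 1UL << slot;
	s->freecount++;
}

int
main(void)
{
	struct toy_slab s = { 0, 0, 2, LIST_FULL };

	toy_slab_free(&s, 0);
	printf("one free:  count %d, list %d\n", s.freecount, s.list);
	toy_slab_free(&s, 1);
	printf("two frees: count %d, list %d\n", s.freecount, s.list);
	return (0);
}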
* * Arguments: * zone The zone to free to * item The item we're freeing * udata User supplied data for the dtor * skip Skip dtors and finis */ static void zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip) { #ifdef INVARIANTS bool skipdbg; skipdbg = uma_dbg_zskip(zone, item); if (skip == SKIP_NONE && !skipdbg) { if (zone->uz_flags & UMA_ZONE_MALLOC) uma_dbg_free(zone, udata, item); else uma_dbg_free(zone, NULL, item); } if (skip < SKIP_DTOR && zone->uz_dtor != NULL && (!skipdbg || zone->uz_dtor != trash_dtor || zone->uz_ctor != trash_ctor)) #else if (skip < SKIP_DTOR && zone->uz_dtor != NULL) #endif zone->uz_dtor(item, zone->uz_size, udata); if (skip < SKIP_FINI && zone->uz_fini) zone->uz_fini(item, zone->uz_size); atomic_add_long(&zone->uz_frees, 1); zone->uz_release(zone->uz_arg, &item, 1); } /* See uma.h */ int uma_zone_set_max(uma_zone_t zone, int nitems) { uma_keg_t keg; keg = zone_first_keg(zone); if (keg == NULL) return (0); KEG_LOCK(keg); keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera; if (keg->uk_maxpages * keg->uk_ipers < nitems) keg->uk_maxpages += keg->uk_ppera; nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers; KEG_UNLOCK(keg); return (nitems); } /* See uma.h */ int uma_zone_get_max(uma_zone_t zone) { int nitems; uma_keg_t keg; keg = zone_first_keg(zone); if (keg == NULL) return (0); KEG_LOCK(keg); nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers; KEG_UNLOCK(keg); return (nitems); } /* See uma.h */ void uma_zone_set_warning(uma_zone_t zone, const char *warning) { ZONE_LOCK(zone); zone->uz_warning = warning; ZONE_UNLOCK(zone); } /* See uma.h */ void uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction) { ZONE_LOCK(zone); TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone); ZONE_UNLOCK(zone); } /* See uma.h */ int uma_zone_get_cur(uma_zone_t zone) { int64_t nitems; u_int i; ZONE_LOCK(zone); nitems = zone->uz_allocs - zone->uz_frees; CPU_FOREACH(i) { /* * See the comment in sysctl_vm_zone_stats() regarding the * safety of accessing the per-cpu caches. With the zone lock * held, it is safe, but can potentially result in stale data. */ nitems += zone->uz_cpu[i].uc_allocs - zone->uz_cpu[i].uc_frees; } ZONE_UNLOCK(zone); return (nitems < 0 ? 
0 : nitems); } /* See uma.h */ void uma_zone_set_init(uma_zone_t zone, uma_init uminit) { uma_keg_t keg; keg = zone_first_keg(zone); KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type")); KEG_LOCK(keg); KASSERT(keg->uk_pages == 0, ("uma_zone_set_init on non-empty keg")); keg->uk_init = uminit; KEG_UNLOCK(keg); } /* See uma.h */ void uma_zone_set_fini(uma_zone_t zone, uma_fini fini) { uma_keg_t keg; keg = zone_first_keg(zone); KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type")); KEG_LOCK(keg); KASSERT(keg->uk_pages == 0, ("uma_zone_set_fini on non-empty keg")); keg->uk_fini = fini; KEG_UNLOCK(keg); } /* See uma.h */ void uma_zone_set_zinit(uma_zone_t zone, uma_init zinit) { ZONE_LOCK(zone); KASSERT(zone_first_keg(zone)->uk_pages == 0, ("uma_zone_set_zinit on non-empty keg")); zone->uz_init = zinit; ZONE_UNLOCK(zone); } /* See uma.h */ void uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini) { ZONE_LOCK(zone); KASSERT(zone_first_keg(zone)->uk_pages == 0, ("uma_zone_set_zfini on non-empty keg")); zone->uz_fini = zfini; ZONE_UNLOCK(zone); } /* See uma.h */ /* XXX uk_freef is not actually used with the zone locked */ void uma_zone_set_freef(uma_zone_t zone, uma_free freef) { uma_keg_t keg; keg = zone_first_keg(zone); KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type")); KEG_LOCK(keg); keg->uk_freef = freef; KEG_UNLOCK(keg); } /* See uma.h */ /* XXX uk_allocf is not actually used with the zone locked */ void uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf) { uma_keg_t keg; keg = zone_first_keg(zone); KEG_LOCK(keg); keg->uk_allocf = allocf; KEG_UNLOCK(keg); } /* See uma.h */ void uma_zone_reserve(uma_zone_t zone, int items) { uma_keg_t keg; keg = zone_first_keg(zone); if (keg == NULL) return; KEG_LOCK(keg); keg->uk_reserve = items; KEG_UNLOCK(keg); return; } /* See uma.h */ int uma_zone_reserve_kva(uma_zone_t zone, int count) { uma_keg_t keg; vm_offset_t kva; u_int pages; keg = zone_first_keg(zone); if (keg == NULL) return (0); pages = count / keg->uk_ipers; if (pages * keg->uk_ipers < count) pages++; pages *= keg->uk_ppera; #ifdef UMA_MD_SMALL_ALLOC if (keg->uk_ppera > 1) { #else if (1) { #endif kva = kva_alloc((vm_size_t)pages * PAGE_SIZE); if (kva == 0) return (0); } else kva = 0; KEG_LOCK(keg); keg->uk_kva = kva; keg->uk_offset = 0; keg->uk_maxpages = pages; #ifdef UMA_MD_SMALL_ALLOC keg->uk_allocf = (keg->uk_ppera > 1) ? 
noobj_alloc : uma_small_alloc; #else keg->uk_allocf = noobj_alloc; #endif keg->uk_flags |= UMA_ZONE_NOFREE; KEG_UNLOCK(keg); return (1); } /* See uma.h */ void uma_prealloc(uma_zone_t zone, int items) { struct vm_domainset_iter di; uma_domain_t dom; uma_slab_t slab; uma_keg_t keg; int domain, flags, slabs; keg = zone_first_keg(zone); if (keg == NULL) return; KEG_LOCK(keg); slabs = items / keg->uk_ipers; if (slabs * keg->uk_ipers < items) slabs++; flags = M_WAITOK; vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain, &flags); while (slabs-- > 0) { slab = keg_alloc_slab(keg, zone, domain, flags); if (slab == NULL) return; MPASS(slab->us_keg == keg); dom = &keg->uk_domain[slab->us_domain]; LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link); if (vm_domainset_iter_policy(&di, &domain) != 0) break; } KEG_UNLOCK(keg); } /* See uma.h */ static void uma_reclaim_locked(bool kmem_danger) { CTR0(KTR_UMA, "UMA: vm asked us to release pages!"); sx_assert(&uma_drain_lock, SA_XLOCKED); bucket_enable(); zone_foreach(zone_drain); if (vm_page_count_min() || kmem_danger) { cache_drain_safe(NULL); zone_foreach(zone_drain); } + /* * Some slabs may have been freed but this zone will be visited early * we visit again so that we can free pages that are empty once other * zones are drained. We have to do the same for buckets. */ zone_drain(slabzone); bucket_zone_drain(); } void uma_reclaim(void) { sx_xlock(&uma_drain_lock); uma_reclaim_locked(false); sx_xunlock(&uma_drain_lock); } static volatile int uma_reclaim_needed; void uma_reclaim_wakeup(void) { if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0) wakeup(uma_reclaim); } void uma_reclaim_worker(void *arg __unused) { for (;;) { sx_xlock(&uma_drain_lock); while (atomic_load_int(&uma_reclaim_needed) == 0) sx_sleep(uma_reclaim, &uma_drain_lock, PVM, "umarcl", hz); sx_xunlock(&uma_drain_lock); EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM); sx_xlock(&uma_drain_lock); uma_reclaim_locked(true); atomic_store_int(&uma_reclaim_needed, 0); sx_xunlock(&uma_drain_lock); /* Don't fire more than once per-second. */ pause("umarclslp", hz); } } /* See uma.h */ int uma_zone_exhausted(uma_zone_t zone) { int full; ZONE_LOCK(zone); full = (zone->uz_flags & UMA_ZFLAG_FULL); ZONE_UNLOCK(zone); return (full); } int uma_zone_exhausted_nolock(uma_zone_t zone) { return (zone->uz_flags & UMA_ZFLAG_FULL); } void * uma_large_malloc_domain(vm_size_t size, int domain, int wait) { struct domainset *policy; vm_offset_t addr; uma_slab_t slab; if (domain != UMA_ANYDOMAIN) { /* avoid allocs targeting empty domains */ if (VM_DOMAIN_EMPTY(domain)) domain = UMA_ANYDOMAIN; } slab = zone_alloc_item(slabzone, NULL, domain, wait); if (slab == NULL) return (NULL); policy = (domain == UMA_ANYDOMAIN) ? 
DOMAINSET_RR() : DOMAINSET_FIXED(domain); addr = kmem_malloc_domainset(policy, size, wait); if (addr != 0) { vsetslab(addr, slab); slab->us_data = (void *)addr; slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC; slab->us_size = size; slab->us_domain = vm_phys_domain(PHYS_TO_VM_PAGE( pmap_kextract(addr))); uma_total_inc(size); } else { zone_free_item(slabzone, slab, NULL, SKIP_NONE); } return ((void *)addr); } void * uma_large_malloc(vm_size_t size, int wait) { return uma_large_malloc_domain(size, UMA_ANYDOMAIN, wait); } void uma_large_free(uma_slab_t slab) { KASSERT((slab->us_flags & UMA_SLAB_KERNEL) != 0, ("uma_large_free: Memory not allocated with uma_large_malloc.")); kmem_free((vm_offset_t)slab->us_data, slab->us_size); uma_total_dec(slab->us_size); zone_free_item(slabzone, slab, NULL, SKIP_NONE); } static void uma_zero_item(void *item, uma_zone_t zone) { bzero(item, zone->uz_size); } unsigned long uma_limit(void) { return (uma_kmem_limit); } void uma_set_limit(unsigned long limit) { uma_kmem_limit = limit; } unsigned long uma_size(void) { return (uma_kmem_total); } long uma_avail(void) { return (uma_kmem_limit - uma_kmem_total); } void uma_print_stats(void) { zone_foreach(uma_print_zone); } static void slab_print(uma_slab_t slab) { printf("slab: keg %p, data %p, freecount %d\n", slab->us_keg, slab->us_data, slab->us_freecount); } static void cache_print(uma_cache_t cache) { printf("alloc: %p(%d), free: %p(%d)\n", cache->uc_allocbucket, cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0, cache->uc_freebucket, cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0); } static void uma_print_keg(uma_keg_t keg) { uma_domain_t dom; uma_slab_t slab; int i; printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d " "out %d free %d limit %d\n", keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags, keg->uk_ipers, keg->uk_ppera, (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free, keg->uk_free, (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers); for (i = 0; i < vm_ndomains; i++) { dom = &keg->uk_domain[i]; printf("Part slabs:\n"); LIST_FOREACH(slab, &dom->ud_part_slab, us_link) slab_print(slab); printf("Free slabs:\n"); LIST_FOREACH(slab, &dom->ud_free_slab, us_link) slab_print(slab); printf("Full slabs:\n"); LIST_FOREACH(slab, &dom->ud_full_slab, us_link) slab_print(slab); } } void uma_print_zone(uma_zone_t zone) { uma_cache_t cache; uma_klink_t kl; int i; printf("zone: %s(%p) size %d flags %#x\n", zone->uz_name, zone, zone->uz_size, zone->uz_flags); LIST_FOREACH(kl, &zone->uz_kegs, kl_link) uma_print_keg(kl->kl_keg); CPU_FOREACH(i) { cache = &zone->uz_cpu[i]; printf("CPU %d Cache:\n", i); cache_print(cache); } } #ifdef DDB /* * Generate statistics across both the zone and its per-cpu cache's. Return * desired statistics if the pointer is non-NULL for that statistic. * * Note: does not update the zone statistics, as it can't safely clear the * per-CPU cache statistic. * * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't * safe from off-CPU; we should modify the caches to track this information * directly so that we don't have to. 
*/ static void -uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp, +uma_zone_sumstat(uma_zone_t z, long *cachefreep, uint64_t *allocsp, uint64_t *freesp, uint64_t *sleepsp) { uma_cache_t cache; uint64_t allocs, frees, sleeps; int cachefree, cpu; allocs = frees = sleeps = 0; cachefree = 0; CPU_FOREACH(cpu) { cache = &z->uz_cpu[cpu]; if (cache->uc_allocbucket != NULL) cachefree += cache->uc_allocbucket->ub_cnt; if (cache->uc_freebucket != NULL) cachefree += cache->uc_freebucket->ub_cnt; allocs += cache->uc_allocs; frees += cache->uc_frees; } allocs += z->uz_allocs; frees += z->uz_frees; sleeps += z->uz_sleeps; if (cachefreep != NULL) *cachefreep = cachefree; if (allocsp != NULL) *allocsp = allocs; if (freesp != NULL) *freesp = frees; if (sleepsp != NULL) *sleepsp = sleeps; } #endif /* DDB */ static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS) { uma_keg_t kz; uma_zone_t z; int count; count = 0; rw_rlock(&uma_rwlock); LIST_FOREACH(kz, &uma_kegs, uk_link) { LIST_FOREACH(z, &kz->uk_zones, uz_link) count++; } rw_runlock(&uma_rwlock); return (sysctl_handle_int(oidp, &count, 0, req)); } static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS) { struct uma_stream_header ush; struct uma_type_header uth; struct uma_percpu_stat *ups; - uma_bucket_t bucket; uma_zone_domain_t zdom; struct sbuf sbuf; uma_cache_t cache; uma_klink_t kl; uma_keg_t kz; uma_zone_t z; uma_keg_t k; int count, error, i; error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); sbuf_new_for_sysctl(&sbuf, NULL, 128, req); sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL); ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK); count = 0; rw_rlock(&uma_rwlock); LIST_FOREACH(kz, &uma_kegs, uk_link) { LIST_FOREACH(z, &kz->uk_zones, uz_link) count++; } /* * Insert stream header. */ bzero(&ush, sizeof(ush)); ush.ush_version = UMA_STREAM_VERSION; ush.ush_maxcpus = (mp_maxid + 1); ush.ush_count = count; (void)sbuf_bcat(&sbuf, &ush, sizeof(ush)); LIST_FOREACH(kz, &uma_kegs, uk_link) { LIST_FOREACH(z, &kz->uk_zones, uz_link) { bzero(&uth, sizeof(uth)); ZONE_LOCK(z); strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME); uth.uth_align = kz->uk_align; uth.uth_size = kz->uk_size; uth.uth_rsize = kz->uk_rsize; LIST_FOREACH(kl, &z->uz_kegs, kl_link) { k = kl->kl_keg; uth.uth_maxpages += k->uk_maxpages; uth.uth_pages += k->uk_pages; uth.uth_keg_free += k->uk_free; uth.uth_limit = (k->uk_maxpages / k->uk_ppera) * k->uk_ipers; } /* * A zone is secondary is it is not the first entry * on the keg's zone list. */ if ((z->uz_flags & UMA_ZONE_SECONDARY) && (LIST_FIRST(&kz->uk_zones) != z)) uth.uth_zone_flags = UTH_ZONE_SECONDARY; for (i = 0; i < vm_ndomains; i++) { zdom = &z->uz_domain[i]; - LIST_FOREACH(bucket, &zdom->uzd_buckets, - ub_link) - uth.uth_zone_free += bucket->ub_cnt; + uth.uth_zone_free += zdom->uzd_nitems; } uth.uth_allocs = z->uz_allocs; uth.uth_frees = z->uz_frees; uth.uth_fails = z->uz_fails; uth.uth_sleeps = z->uz_sleeps; /* * While it is not normally safe to access the cache * bucket pointers while not on the CPU that owns the * cache, we only allow the pointers to be exchanged * without the zone lock held, not invalidated, so * accept the possible race associated with bucket * exchange during monitoring. 
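/*
 * Editor's sketch of the bookkeeping change this revision makes: rather
 * than walking every cached bucket to count free items (the removed
 * LIST_FOREACH loops), each per-domain bucket cache keeps a running total
 * that zone_put_bucket()/zone_try_fetch_bucket() maintain, so the sysctl
 * and DDB readers can read a single field (uzd_nitems).  The structures
 * below are toys, not the kernel's.
 */
#include <stdio.h>

struct toy_bucket { struct toy_bucket *next; int cnt; };

struct toy_domain {
	struct toy_bucket *buckets;	/* cached full buckets */
	long nitems;			/* running total, like uzd_nitems */
};

static void
toy_put_bucket(struct toy_domain *dom, struct toy_bucket *b)
{
	b->next = dom->buckets;
	dom->buckets = b;
	dom->nitems += b->cnt;		/* keep the aggregate in sync */
}

static struct toy_bucket *
toy_fetch_bucket(struct toy_domain *dom)
{
	struct toy_bucket *b = dom->buckets;

	if (b != NULL) {
		dom->buckets = b->next;
		dom->nitems -= b->cnt;
	}
	return (b);
}

int
main(void)
{
	struct toy_domain dom = { NULL, 0 };
	struct toy_bucket b1 = { NULL, 128 }, b2 = { NULL, 64 };

	toy_put_bucket(&dom, &b1);
	toy_put_bucket(&dom, &b2);
	printf("cached free items: %ld\n", dom.nitems);	/* 192, no list walk */
	toy_fetch_bucket(&dom);
	printf("after one fetch:   %ld\n", dom.nitems);	/* back to 128 */
	return (0);
}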
*/ for (i = 0; i < mp_maxid + 1; i++) { bzero(&ups[i], sizeof(*ups)); if (kz->uk_flags & UMA_ZFLAG_INTERNAL || CPU_ABSENT(i)) continue; cache = &z->uz_cpu[i]; if (cache->uc_allocbucket != NULL) ups[i].ups_cache_free += cache->uc_allocbucket->ub_cnt; if (cache->uc_freebucket != NULL) ups[i].ups_cache_free += cache->uc_freebucket->ub_cnt; ups[i].ups_allocs = cache->uc_allocs; ups[i].ups_frees = cache->uc_frees; } ZONE_UNLOCK(z); (void)sbuf_bcat(&sbuf, &uth, sizeof(uth)); for (i = 0; i < mp_maxid + 1; i++) (void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i])); } } rw_runlock(&uma_rwlock); error = sbuf_finish(&sbuf); sbuf_delete(&sbuf); free(ups, M_TEMP); return (error); } int sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS) { uma_zone_t zone = *(uma_zone_t *)arg1; int error, max; max = uma_zone_get_max(zone); error = sysctl_handle_int(oidp, &max, 0, req); if (error || !req->newptr) return (error); uma_zone_set_max(zone, max); return (0); } int sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS) { uma_zone_t zone = *(uma_zone_t *)arg1; int cur; cur = uma_zone_get_cur(zone); return (sysctl_handle_int(oidp, &cur, 0, req)); } #ifdef INVARIANTS static uma_slab_t uma_dbg_getslab(uma_zone_t zone, void *item) { uma_slab_t slab; uma_keg_t keg; uint8_t *mem; mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)); if (zone->uz_flags & UMA_ZONE_VTOSLAB) { slab = vtoslab((vm_offset_t)mem); } else { /* * It is safe to return the slab here even though the * zone is unlocked because the item's allocation state * essentially holds a reference. */ ZONE_LOCK(zone); keg = LIST_FIRST(&zone->uz_kegs)->kl_keg; if (keg->uk_flags & UMA_ZONE_HASH) slab = hash_sfind(&keg->uk_hash, mem); else slab = (uma_slab_t)(mem + keg->uk_pgoff); ZONE_UNLOCK(zone); } return (slab); } static bool uma_dbg_zskip(uma_zone_t zone, void *mem) { uma_keg_t keg; if ((keg = zone_first_keg(zone)) == NULL) return (true); return (uma_dbg_kskip(keg, mem)); } static bool uma_dbg_kskip(uma_keg_t keg, void *mem) { uintptr_t idx; if (dbg_divisor == 0) return (true); if (dbg_divisor == 1) return (false); idx = (uintptr_t)mem >> PAGE_SHIFT; if (keg->uk_ipers > 1) { idx *= keg->uk_ipers; idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize; } if ((idx / dbg_divisor) * dbg_divisor != idx) { counter_u64_add(uma_skip_cnt, 1); return (true); } counter_u64_add(uma_dbg_cnt, 1); return (false); } /* * Set up the slab's freei data such that uma_dbg_free can function. * */ static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item) { uma_keg_t keg; int freei; if (slab == NULL) { slab = uma_dbg_getslab(zone, item); if (slab == NULL) panic("uma: item %p did not belong to zone %s\n", item, zone->uz_name); } keg = slab->us_keg; freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize; if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree)) panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n", item, zone, zone->uz_name, slab, freei); BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree); return; } /* * Verifies freed addresses. Checks for alignment, valid slab membership * and duplicate frees. 
* */ static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item) { uma_keg_t keg; int freei; if (slab == NULL) { slab = uma_dbg_getslab(zone, item); if (slab == NULL) panic("uma: Freed item %p did not belong to zone %s\n", item, zone->uz_name); } keg = slab->us_keg; freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize; if (freei >= keg->uk_ipers) panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n", item, zone, zone->uz_name, slab, freei); if (((freei * keg->uk_rsize) + slab->us_data) != item) panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n", item, zone, zone->uz_name, slab, freei); if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree)) panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n", item, zone, zone->uz_name, slab, freei); BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree); } #endif /* INVARIANTS */ #ifdef DDB DB_SHOW_COMMAND(uma, db_show_uma) { - uma_bucket_t bucket; uma_keg_t kz; uma_zone_t z; - uma_zone_domain_t zdom; uint64_t allocs, frees, sleeps; - int cachefree, i; + long cachefree; + int i; db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used", "Free", "Requests", "Sleeps", "Bucket"); LIST_FOREACH(kz, &uma_kegs, uk_link) { LIST_FOREACH(z, &kz->uk_zones, uz_link) { if (kz->uk_flags & UMA_ZFLAG_INTERNAL) { allocs = z->uz_allocs; frees = z->uz_frees; sleeps = z->uz_sleeps; cachefree = 0; } else uma_zone_sumstat(z, &cachefree, &allocs, &frees, &sleeps); if (!((z->uz_flags & UMA_ZONE_SECONDARY) && (LIST_FIRST(&kz->uk_zones) != z))) cachefree += kz->uk_free; - for (i = 0; i < vm_ndomains; i++) { - zdom = &z->uz_domain[i]; - LIST_FOREACH(bucket, &zdom->uzd_buckets, - ub_link) - cachefree += bucket->ub_cnt; - } - db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n", + for (i = 0; i < vm_ndomains; i++) + cachefree += z->uz_domain[i].uzd_nitems; + + db_printf("%18s %8ju %8jd %8ld %12ju %8ju %8u\n", z->uz_name, (uintmax_t)kz->uk_size, (intmax_t)(allocs - frees), cachefree, (uintmax_t)allocs, sleeps, z->uz_count); if (db_pager_quit) return; } } } DB_SHOW_COMMAND(umacache, db_show_umacache) { - uma_bucket_t bucket; uma_zone_t z; - uma_zone_domain_t zdom; uint64_t allocs, frees; - int cachefree, i; + long cachefree; + int i; db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free", "Requests", "Bucket"); LIST_FOREACH(z, &uma_cachezones, uz_link) { uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL); - for (i = 0; i < vm_ndomains; i++) { - zdom = &z->uz_domain[i]; - LIST_FOREACH(bucket, &zdom->uzd_buckets, ub_link) - cachefree += bucket->ub_cnt; - } - db_printf("%18s %8ju %8jd %8d %12ju %8u\n", + for (i = 0; i < vm_ndomains; i++) + cachefree += z->uz_domain[i].uzd_nitems; + db_printf("%18s %8ju %8jd %8ld %12ju %8u\n", z->uz_name, (uintmax_t)z->uz_size, (intmax_t)(allocs - frees), cachefree, (uintmax_t)allocs, z->uz_count); if (db_pager_quit) return; } } #endif /* DDB */ Index: projects/clang700-import/sys/vm/uma_int.h =================================================================== --- projects/clang700-import/sys/vm/uma_int.h (revision 340426) +++ projects/clang700-import/sys/vm/uma_int.h (revision 340427) @@ -1,489 +1,494 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson * Copyright (c) 2004, 2005 Bosko Milekic * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ * */ #include #include #include /* * This file includes definitions, structures, prototypes, and inlines that * should not be used outside of the actual implementation of UMA. */ /* * The brief summary; Zones describe unique allocation types. Zones are * organized into per-CPU caches which are filled by buckets. Buckets are * organized according to memory domains. Buckets are filled from kegs which * are also organized according to memory domains. Kegs describe a unique * allocation type, backend memory provider, and layout. Kegs are associated * with one or more zones and zones reference one or more kegs. Kegs provide * slabs which are virtually contiguous collections of pages. Each slab is * broken down int one or more items that will satisfy an individual allocation. * * Allocation is satisfied in the following order: * 1) Per-CPU cache * 2) Per-domain cache of buckets * 3) Slab from any of N kegs * 4) Backend page provider * * More detail on individual objects is contained below: * * Kegs contain lists of slabs which are stored in either the full bin, empty * bin, or partially allocated bin, to reduce fragmentation. They also contain * the user supplied value for size, which is adjusted for alignment purposes * and rsize is the result of that. The Keg also stores information for * managing a hash of page addresses that maps pages to uma_slab_t structures * for pages that don't have embedded uma_slab_t's. * * Keg slab lists are organized by memory domain to support NUMA allocation * policies. By default allocations are spread across domains to reduce the * potential for hotspots. Special keg creation flags may be specified to * prefer location allocation. However there is no strict enforcement as frees * may happen on any CPU and these are returned to the CPU-local cache * regardless of the originating domain. * * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may * be allocated off the page from a special slab zone. The free list within a * slab is managed with a bitmask. For item sizes that would yield more than * 10% memory waste we potentially allocate a separate uma_slab_t if this will * improve the number of items per slab that will fit. * * The only really gross cases, with regards to memory waste, are for those * items that are just over half the page size. You can get nearly 50% waste, * so you fall back to the memory footprint of the power of two allocator. 
I * have looked at memory allocation sizes on many of the machines available to * me, and there does not seem to be an abundance of allocations at this range * so at this time it may not make sense to optimize for it. This can, of * course, be solved with dynamic slab sizes. * * Kegs may serve multiple Zones but by far most of the time they only serve * one. When a Zone is created, a Keg is allocated and setup for it. While * the backing Keg stores slabs, the Zone caches Buckets of items allocated * from the slabs. Each Zone is equipped with an init/fini and ctor/dtor * pair, as well as with its own set of small per-CPU caches, layered above * the Zone's general Bucket cache. * * The PCPU caches are protected by critical sections, and may be accessed * safely only from their associated CPU, while the Zones backed by the same * Keg all share a common Keg lock (to coalesce contention on the backing * slabs). The backing Keg typically only serves one Zone but in the case of * multiple Zones, one of the Zones is considered the Master Zone and all * Zone-related stats from the Keg are done in the Master Zone. For an * example of a Multi-Zone setup, refer to the Mbuf allocation code. */ /* * This is the representation for normal (Non OFFPAGE slab) * * i == item * s == slab pointer * * <---------------- Page (UMA_SLAB_SIZE) ------------------> * ___________________________________________________________ * | _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ___________ | * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header|| * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________|| * |___________________________________________________________| * * * This is an OFFPAGE slab. These can be larger than UMA_SLAB_SIZE. * * ___________________________________________________________ * | _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ | * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| | * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| | * |___________________________________________________________| * ___________ ^ * |slab header| | * |___________|---* * */ #ifndef VM_UMA_INT_H #define VM_UMA_INT_H #define UMA_SLAB_SIZE PAGE_SIZE /* How big are our slabs? */ #define UMA_SLAB_MASK (PAGE_SIZE - 1) /* Mask to get back to the page */ #define UMA_SLAB_SHIFT PAGE_SHIFT /* Number of bits PAGE_MASK */ /* Max waste percentage before going to off page slab management */ #define UMA_MAX_WASTE 10 /* * Size of memory in a not offpage slab available for actual items. */ #define UMA_SLAB_SPACE (UMA_SLAB_SIZE - sizeof(struct uma_slab)) /* * I doubt there will be many cases where this is exceeded. This is the initial * size of the hash table for uma_slabs that are managed off page. This hash * does expand by powers of two. Currently it doesn't get smaller. */ #define UMA_HASH_SIZE_INIT 32 /* * I should investigate other hashing algorithms. This should yield a low * number of collisions if the pages are relatively contiguous. 
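 * For example, with 4 KB pages the macro below simply discards the low
 * 12 offset bits and masks the page number with uh_hashmask, so slabs
 * backed by nearly contiguous pages spread evenly over the buckets.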
*/ #define UMA_HASH(h, s) ((((uintptr_t)s) >> UMA_SLAB_SHIFT) & (h)->uh_hashmask) #define UMA_HASH_INSERT(h, s, mem) \ SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h), \ (mem))], (s), us_hlink) #define UMA_HASH_REMOVE(h, s, mem) \ SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h), \ (mem))], (s), uma_slab, us_hlink) /* Hash table for freed address -> slab translation */ SLIST_HEAD(slabhead, uma_slab); struct uma_hash { struct slabhead *uh_slab_hash; /* Hash table for slabs */ int uh_hashsize; /* Current size of the hash table */ int uh_hashmask; /* Mask used during hashing */ }; /* * align field or structure to cache line */ #if defined(__amd64__) || defined(__powerpc64__) #define UMA_ALIGN __aligned(128) #else #define UMA_ALIGN #endif /* * Structures for per cpu queues. */ struct uma_bucket { LIST_ENTRY(uma_bucket) ub_link; /* Link into the zone */ int16_t ub_cnt; /* Count of items in bucket. */ int16_t ub_entries; /* Max items. */ void *ub_bucket[]; /* actual allocation storage */ }; typedef struct uma_bucket * uma_bucket_t; struct uma_cache { uma_bucket_t uc_freebucket; /* Bucket we're freeing to */ uma_bucket_t uc_allocbucket; /* Bucket to allocate from */ uint64_t uc_allocs; /* Count of allocations */ uint64_t uc_frees; /* Count of frees */ } UMA_ALIGN; typedef struct uma_cache * uma_cache_t; /* * Per-domain memory list. Embedded in the kegs. */ struct uma_domain { LIST_HEAD(,uma_slab) ud_part_slab; /* partially allocated slabs */ LIST_HEAD(,uma_slab) ud_free_slab; /* empty slab list */ LIST_HEAD(,uma_slab) ud_full_slab; /* full slabs */ }; typedef struct uma_domain * uma_domain_t; /* * Keg management structure * * TODO: Optimize for cache line size * */ struct uma_keg { struct mtx uk_lock; /* Lock for the keg */ struct uma_hash uk_hash; LIST_HEAD(,uma_zone) uk_zones; /* Keg's zones */ struct domainset_ref uk_dr; /* Domain selection policy. */ uint32_t uk_align; /* Alignment mask */ uint32_t uk_pages; /* Total page count */ uint32_t uk_free; /* Count of items free in slabs */ uint32_t uk_reserve; /* Number of reserved items. */ uint32_t uk_size; /* Requested size of each item */ uint32_t uk_rsize; /* Real size of each item */ uint32_t uk_maxpages; /* Maximum number of pages to alloc */ uma_init uk_init; /* Keg's init routine */ uma_fini uk_fini; /* Keg's fini routine */ uma_alloc uk_allocf; /* Allocation function */ uma_free uk_freef; /* Free routine */ u_long uk_offset; /* Next free offset from base KVA */ vm_offset_t uk_kva; /* Zone base KVA */ uma_zone_t uk_slabzone; /* Slab zone backing us, if OFFPAGE */ uint32_t uk_pgoff; /* Offset to uma_slab struct */ uint16_t uk_ppera; /* pages per allocation from backend */ uint16_t uk_ipers; /* Items per slab */ uint32_t uk_flags; /* Internal flags */ /* Least used fields go to the last cache line. */ const char *uk_name; /* Name of creating zone. */ LIST_ENTRY(uma_keg) uk_link; /* List of all kegs */ /* Must be last, variable sized. */ struct uma_domain uk_domain[]; /* Keg's slab lists. */ }; typedef struct uma_keg * uma_keg_t; /* * Free bits per-slab. */ #define SLAB_SETSIZE (PAGE_SIZE / UMA_SMALLEST_UNIT) BITSET_DEFINE(slabbits, SLAB_SETSIZE); /* * The slab structure manages a single contiguous allocation from backing * store and subdivides it into individually allocatable items. 
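 * Item i of a slab lives at us_data + i * uk_rsize; the INVARIANTS
 * checks in uma_core.c recover the index the same way when a free is
 * validated.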
*/ struct uma_slab { uma_keg_t us_keg; /* Keg we live in */ union { LIST_ENTRY(uma_slab) _us_link; /* slabs in zone */ unsigned long _us_size; /* Size of allocation */ } us_type; SLIST_ENTRY(uma_slab) us_hlink; /* Link for hash table */ uint8_t *us_data; /* First item */ struct slabbits us_free; /* Free bitmask. */ #ifdef INVARIANTS struct slabbits us_debugfree; /* Debug bitmask. */ #endif uint16_t us_freecount; /* How many are free? */ uint8_t us_flags; /* Page flags see uma.h */ uint8_t us_domain; /* Backing NUMA domain. */ }; #define us_link us_type._us_link #define us_size us_type._us_size #if MAXMEMDOM >= 255 #error "Slab domain type insufficient" #endif typedef struct uma_slab * uma_slab_t; typedef uma_slab_t (*uma_slaballoc)(uma_zone_t, uma_keg_t, int, int); struct uma_klink { LIST_ENTRY(uma_klink) kl_link; uma_keg_t kl_keg; }; typedef struct uma_klink *uma_klink_t; struct uma_zone_domain { LIST_HEAD(,uma_bucket) uzd_buckets; /* full buckets */ + long uzd_nitems; /* total item count */ + long uzd_imax; /* maximum item count this period */ + long uzd_imin; /* minimum item count this period */ + long uzd_wss; /* working set size estimate */ }; typedef struct uma_zone_domain * uma_zone_domain_t; /* * Zone management structure * * TODO: Optimize for cache line size * */ struct uma_zone { /* Offset 0, used in alloc/free fast/medium fast path and const. */ struct mtx *uz_lockptr; const char *uz_name; /* Text name of the zone */ struct uma_zone_domain *uz_domain; /* per-domain buckets */ uint32_t uz_flags; /* Flags inherited from kegs */ uint32_t uz_size; /* Size inherited from kegs */ uma_ctor uz_ctor; /* Constructor for each allocation */ uma_dtor uz_dtor; /* Destructor */ uma_init uz_init; /* Initializer for each item */ uma_fini uz_fini; /* Finalizer for each item. */ /* Offset 64, used in bucket replenish. */ uma_import uz_import; /* Import new memory to cache. */ uma_release uz_release; /* Release memory from cache. */ void *uz_arg; /* Import/release argument. */ uma_slaballoc uz_slab; /* Allocate a slab from the backend. */ uint16_t uz_count; /* Amount of items in full bucket */ uint16_t uz_count_min; /* Minimal amount of items there */ /* 32bit pad on 64bit. */ LIST_ENTRY(uma_zone) uz_link; /* List of all zones in keg */ LIST_HEAD(,uma_klink) uz_kegs; /* List of kegs. */ /* Offset 128 Rare. */ /* * The lock is placed here to avoid adjacent line prefetcher * in fast paths and to take up space near infrequently accessed * members to reduce alignment overhead. */ struct mtx uz_lock; /* Lock for the zone */ struct uma_klink uz_klink; /* klink for first keg. */ /* The next two fields are used to print a rate-limited warnings. */ const char *uz_warning; /* Warning to print on failure */ struct timeval uz_ratecheck; /* Warnings rate-limiting */ struct task uz_maxaction; /* Task to run when at limit */ /* 16 bytes of pad. */ /* Offset 256, atomic stats. */ volatile u_long uz_allocs UMA_ALIGN; /* Total number of allocations */ volatile u_long uz_fails; /* Total number of alloc failures */ volatile u_long uz_frees; /* Total number of frees */ uint64_t uz_sleeps; /* Total number of alloc sleeps */ /* * This HAS to be the last item because we adjust the zone size * based on NCPU and then allocate the space for the zones. */ struct uma_cache uz_cpu[]; /* Per cpu caches */ /* uz_domain follows here. */ }; /* * These flags must not overlap with the UMA_ZONE flags specified in uma.h. */ #define UMA_ZFLAG_MULTI 0x04000000 /* Multiple kegs in the zone. 
*/ #define UMA_ZFLAG_DRAINING 0x08000000 /* Running zone_drain. */ #define UMA_ZFLAG_BUCKET 0x10000000 /* Bucket zone. */ #define UMA_ZFLAG_INTERNAL 0x20000000 /* No offpage no PCPU. */ #define UMA_ZFLAG_FULL 0x40000000 /* Reached uz_maxpages */ #define UMA_ZFLAG_CACHEONLY 0x80000000 /* Don't ask VM for buckets. */ #define UMA_ZFLAG_INHERIT \ (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY | UMA_ZFLAG_BUCKET) static inline uma_keg_t zone_first_keg(uma_zone_t zone) { uma_klink_t klink; klink = LIST_FIRST(&zone->uz_kegs); return (klink != NULL) ? klink->kl_keg : NULL; } #undef UMA_ALIGN #ifdef _KERNEL /* Internal prototypes */ static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data); void *uma_large_malloc(vm_size_t size, int wait); void *uma_large_malloc_domain(vm_size_t size, int domain, int wait); void uma_large_free(uma_slab_t slab); /* Lock Macros */ #define KEG_LOCK_INIT(k, lc) \ do { \ if ((lc)) \ mtx_init(&(k)->uk_lock, (k)->uk_name, \ (k)->uk_name, MTX_DEF | MTX_DUPOK); \ else \ mtx_init(&(k)->uk_lock, (k)->uk_name, \ "UMA zone", MTX_DEF | MTX_DUPOK); \ } while (0) #define KEG_LOCK_FINI(k) mtx_destroy(&(k)->uk_lock) #define KEG_LOCK(k) mtx_lock(&(k)->uk_lock) #define KEG_UNLOCK(k) mtx_unlock(&(k)->uk_lock) #define ZONE_LOCK_INIT(z, lc) \ do { \ if ((lc)) \ mtx_init(&(z)->uz_lock, (z)->uz_name, \ (z)->uz_name, MTX_DEF | MTX_DUPOK); \ else \ mtx_init(&(z)->uz_lock, (z)->uz_name, \ "UMA zone", MTX_DEF | MTX_DUPOK); \ } while (0) - + #define ZONE_LOCK(z) mtx_lock((z)->uz_lockptr) #define ZONE_TRYLOCK(z) mtx_trylock((z)->uz_lockptr) #define ZONE_UNLOCK(z) mtx_unlock((z)->uz_lockptr) #define ZONE_LOCK_FINI(z) mtx_destroy(&(z)->uz_lock) +#define ZONE_LOCK_ASSERT(z) mtx_assert((z)->uz_lockptr, MA_OWNED) /* * Find a slab within a hash table. This is used for OFFPAGE zones to lookup * the slab structure. * * Arguments: * hash The hash table to search. * data The base page of the item. * * Returns: * A pointer to a slab if successful, else NULL. */ static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data) { uma_slab_t slab; int hval; hval = UMA_HASH(hash, data); SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) { if ((uint8_t *)slab->us_data == data) return (slab); } return (NULL); } static __inline uma_slab_t vtoslab(vm_offset_t va) { vm_page_t p; p = PHYS_TO_VM_PAGE(pmap_kextract(va)); return ((uma_slab_t)p->plinks.s.pv); } static __inline void vsetslab(vm_offset_t va, uma_slab_t slab) { vm_page_t p; p = PHYS_TO_VM_PAGE(pmap_kextract(va)); p->plinks.s.pv = slab; } /* * The following two functions may be defined by architecture specific code * if they can provide more efficient allocation functions. This is useful * for using direct mapped addresses. */ void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag, int wait); void uma_small_free(void *mem, vm_size_t size, uint8_t flags); /* Set a global soft limit on UMA managed memory. */ void uma_set_limit(unsigned long limit); #endif /* _KERNEL */ #endif /* VM_UMA_INT_H */ Index: projects/clang700-import/sys/x86/include/x86_var.h =================================================================== --- projects/clang700-import/sys/x86/include/x86_var.h (revision 340426) +++ projects/clang700-import/sys/x86/include/x86_var.h (revision 340427) @@ -1,153 +1,154 @@ /*- * Copyright (c) 1995 Bruce D. Evans. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _X86_X86_VAR_H_ #define _X86_X86_VAR_H_ /* * Miscellaneous machine-dependent declarations. */ extern long Maxmem; extern u_int basemem; extern int busdma_swi_pending; extern u_int cpu_exthigh; extern u_int cpu_feature; extern u_int cpu_feature2; extern u_int amd_feature; extern u_int amd_feature2; extern u_int amd_rascap; extern u_int amd_pminfo; extern u_int amd_extended_feature_extensions; extern u_int via_feature_rng; extern u_int via_feature_xcrypt; extern u_int cpu_clflush_line_size; extern u_int cpu_stdext_feature; extern u_int cpu_stdext_feature2; extern u_int cpu_stdext_feature3; extern uint64_t cpu_ia32_arch_caps; extern u_int cpu_fxsr; extern u_int cpu_high; extern u_int cpu_id; extern u_int cpu_max_ext_state_size; extern u_int cpu_mxcsr_mask; extern u_int cpu_procinfo; extern u_int cpu_procinfo2; extern char cpu_vendor[]; extern u_int cpu_vendor_id; extern u_int cpu_mon_mwait_flags; extern u_int cpu_mon_min_size; extern u_int cpu_mon_max_size; extern u_int cpu_maxphyaddr; extern char ctx_switch_xsave[]; extern u_int hv_high; extern char hv_vendor[]; extern char kstack[]; extern char sigcode[]; extern int szsigcode; extern int vm_page_dump_size; extern int workaround_erratum383; extern int _udatasel; extern int _ucodesel; extern int _ucode32sel; extern int _ufssel; extern int _ugssel; extern int use_xsave; extern uint64_t xsave_mask; extern u_int max_apic_id; extern int pti; extern int hw_ibrs_active; extern int hw_ssb_active; struct pcb; struct thread; struct reg; struct fpreg; struct dbreg; struct dumperinfo; struct trapframe; /* * The interface type of the interrupt handler entry point cannot be * expressed in C. Use simplest non-variadic function type as an * approximation. */ typedef void alias_for_inthand_t(void); /* * Returns the maximum physical address that can be used with the * current system. 
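 * For instance, a CPU reporting cpu_maxphyaddr == 36 yields 0xfffffffff
 * (a 64 GB physical address space), while non-PAE i386 stays capped at
 * the 4 GB boundary.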
*/ static __inline vm_paddr_t cpu_getmaxphyaddr(void) { #if defined(__i386__) && !defined(PAE) return (0xffffffff); #else return ((1ULL << cpu_maxphyaddr) - 1); #endif } bool acpi_get_fadt_bootflags(uint16_t *flagsp); void *alloc_fpusave(int flags); void busdma_swi(void); bool cpu_mwait_usable(void); void cpu_probe_amdc1e(void); void cpu_setregs(void); bool disable_wp(void); void restore_wp(bool old_wp); void dump_add_page(vm_paddr_t); void dump_drop_page(vm_paddr_t); void finishidentcpu(void); void identify_cpu1(void); void identify_cpu2(void); +void identify_cpu_fixup_bsp(void); void identify_hypervisor(void); void initializecpu(void); void initializecpucache(void); bool fix_cpuid(void); void fillw(int /*u_short*/ pat, void *base, size_t cnt); int is_physical_memory(vm_paddr_t addr); int isa_nmi(int cd); void handle_ibrs_entry(void); void handle_ibrs_exit(void); void hw_ibrs_recalculate(void); void hw_ssb_recalculate(bool all_cpus); void nmi_call_kdb(u_int cpu, u_int type, struct trapframe *frame); void nmi_call_kdb_smp(u_int type, struct trapframe *frame); void nmi_handle_intr(u_int type, struct trapframe *frame); void pagecopy(void *from, void *to); void printcpuinfo(void); int pti_get_default(void); int user_dbreg_trap(register_t dr6); int minidumpsys(struct dumperinfo *); struct pcb *get_pcb_td(struct thread *td); #endif Index: projects/clang700-import/sys/x86/x86/identcpu.c =================================================================== --- projects/clang700-import/sys/x86/x86/identcpu.c (revision 340426) +++ projects/clang700-import/sys/x86/x86/identcpu.c (revision 340427) @@ -1,2503 +1,2511 @@ /*- * Copyright (c) 1992 Terrence R. Lambert. * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. * Copyright (c) 1997 KATO Takenori. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: Id: machdep.c,v 1.193 1996/06/18 01:22:04 bde Exp */ #include __FBSDID("$FreeBSD$"); #include "opt_cpu.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __i386__ #define IDENTBLUE_CYRIX486 0 #define IDENTBLUE_IBMCPU 1 #define IDENTBLUE_CYRIXM2 2 static void identifycyrix(void); static void print_transmeta_info(void); #endif static u_int find_cpu_vendor_id(void); static void print_AMD_info(void); static void print_INTEL_info(void); static void print_INTEL_TLB(u_int data); static void print_hypervisor_info(void); static void print_svm_info(void); static void print_via_padlock_info(void); static void print_vmx_info(void); #ifdef __i386__ int cpu; /* Are we 386, 386sx, 486, etc? */ int cpu_class; #endif u_int cpu_feature; /* Feature flags */ u_int cpu_feature2; /* Feature flags */ u_int amd_feature; /* AMD feature flags */ u_int amd_feature2; /* AMD feature flags */ u_int amd_rascap; /* AMD RAS capabilities */ u_int amd_pminfo; /* AMD advanced power management info */ u_int amd_extended_feature_extensions; u_int via_feature_rng; /* VIA RNG features */ u_int via_feature_xcrypt; /* VIA ACE features */ u_int cpu_high; /* Highest arg to CPUID */ u_int cpu_exthigh; /* Highest arg to extended CPUID */ u_int cpu_id; /* Stepping ID */ u_int cpu_procinfo; /* HyperThreading Info / Brand Index / CLFUSH */ u_int cpu_procinfo2; /* Multicore info */ char cpu_vendor[20]; /* CPU Origin code */ u_int cpu_vendor_id; /* CPU vendor ID */ u_int cpu_fxsr; /* SSE enabled */ u_int cpu_mxcsr_mask; /* Valid bits in mxcsr */ u_int cpu_clflush_line_size = 32; u_int cpu_stdext_feature; /* %ebx */ u_int cpu_stdext_feature2; /* %ecx */ u_int cpu_stdext_feature3; /* %edx */ uint64_t cpu_ia32_arch_caps; u_int cpu_max_ext_state_size; u_int cpu_mon_mwait_flags; /* MONITOR/MWAIT flags (CPUID.05H.ECX) */ u_int cpu_mon_min_size; /* MONITOR minimum range size, bytes */ u_int cpu_mon_max_size; /* MONITOR minimum range size, bytes */ u_int cpu_maxphyaddr; /* Max phys addr width in bits */ char machine[] = MACHINE; SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD, &via_feature_rng, 0, "VIA RNG feature available in CPU"); SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD, &via_feature_xcrypt, 0, "VIA xcrypt feature available in CPU"); #ifdef __amd64__ #ifdef SCTL_MASK32 extern int adaptive_machine_arch; #endif static int sysctl_hw_machine(SYSCTL_HANDLER_ARGS) { #ifdef SCTL_MASK32 static const char machine32[] = "i386"; #endif int error; #ifdef SCTL_MASK32 if ((req->flags & SCTL_MASK32) != 0 && adaptive_machine_arch) error = SYSCTL_OUT(req, machine32, sizeof(machine32)); else #endif error = SYSCTL_OUT(req, machine, sizeof(machine)); return (error); } SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, sysctl_hw_machine, "A", "Machine class"); #else SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "Machine 
class"); #endif static char cpu_model[128]; SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD | CTLFLAG_MPSAFE, cpu_model, 0, "Machine model"); static int hw_clockrate; SYSCTL_INT(_hw, OID_AUTO, clockrate, CTLFLAG_RD, &hw_clockrate, 0, "CPU instruction clock rate"); u_int hv_high; char hv_vendor[16]; SYSCTL_STRING(_hw, OID_AUTO, hv_vendor, CTLFLAG_RD | CTLFLAG_MPSAFE, hv_vendor, 0, "Hypervisor vendor"); static eventhandler_tag tsc_post_tag; static char cpu_brand[48]; #ifdef __i386__ #define MAX_BRAND_INDEX 8 static const char *cpu_brandtable[MAX_BRAND_INDEX + 1] = { NULL, /* No brand */ "Intel Celeron", "Intel Pentium III", "Intel Pentium III Xeon", NULL, NULL, NULL, NULL, "Intel Pentium 4" }; static struct { char *cpu_name; int cpu_class; } cpus[] = { { "Intel 80286", CPUCLASS_286 }, /* CPU_286 */ { "i386SX", CPUCLASS_386 }, /* CPU_386SX */ { "i386DX", CPUCLASS_386 }, /* CPU_386 */ { "i486SX", CPUCLASS_486 }, /* CPU_486SX */ { "i486DX", CPUCLASS_486 }, /* CPU_486 */ { "Pentium", CPUCLASS_586 }, /* CPU_586 */ { "Cyrix 486", CPUCLASS_486 }, /* CPU_486DLC */ { "Pentium Pro", CPUCLASS_686 }, /* CPU_686 */ { "Cyrix 5x86", CPUCLASS_486 }, /* CPU_M1SC */ { "Cyrix 6x86", CPUCLASS_486 }, /* CPU_M1 */ { "Blue Lightning", CPUCLASS_486 }, /* CPU_BLUE */ { "Cyrix 6x86MX", CPUCLASS_686 }, /* CPU_M2 */ { "NexGen 586", CPUCLASS_386 }, /* CPU_NX586 (XXX) */ { "Cyrix 486S/DX", CPUCLASS_486 }, /* CPU_CY486DX */ { "Pentium II", CPUCLASS_686 }, /* CPU_PII */ { "Pentium III", CPUCLASS_686 }, /* CPU_PIII */ { "Pentium 4", CPUCLASS_686 }, /* CPU_P4 */ }; #endif static struct { char *vendor; u_int vendor_id; } cpu_vendors[] = { { INTEL_VENDOR_ID, CPU_VENDOR_INTEL }, /* GenuineIntel */ { AMD_VENDOR_ID, CPU_VENDOR_AMD }, /* AuthenticAMD */ { CENTAUR_VENDOR_ID, CPU_VENDOR_CENTAUR }, /* CentaurHauls */ #ifdef __i386__ { NSC_VENDOR_ID, CPU_VENDOR_NSC }, /* Geode by NSC */ { CYRIX_VENDOR_ID, CPU_VENDOR_CYRIX }, /* CyrixInstead */ { TRANSMETA_VENDOR_ID, CPU_VENDOR_TRANSMETA }, /* GenuineTMx86 */ { SIS_VENDOR_ID, CPU_VENDOR_SIS }, /* SiS SiS SiS */ { UMC_VENDOR_ID, CPU_VENDOR_UMC }, /* UMC UMC UMC */ { NEXGEN_VENDOR_ID, CPU_VENDOR_NEXGEN }, /* NexGenDriven */ { RISE_VENDOR_ID, CPU_VENDOR_RISE }, /* RiseRiseRise */ #if 0 /* XXX CPUID 8000_0000h and 8086_0000h, not 0000_0000h */ { "TransmetaCPU", CPU_VENDOR_TRANSMETA }, #endif #endif }; void printcpuinfo(void) { u_int regs[4], i; char *brand; printf("CPU: "); #ifdef __i386__ cpu_class = cpus[cpu].cpu_class; strncpy(cpu_model, cpus[cpu].cpu_name, sizeof (cpu_model)); #else strncpy(cpu_model, "Hammer", sizeof (cpu_model)); #endif /* Check for extended CPUID information and a processor name. 
*/ if (cpu_exthigh >= 0x80000004) { brand = cpu_brand; for (i = 0x80000002; i < 0x80000005; i++) { do_cpuid(i, regs); memcpy(brand, regs, sizeof(regs)); brand += sizeof(regs); } } switch (cpu_vendor_id) { case CPU_VENDOR_INTEL: #ifdef __i386__ if ((cpu_id & 0xf00) > 0x300) { u_int brand_index; cpu_model[0] = '\0'; switch (cpu_id & 0x3000) { case 0x1000: strcpy(cpu_model, "Overdrive "); break; case 0x2000: strcpy(cpu_model, "Dual "); break; } switch (cpu_id & 0xf00) { case 0x400: strcat(cpu_model, "i486 "); /* Check the particular flavor of 486 */ switch (cpu_id & 0xf0) { case 0x00: case 0x10: strcat(cpu_model, "DX"); break; case 0x20: strcat(cpu_model, "SX"); break; case 0x30: strcat(cpu_model, "DX2"); break; case 0x40: strcat(cpu_model, "SL"); break; case 0x50: strcat(cpu_model, "SX2"); break; case 0x70: strcat(cpu_model, "DX2 Write-Back Enhanced"); break; case 0x80: strcat(cpu_model, "DX4"); break; } break; case 0x500: /* Check the particular flavor of 586 */ strcat(cpu_model, "Pentium"); switch (cpu_id & 0xf0) { case 0x00: strcat(cpu_model, " A-step"); break; case 0x10: strcat(cpu_model, "/P5"); break; case 0x20: strcat(cpu_model, "/P54C"); break; case 0x30: strcat(cpu_model, "/P24T"); break; case 0x40: strcat(cpu_model, "/P55C"); break; case 0x70: strcat(cpu_model, "/P54C"); break; case 0x80: strcat(cpu_model, "/P55C (quarter-micron)"); break; default: /* nothing */ break; } #if defined(I586_CPU) && !defined(NO_F00F_HACK) /* * XXX - If/when Intel fixes the bug, this * should also check the version of the * CPU, not just that it's a Pentium. */ has_f00f_bug = 1; #endif break; case 0x600: /* Check the particular flavor of 686 */ switch (cpu_id & 0xf0) { case 0x00: strcat(cpu_model, "Pentium Pro A-step"); break; case 0x10: strcat(cpu_model, "Pentium Pro"); break; case 0x30: case 0x50: case 0x60: strcat(cpu_model, "Pentium II/Pentium II Xeon/Celeron"); cpu = CPU_PII; break; case 0x70: case 0x80: case 0xa0: case 0xb0: strcat(cpu_model, "Pentium III/Pentium III Xeon/Celeron"); cpu = CPU_PIII; break; default: strcat(cpu_model, "Unknown 80686"); break; } break; case 0xf00: strcat(cpu_model, "Pentium 4"); cpu = CPU_P4; break; default: strcat(cpu_model, "unknown"); break; } /* * If we didn't get a brand name from the extended * CPUID, try to look it up in the brand table. */ if (cpu_high > 0 && *cpu_brand == '\0') { brand_index = cpu_procinfo & CPUID_BRAND_INDEX; if (brand_index <= MAX_BRAND_INDEX && cpu_brandtable[brand_index] != NULL) strcpy(cpu_brand, cpu_brandtable[brand_index]); } } #else /* Please make up your mind folks! */ strcat(cpu_model, "EM64T"); #endif break; case CPU_VENDOR_AMD: /* * Values taken from AMD Processor Recognition * http://www.amd.com/K6/k6docs/pdf/20734g.pdf * (also describes ``Features'' encodings. 
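 * The switch below keys on the family and model nibbles (cpu_id & 0xff0);
 * 0x580, for instance, decodes as family 5, model 8 and is reported as
 * the K6-2.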
*/ strcpy(cpu_model, "AMD "); #ifdef __i386__ switch (cpu_id & 0xFF0) { case 0x410: strcat(cpu_model, "Standard Am486DX"); break; case 0x430: strcat(cpu_model, "Enhanced Am486DX2 Write-Through"); break; case 0x470: strcat(cpu_model, "Enhanced Am486DX2 Write-Back"); break; case 0x480: strcat(cpu_model, "Enhanced Am486DX4/Am5x86 Write-Through"); break; case 0x490: strcat(cpu_model, "Enhanced Am486DX4/Am5x86 Write-Back"); break; case 0x4E0: strcat(cpu_model, "Am5x86 Write-Through"); break; case 0x4F0: strcat(cpu_model, "Am5x86 Write-Back"); break; case 0x500: strcat(cpu_model, "K5 model 0"); break; case 0x510: strcat(cpu_model, "K5 model 1"); break; case 0x520: strcat(cpu_model, "K5 PR166 (model 2)"); break; case 0x530: strcat(cpu_model, "K5 PR200 (model 3)"); break; case 0x560: strcat(cpu_model, "K6"); break; case 0x570: strcat(cpu_model, "K6 266 (model 1)"); break; case 0x580: strcat(cpu_model, "K6-2"); break; case 0x590: strcat(cpu_model, "K6-III"); break; case 0x5a0: strcat(cpu_model, "Geode LX"); break; default: strcat(cpu_model, "Unknown"); break; } #else if ((cpu_id & 0xf00) == 0xf00) strcat(cpu_model, "AMD64 Processor"); else strcat(cpu_model, "Unknown"); #endif break; #ifdef __i386__ case CPU_VENDOR_CYRIX: strcpy(cpu_model, "Cyrix "); switch (cpu_id & 0xff0) { case 0x440: strcat(cpu_model, "MediaGX"); break; case 0x520: strcat(cpu_model, "6x86"); break; case 0x540: cpu_class = CPUCLASS_586; strcat(cpu_model, "GXm"); break; case 0x600: strcat(cpu_model, "6x86MX"); break; default: /* * Even though CPU supports the cpuid * instruction, it can be disabled. * Therefore, this routine supports all Cyrix * CPUs. */ switch (cyrix_did & 0xf0) { case 0x00: switch (cyrix_did & 0x0f) { case 0x00: strcat(cpu_model, "486SLC"); break; case 0x01: strcat(cpu_model, "486DLC"); break; case 0x02: strcat(cpu_model, "486SLC2"); break; case 0x03: strcat(cpu_model, "486DLC2"); break; case 0x04: strcat(cpu_model, "486SRx"); break; case 0x05: strcat(cpu_model, "486DRx"); break; case 0x06: strcat(cpu_model, "486SRx2"); break; case 0x07: strcat(cpu_model, "486DRx2"); break; case 0x08: strcat(cpu_model, "486SRu"); break; case 0x09: strcat(cpu_model, "486DRu"); break; case 0x0a: strcat(cpu_model, "486SRu2"); break; case 0x0b: strcat(cpu_model, "486DRu2"); break; default: strcat(cpu_model, "Unknown"); break; } break; case 0x10: switch (cyrix_did & 0x0f) { case 0x00: strcat(cpu_model, "486S"); break; case 0x01: strcat(cpu_model, "486S2"); break; case 0x02: strcat(cpu_model, "486Se"); break; case 0x03: strcat(cpu_model, "486S2e"); break; case 0x0a: strcat(cpu_model, "486DX"); break; case 0x0b: strcat(cpu_model, "486DX2"); break; case 0x0f: strcat(cpu_model, "486DX4"); break; default: strcat(cpu_model, "Unknown"); break; } break; case 0x20: if ((cyrix_did & 0x0f) < 8) strcat(cpu_model, "6x86"); /* Where did you get it? 
*/ else strcat(cpu_model, "5x86"); break; case 0x30: strcat(cpu_model, "6x86"); break; case 0x40: if ((cyrix_did & 0xf000) == 0x3000) { cpu_class = CPUCLASS_586; strcat(cpu_model, "GXm"); } else strcat(cpu_model, "MediaGX"); break; case 0x50: strcat(cpu_model, "6x86MX"); break; case 0xf0: switch (cyrix_did & 0x0f) { case 0x0d: strcat(cpu_model, "Overdrive CPU"); break; case 0x0e: strcpy(cpu_model, "Texas Instruments 486SXL"); break; case 0x0f: strcat(cpu_model, "486SLC/DLC"); break; default: strcat(cpu_model, "Unknown"); break; } break; default: strcat(cpu_model, "Unknown"); break; } break; } break; case CPU_VENDOR_RISE: strcpy(cpu_model, "Rise "); switch (cpu_id & 0xff0) { case 0x500: /* 6401 and 6441 (Kirin) */ case 0x520: /* 6510 (Lynx) */ strcat(cpu_model, "mP6"); break; default: strcat(cpu_model, "Unknown"); } break; #endif case CPU_VENDOR_CENTAUR: #ifdef __i386__ switch (cpu_id & 0xff0) { case 0x540: strcpy(cpu_model, "IDT WinChip C6"); break; case 0x580: strcpy(cpu_model, "IDT WinChip 2"); break; case 0x590: strcpy(cpu_model, "IDT WinChip 3"); break; case 0x660: strcpy(cpu_model, "VIA C3 Samuel"); break; case 0x670: if (cpu_id & 0x8) strcpy(cpu_model, "VIA C3 Ezra"); else strcpy(cpu_model, "VIA C3 Samuel 2"); break; case 0x680: strcpy(cpu_model, "VIA C3 Ezra-T"); break; case 0x690: strcpy(cpu_model, "VIA C3 Nehemiah"); break; case 0x6a0: case 0x6d0: strcpy(cpu_model, "VIA C7 Esther"); break; case 0x6f0: strcpy(cpu_model, "VIA Nano"); break; default: strcpy(cpu_model, "VIA/IDT Unknown"); } #else strcpy(cpu_model, "VIA "); if ((cpu_id & 0xff0) == 0x6f0) strcat(cpu_model, "Nano Processor"); else strcat(cpu_model, "Unknown"); #endif break; #ifdef __i386__ case CPU_VENDOR_IBM: strcpy(cpu_model, "Blue Lightning CPU"); break; case CPU_VENDOR_NSC: switch (cpu_id & 0xff0) { case 0x540: strcpy(cpu_model, "Geode SC1100"); cpu = CPU_GEODE1100; break; default: strcpy(cpu_model, "Geode/NSC unknown"); break; } break; #endif default: strcat(cpu_model, "Unknown"); break; } /* * Replace cpu_model with cpu_brand minus leading spaces if * we have one. */ brand = cpu_brand; while (*brand == ' ') ++brand; if (*brand != '\0') strcpy(cpu_model, brand); printf("%s (", cpu_model); if (tsc_freq != 0) { hw_clockrate = (tsc_freq + 5000) / 1000000; printf("%jd.%02d-MHz ", (intmax_t)(tsc_freq + 4999) / 1000000, (u_int)((tsc_freq + 4999) / 10000) % 100); } #ifdef __i386__ switch(cpu_class) { case CPUCLASS_286: printf("286"); break; case CPUCLASS_386: printf("386"); break; #if defined(I486_CPU) case CPUCLASS_486: printf("486"); break; #endif #if defined(I586_CPU) case CPUCLASS_586: printf("586"); break; #endif #if defined(I686_CPU) case CPUCLASS_686: printf("686"); break; #endif default: printf("Unknown"); /* will panic below... 
*/ } #else printf("K8"); #endif printf("-class CPU)\n"); if (*cpu_vendor) printf(" Origin=\"%s\"", cpu_vendor); if (cpu_id) printf(" Id=0x%x", cpu_id); if (cpu_vendor_id == CPU_VENDOR_INTEL || cpu_vendor_id == CPU_VENDOR_AMD || cpu_vendor_id == CPU_VENDOR_CENTAUR || #ifdef __i386__ cpu_vendor_id == CPU_VENDOR_TRANSMETA || cpu_vendor_id == CPU_VENDOR_RISE || cpu_vendor_id == CPU_VENDOR_NSC || (cpu_vendor_id == CPU_VENDOR_CYRIX && ((cpu_id & 0xf00) > 0x500)) || #endif 0) { printf(" Family=0x%x", CPUID_TO_FAMILY(cpu_id)); printf(" Model=0x%x", CPUID_TO_MODEL(cpu_id)); printf(" Stepping=%u", cpu_id & CPUID_STEPPING); #ifdef __i386__ if (cpu_vendor_id == CPU_VENDOR_CYRIX) printf("\n DIR=0x%04x", cyrix_did); #endif /* * AMD CPUID Specification * http://support.amd.com/us/Embedded_TechDocs/25481.pdf * * Intel Processor Identification and CPUID Instruction * http://www.intel.com/assets/pdf/appnote/241618.pdf */ if (cpu_high > 0) { /* * Here we should probably set up flags indicating * whether or not various features are available. * The interesting ones are probably VME, PSE, PAE, * and PGE. The code already assumes without bothering * to check that all CPUs >= Pentium have a TSC and * MSRs. */ printf("\n Features=0x%b", cpu_feature, "\020" "\001FPU" /* Integral FPU */ "\002VME" /* Extended VM86 mode support */ "\003DE" /* Debugging Extensions (CR4.DE) */ "\004PSE" /* 4MByte page tables */ "\005TSC" /* Timestamp counter */ "\006MSR" /* Machine specific registers */ "\007PAE" /* Physical address extension */ "\010MCE" /* Machine Check support */ "\011CX8" /* CMPEXCH8 instruction */ "\012APIC" /* SMP local APIC */ "\013oldMTRR" /* Previous implementation of MTRR */ "\014SEP" /* Fast System Call */ "\015MTRR" /* Memory Type Range Registers */ "\016PGE" /* PG_G (global bit) support */ "\017MCA" /* Machine Check Architecture */ "\020CMOV" /* CMOV instruction */ "\021PAT" /* Page attributes table */ "\022PSE36" /* 36 bit address space support */ "\023PN" /* Processor Serial number */ "\024CLFLUSH" /* Has the CLFLUSH instruction */ "\025" "\026DTS" /* Debug Trace Store */ "\027ACPI" /* ACPI support */ "\030MMX" /* MMX instructions */ "\031FXSR" /* FXSAVE/FXRSTOR */ "\032SSE" /* Streaming SIMD Extensions */ "\033SSE2" /* Streaming SIMD Extensions #2 */ "\034SS" /* Self snoop */ "\035HTT" /* Hyperthreading (see EBX bit 16-23) */ "\036TM" /* Thermal Monitor clock slowdown */ "\037IA64" /* CPU can execute IA64 instructions */ "\040PBE" /* Pending Break Enable */ ); if (cpu_feature2 != 0) { printf("\n Features2=0x%b", cpu_feature2, "\020" "\001SSE3" /* SSE3 */ "\002PCLMULQDQ" /* Carry-Less Mul Quadword */ "\003DTES64" /* 64-bit Debug Trace */ "\004MON" /* MONITOR/MWAIT Instructions */ "\005DS_CPL" /* CPL Qualified Debug Store */ "\006VMX" /* Virtual Machine Extensions */ "\007SMX" /* Safer Mode Extensions */ "\010EST" /* Enhanced SpeedStep */ "\011TM2" /* Thermal Monitor 2 */ "\012SSSE3" /* SSSE3 */ "\013CNXT-ID" /* L1 context ID available */ "\014SDBG" /* IA32 silicon debug */ "\015FMA" /* Fused Multiply Add */ "\016CX16" /* CMPXCHG16B Instruction */ "\017xTPR" /* Send Task Priority Messages*/ "\020PDCM" /* Perf/Debug Capability MSR */ "\021" "\022PCID" /* Process-context Identifiers*/ "\023DCA" /* Direct Cache Access */ "\024SSE4.1" /* SSE 4.1 */ "\025SSE4.2" /* SSE 4.2 */ "\026x2APIC" /* xAPIC Extensions */ "\027MOVBE" /* MOVBE Instruction */ "\030POPCNT" /* POPCNT Instruction */ "\031TSCDLT" /* TSC-Deadline Timer */ "\032AESNI" /* AES Crypto */ "\033XSAVE" /* XSAVE/XRSTOR States */ "\034OSXSAVE" /* 
OS-Enabled State Management*/ "\035AVX" /* Advanced Vector Extensions */ "\036F16C" /* Half-precision conversions */ "\037RDRAND" /* RDRAND Instruction */ "\040HV" /* Hypervisor */ ); } if (amd_feature != 0) { printf("\n AMD Features=0x%b", amd_feature, "\020" /* in hex */ "\001" /* Same */ "\002" /* Same */ "\003" /* Same */ "\004" /* Same */ "\005" /* Same */ "\006" /* Same */ "\007" /* Same */ "\010" /* Same */ "\011" /* Same */ "\012" /* Same */ "\013" /* Undefined */ "\014SYSCALL" /* Have SYSCALL/SYSRET */ "\015" /* Same */ "\016" /* Same */ "\017" /* Same */ "\020" /* Same */ "\021" /* Same */ "\022" /* Same */ "\023" /* Reserved, unknown */ "\024MP" /* Multiprocessor Capable */ "\025NX" /* Has EFER.NXE, NX */ "\026" /* Undefined */ "\027MMX+" /* AMD MMX Extensions */ "\030" /* Same */ "\031" /* Same */ "\032FFXSR" /* Fast FXSAVE/FXRSTOR */ "\033Page1GB" /* 1-GB large page support */ "\034RDTSCP" /* RDTSCP */ "\035" /* Undefined */ "\036LM" /* 64 bit long mode */ "\0373DNow!+" /* AMD 3DNow! Extensions */ "\0403DNow!" /* AMD 3DNow! */ ); } if (amd_feature2 != 0) { printf("\n AMD Features2=0x%b", amd_feature2, "\020" "\001LAHF" /* LAHF/SAHF in long mode */ "\002CMP" /* CMP legacy */ "\003SVM" /* Secure Virtual Mode */ "\004ExtAPIC" /* Extended APIC register */ "\005CR8" /* CR8 in legacy mode */ "\006ABM" /* LZCNT instruction */ "\007SSE4A" /* SSE4A */ "\010MAS" /* Misaligned SSE mode */ "\011Prefetch" /* 3DNow! Prefetch/PrefetchW */ "\012OSVW" /* OS visible workaround */ "\013IBS" /* Instruction based sampling */ "\014XOP" /* XOP extended instructions */ "\015SKINIT" /* SKINIT/STGI */ "\016WDT" /* Watchdog timer */ "\017" "\020LWP" /* Lightweight Profiling */ "\021FMA4" /* 4-operand FMA instructions */ "\022TCE" /* Translation Cache Extension */ "\023" "\024NodeId" /* NodeId MSR support */ "\025" "\026TBM" /* Trailing Bit Manipulation */ "\027Topology" /* Topology Extensions */ "\030PCXC" /* Core perf count */ "\031PNXC" /* NB perf count */ "\032" "\033DBE" /* Data Breakpoint extension */ "\034PTSC" /* Performance TSC */ "\035PL2I" /* L2I perf count */ "\036MWAITX" /* MONITORX/MWAITX instructions */ "\037" "\040" ); } if (cpu_stdext_feature != 0) { printf("\n Structured Extended Features=0x%b", cpu_stdext_feature, "\020" /* RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */ "\001FSGSBASE" "\002TSCADJ" "\003SGX" /* Bit Manipulation Instructions */ "\004BMI1" /* Hardware Lock Elision */ "\005HLE" /* Advanced Vector Instructions 2 */ "\006AVX2" /* FDP_EXCPTN_ONLY */ "\007FDPEXC" /* Supervisor Mode Execution Prot. 
*/ "\010SMEP" /* Bit Manipulation Instructions */ "\011BMI2" "\012ERMS" /* Invalidate Processor Context ID */ "\013INVPCID" /* Restricted Transactional Memory */ "\014RTM" "\015PQM" "\016NFPUSG" /* Intel Memory Protection Extensions */ "\017MPX" "\020PQE" /* AVX512 Foundation */ "\021AVX512F" "\022AVX512DQ" /* Enhanced NRBG */ "\023RDSEED" /* ADCX + ADOX */ "\024ADX" /* Supervisor Mode Access Prevention */ "\025SMAP" "\026AVX512IFMA" "\027PCOMMIT" "\030CLFLUSHOPT" "\031CLWB" "\032PROCTRACE" "\033AVX512PF" "\034AVX512ER" "\035AVX512CD" "\036SHA" "\037AVX512BW" "\040AVX512VL" ); } if (cpu_stdext_feature2 != 0) { printf("\n Structured Extended Features2=0x%b", cpu_stdext_feature2, "\020" "\001PREFETCHWT1" "\002AVX512VBMI" "\003UMIP" "\004PKU" "\005OSPKE" "\027RDPID" "\037SGXLC" ); } if (cpu_stdext_feature3 != 0) { printf("\n Structured Extended Features3=0x%b", cpu_stdext_feature3, "\020" "\033IBPB" "\034STIBP" "\035L1DFL" "\036ARCH_CAP" "\040SSBD" ); } if ((cpu_feature2 & CPUID2_XSAVE) != 0) { cpuid_count(0xd, 0x1, regs); if (regs[0] != 0) { printf("\n XSAVE Features=0x%b", regs[0], "\020" "\001XSAVEOPT" "\002XSAVEC" "\003XINUSE" "\004XSAVES"); } } if (cpu_ia32_arch_caps != 0) { printf("\n IA32_ARCH_CAPS=0x%b", (u_int)cpu_ia32_arch_caps, "\020" "\001RDCL_NO" "\002IBRS_ALL" ); } if (amd_extended_feature_extensions != 0) { printf("\n " "AMD Extended Feature Extensions ID EBX=" "0x%b", amd_extended_feature_extensions, "\020" "\001CLZERO" "\002IRPerf" "\003XSaveErPtr"); } if (via_feature_rng != 0 || via_feature_xcrypt != 0) print_via_padlock_info(); if (cpu_feature2 & CPUID2_VMX) print_vmx_info(); if (amd_feature2 & AMDID2_SVM) print_svm_info(); if ((cpu_feature & CPUID_HTT) && cpu_vendor_id == CPU_VENDOR_AMD) cpu_feature &= ~CPUID_HTT; /* * If this CPU supports P-state invariant TSC then * mention the capability. */ if (tsc_is_invariant) { printf("\n TSC: P-state invariant"); if (tsc_perf_stat) printf(", performance statistics"); } } #ifdef __i386__ } else if (cpu_vendor_id == CPU_VENDOR_CYRIX) { printf(" DIR=0x%04x", cyrix_did); printf(" Stepping=%u", (cyrix_did & 0xf000) >> 12); printf(" Revision=%u", (cyrix_did & 0x0f00) >> 8); #ifndef CYRIX_CACHE_REALLY_WORKS if (cpu == CPU_M1 && (cyrix_did & 0xff00) < 0x1700) printf("\n CPU cache: write-through mode"); #endif #endif } /* Avoid ugly blank lines: only print newline when we have to. */ if (*cpu_vendor || cpu_id) printf("\n"); if (bootverbose) { if (cpu_vendor_id == CPU_VENDOR_AMD) print_AMD_info(); else if (cpu_vendor_id == CPU_VENDOR_INTEL) print_INTEL_info(); #ifdef __i386__ else if (cpu_vendor_id == CPU_VENDOR_TRANSMETA) print_transmeta_info(); #endif } print_hypervisor_info(); } #ifdef __i386__ void panicifcpuunsupported(void) { #if !defined(lint) #if !defined(I486_CPU) && !defined(I586_CPU) && !defined(I686_CPU) #error This kernel is not configured for one of the supported CPUs #endif #else /* lint */ #endif /* lint */ /* * Now that we have told the user what they have, * let them know if that machine type isn't configured. */ switch (cpu_class) { case CPUCLASS_286: /* a 286 should not make it this far, anyway */ case CPUCLASS_386: #if !defined(I486_CPU) case CPUCLASS_486: #endif #if !defined(I586_CPU) case CPUCLASS_586: #endif #if !defined(I686_CPU) case CPUCLASS_686: #endif panic("CPU class not configured"); default: break; } } static volatile u_int trap_by_rdmsr; /* * Special exception 6 handler. * The rdmsr instruction generates invalid opcodes fault on 486-class * Cyrix CPU. 
Stacked eip register points the rdmsr instruction in the * function identblue() when this handler is called. Stacked eip should * be advanced. */ inthand_t bluetrap6; #ifdef __GNUCLIKE_ASM __asm (" \n\ .text \n\ .p2align 2,0x90 \n\ .type " __XSTRING(CNAME(bluetrap6)) ",@function \n\ " __XSTRING(CNAME(bluetrap6)) ": \n\ ss \n\ movl $0xa8c1d," __XSTRING(CNAME(trap_by_rdmsr)) " \n\ addl $2, (%esp) /* rdmsr is a 2-byte instruction */ \n\ iret \n\ "); #endif /* * Special exception 13 handler. * Accessing non-existent MSR generates general protection fault. */ inthand_t bluetrap13; #ifdef __GNUCLIKE_ASM __asm (" \n\ .text \n\ .p2align 2,0x90 \n\ .type " __XSTRING(CNAME(bluetrap13)) ",@function \n\ " __XSTRING(CNAME(bluetrap13)) ": \n\ ss \n\ movl $0xa89c4," __XSTRING(CNAME(trap_by_rdmsr)) " \n\ popl %eax /* discard error code */ \n\ addl $2, (%esp) /* rdmsr is a 2-byte instruction */ \n\ iret \n\ "); #endif /* * Distinguish IBM Blue Lightning CPU from Cyrix CPUs that does not * support cpuid instruction. This function should be called after * loading interrupt descriptor table register. * * I don't like this method that handles fault, but I couldn't get * information for any other methods. Does blue giant know? */ static int identblue(void) { trap_by_rdmsr = 0; /* * Cyrix 486-class CPU does not support rdmsr instruction. * The rdmsr instruction generates invalid opcode fault, and exception * will be trapped by bluetrap6() on Cyrix 486-class CPU. The * bluetrap6() set the magic number to trap_by_rdmsr. */ setidt(IDT_UD, bluetrap6, SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); /* * Certain BIOS disables cpuid instruction of Cyrix 6x86MX CPU. * In this case, rdmsr generates general protection fault, and * exception will be trapped by bluetrap13(). */ setidt(IDT_GP, bluetrap13, SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); rdmsr(0x1002); /* Cyrix CPU generates fault. */ if (trap_by_rdmsr == 0xa8c1d) return IDENTBLUE_CYRIX486; else if (trap_by_rdmsr == 0xa89c4) return IDENTBLUE_CYRIXM2; return IDENTBLUE_IBMCPU; } /* * identifycyrix() set lower 16 bits of cyrix_did as follows: * * F E D C B A 9 8 7 6 5 4 3 2 1 0 * +-------+-------+---------------+ * | SID | RID | Device ID | * | (DIR 1) | (DIR 0) | * +-------+-------+---------------+ */ static void identifycyrix(void) { register_t saveintr; int ccr2_test = 0, dir_test = 0; u_char ccr2, ccr3; saveintr = intr_disable(); ccr2 = read_cyrix_reg(CCR2); write_cyrix_reg(CCR2, ccr2 ^ CCR2_LOCK_NW); read_cyrix_reg(CCR2); if (read_cyrix_reg(CCR2) != ccr2) ccr2_test = 1; write_cyrix_reg(CCR2, ccr2); ccr3 = read_cyrix_reg(CCR3); write_cyrix_reg(CCR3, ccr3 ^ CCR3_MAPEN3); read_cyrix_reg(CCR3); if (read_cyrix_reg(CCR3) != ccr3) dir_test = 1; /* CPU supports DIRs. */ write_cyrix_reg(CCR3, ccr3); if (dir_test) { /* Device ID registers are available. */ cyrix_did = read_cyrix_reg(DIR1) << 8; cyrix_did += read_cyrix_reg(DIR0); } else if (ccr2_test) cyrix_did = 0x0010; /* 486S A-step */ else cyrix_did = 0x00ff; /* Old 486SLC/DLC and TI486SXLC/SXL */ intr_restore(saveintr); } #endif /* Update TSC freq with the value indicated by the caller. */ static void tsc_freq_changed(void *arg __unused, const struct cf_level *level, int status) { /* If there was an error during the transition, don't do anything. */ if (status != 0) return; /* Total setting for this level gives the new frequency in MHz. 
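 * A level whose total_set.freq is, say, 2400 therefore corresponds to a
 * 2400 MHz clock, so the value can be assigned to hw_clockrate unscaled.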
*/ hw_clockrate = level->total_set.freq; } static void hook_tsc_freq(void *arg __unused) { if (tsc_is_invariant) return; tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change, tsc_freq_changed, NULL, EVENTHANDLER_PRI_ANY); } SYSINIT(hook_tsc_freq, SI_SUB_CONFIGURE, SI_ORDER_ANY, hook_tsc_freq, NULL); static const char *const vm_bnames[] = { "QEMU", /* QEMU */ "Plex86", /* Plex86 */ "Bochs", /* Bochs */ "Xen", /* Xen */ "BHYVE", /* bhyve */ "Seabios", /* KVM */ NULL }; static const char *const vm_pnames[] = { "VMware Virtual Platform", /* VMWare VM */ "Virtual Machine", /* Microsoft VirtualPC */ "VirtualBox", /* Sun xVM VirtualBox */ "Parallels Virtual Platform", /* Parallels VM */ "KVM", /* KVM */ NULL }; void identify_hypervisor(void) { u_int regs[4]; char *p; int i; /* * [RFC] CPUID usage for interaction between Hypervisors and Linux. * http://lkml.org/lkml/2008/10/1/246 * * KB1009458: Mechanisms to determine if software is running in * a VMware virtual machine * http://kb.vmware.com/kb/1009458 */ if (cpu_feature2 & CPUID2_HV) { vm_guest = VM_GUEST_VM; do_cpuid(0x40000000, regs); /* * KVM from Linux kernels prior to commit * 57c22e5f35aa4b9b2fe11f73f3e62bbf9ef36190 set %eax * to 0 rather than a valid hv_high value. Check for * the KVM signature bytes and fixup %eax to the * highest supported leaf in that case. */ if (regs[0] == 0 && regs[1] == 0x4b4d564b && regs[2] == 0x564b4d56 && regs[3] == 0x0000004d) regs[0] = 0x40000001; if (regs[0] >= 0x40000000) { hv_high = regs[0]; ((u_int *)&hv_vendor)[0] = regs[1]; ((u_int *)&hv_vendor)[1] = regs[2]; ((u_int *)&hv_vendor)[2] = regs[3]; hv_vendor[12] = '\0'; if (strcmp(hv_vendor, "VMwareVMware") == 0) vm_guest = VM_GUEST_VMWARE; else if (strcmp(hv_vendor, "Microsoft Hv") == 0) vm_guest = VM_GUEST_HV; else if (strcmp(hv_vendor, "KVMKVMKVM") == 0) vm_guest = VM_GUEST_KVM; else if (strcmp(hv_vendor, "bhyve bhyve") == 0) vm_guest = VM_GUEST_BHYVE; } return; } /* * Examine SMBIOS strings for older hypervisors. */ p = kern_getenv("smbios.system.serial"); if (p != NULL) { if (strncmp(p, "VMware-", 7) == 0 || strncmp(p, "VMW", 3) == 0) { vmware_hvcall(VMW_HVCMD_GETVERSION, regs); if (regs[1] == VMW_HVMAGIC) { vm_guest = VM_GUEST_VMWARE; freeenv(p); return; } } freeenv(p); } /* * XXX: Some of these entries may not be needed since they were * added to FreeBSD before the checks above. */ p = kern_getenv("smbios.bios.vendor"); if (p != NULL) { for (i = 0; vm_bnames[i] != NULL; i++) if (strcmp(p, vm_bnames[i]) == 0) { vm_guest = VM_GUEST_VM; freeenv(p); return; } freeenv(p); } p = kern_getenv("smbios.system.product"); if (p != NULL) { for (i = 0; vm_pnames[i] != NULL; i++) if (strcmp(p, vm_pnames[i]) == 0) { vm_guest = VM_GUEST_VM; freeenv(p); return; } freeenv(p); } } bool fix_cpuid(void) { uint64_t msr; /* * Clear "Limit CPUID Maxval" bit and return true if the caller should * get the largest standard CPUID function number again if it is set * from BIOS. It is necessary for probing correct CPU topology later * and for the correct operation of the AVX-aware userspace. */ if (cpu_vendor_id == CPU_VENDOR_INTEL && ((CPUID_TO_FAMILY(cpu_id) == 0xf && CPUID_TO_MODEL(cpu_id) >= 0x3) || (CPUID_TO_FAMILY(cpu_id) == 0x6 && CPUID_TO_MODEL(cpu_id) >= 0xe))) { msr = rdmsr(MSR_IA32_MISC_ENABLE); if ((msr & IA32_MISC_EN_LIMCPUID) != 0) { msr &= ~IA32_MISC_EN_LIMCPUID; wrmsr(MSR_IA32_MISC_ENABLE, msr); return (true); } } /* * Re-enable AMD Topology Extension that could be disabled by BIOS * on some notebook processors. 
Without the extension it's really * hard to determine the correct CPU cache topology. * See BIOS and Kernel Developer’s Guide (BKDG) for AMD Family 15h * Models 60h-6Fh Processors, Publication # 50742. */ if (vm_guest == VM_GUEST_NO && cpu_vendor_id == CPU_VENDOR_AMD && CPUID_TO_FAMILY(cpu_id) == 0x15) { msr = rdmsr(MSR_EXTFEATURES); if ((msr & ((uint64_t)1 << 54)) == 0) { msr |= (uint64_t)1 << 54; wrmsr(MSR_EXTFEATURES, msr); return (true); } } return (false); } void identify_cpu1(void) { u_int regs[4]; do_cpuid(0, regs); cpu_high = regs[0]; ((u_int *)&cpu_vendor)[0] = regs[1]; ((u_int *)&cpu_vendor)[1] = regs[3]; ((u_int *)&cpu_vendor)[2] = regs[2]; cpu_vendor[12] = '\0'; do_cpuid(1, regs); cpu_id = regs[0]; cpu_procinfo = regs[1]; cpu_feature = regs[3]; cpu_feature2 = regs[2]; } void identify_cpu2(void) { u_int regs[4], cpu_stdext_disable; if (cpu_high >= 7) { cpuid_count(7, 0, regs); cpu_stdext_feature = regs[1]; /* * Some hypervisors failed to filter out unsupported * extended features. Allow to disable the * extensions, activation of which requires setting a * bit in CR4, and which VM monitors do not support. */ cpu_stdext_disable = 0; TUNABLE_INT_FETCH("hw.cpu_stdext_disable", &cpu_stdext_disable); cpu_stdext_feature &= ~cpu_stdext_disable; cpu_stdext_feature2 = regs[2]; cpu_stdext_feature3 = regs[3]; if ((cpu_stdext_feature3 & CPUID_STDEXT3_ARCH_CAP) != 0) cpu_ia32_arch_caps = rdmsr(MSR_IA32_ARCH_CAP); } } +void +identify_cpu_fixup_bsp(void) +{ + u_int regs[4]; + + cpu_vendor_id = find_cpu_vendor_id(); + + if (fix_cpuid()) { + do_cpuid(0, regs); + cpu_high = regs[0]; + } +} + /* * Final stage of CPU identification. */ void finishidentcpu(void) { u_int regs[4]; #ifdef __i386__ u_char ccr3; #endif - cpu_vendor_id = find_cpu_vendor_id(); - - if (fix_cpuid()) { - do_cpuid(0, regs); - cpu_high = regs[0]; - } + identify_cpu_fixup_bsp(); if (cpu_high >= 5 && (cpu_feature2 & CPUID2_MON) != 0) { do_cpuid(5, regs); cpu_mon_mwait_flags = regs[2]; cpu_mon_min_size = regs[0] & CPUID5_MON_MIN_SIZE; cpu_mon_max_size = regs[1] & CPUID5_MON_MAX_SIZE; } identify_cpu2(); #ifdef __i386__ if (cpu_high > 0 && (cpu_vendor_id == CPU_VENDOR_INTEL || cpu_vendor_id == CPU_VENDOR_AMD || cpu_vendor_id == CPU_VENDOR_TRANSMETA || cpu_vendor_id == CPU_VENDOR_CENTAUR || cpu_vendor_id == CPU_VENDOR_NSC)) { do_cpuid(0x80000000, regs); if (regs[0] >= 0x80000000) cpu_exthigh = regs[0]; } #else if (cpu_vendor_id == CPU_VENDOR_INTEL || cpu_vendor_id == CPU_VENDOR_AMD || cpu_vendor_id == CPU_VENDOR_CENTAUR) { do_cpuid(0x80000000, regs); cpu_exthigh = regs[0]; } #endif if (cpu_exthigh >= 0x80000001) { do_cpuid(0x80000001, regs); amd_feature = regs[3] & ~(cpu_feature & 0x0183f3ff); amd_feature2 = regs[2]; } if (cpu_exthigh >= 0x80000007) { do_cpuid(0x80000007, regs); amd_rascap = regs[1]; amd_pminfo = regs[3]; } if (cpu_exthigh >= 0x80000008) { do_cpuid(0x80000008, regs); cpu_maxphyaddr = regs[0] & 0xff; amd_extended_feature_extensions = regs[1]; cpu_procinfo2 = regs[2]; } else { cpu_maxphyaddr = (cpu_feature & CPUID_PAE) != 0 ? 36 : 32; } #ifdef __i386__ if (cpu_vendor_id == CPU_VENDOR_CYRIX) { if (cpu == CPU_486) { /* * These conditions are equivalent to: * - CPU does not support cpuid instruction. * - Cyrix/IBM CPU is detected. */ if (identblue() == IDENTBLUE_IBMCPU) { strcpy(cpu_vendor, "IBM"); cpu_vendor_id = CPU_VENDOR_IBM; cpu = CPU_BLUE; return; } } switch (cpu_id & 0xf00) { case 0x600: /* * Cyrix's datasheet does not describe DIRs. 
* Therefor, I assume it does not have them * and use the result of the cpuid instruction. * XXX they seem to have it for now at least. -Peter */ identifycyrix(); cpu = CPU_M2; break; default: identifycyrix(); /* * This routine contains a trick. * Don't check (cpu_id & 0x00f0) == 0x50 to detect M2, now. */ switch (cyrix_did & 0x00f0) { case 0x00: case 0xf0: cpu = CPU_486DLC; break; case 0x10: cpu = CPU_CY486DX; break; case 0x20: if ((cyrix_did & 0x000f) < 8) cpu = CPU_M1; else cpu = CPU_M1SC; break; case 0x30: cpu = CPU_M1; break; case 0x40: /* MediaGX CPU */ cpu = CPU_M1SC; break; default: /* M2 and later CPUs are treated as M2. */ cpu = CPU_M2; /* * enable cpuid instruction. */ ccr3 = read_cyrix_reg(CCR3); write_cyrix_reg(CCR3, CCR3_MAPEN0); write_cyrix_reg(CCR4, read_cyrix_reg(CCR4) | CCR4_CPUID); write_cyrix_reg(CCR3, ccr3); do_cpuid(0, regs); cpu_high = regs[0]; /* eax */ do_cpuid(1, regs); cpu_id = regs[0]; /* eax */ cpu_feature = regs[3]; /* edx */ break; } } } else if (cpu == CPU_486 && *cpu_vendor == '\0') { /* * There are BlueLightning CPUs that do not change * undefined flags by dividing 5 by 2. In this case, * the CPU identification routine in locore.s leaves * cpu_vendor null string and puts CPU_486 into the * cpu. */ if (identblue() == IDENTBLUE_IBMCPU) { strcpy(cpu_vendor, "IBM"); cpu_vendor_id = CPU_VENDOR_IBM; cpu = CPU_BLUE; return; } } #endif } int pti_get_default(void) { if (strcmp(cpu_vendor, AMD_VENDOR_ID) == 0) return (0); if ((cpu_ia32_arch_caps & IA32_ARCH_CAP_RDCL_NO) != 0) return (0); return (1); } static u_int find_cpu_vendor_id(void) { int i; for (i = 0; i < nitems(cpu_vendors); i++) if (strcmp(cpu_vendor, cpu_vendors[i].vendor) == 0) return (cpu_vendors[i].vendor_id); return (0); } static void print_AMD_assoc(int i) { if (i == 255) printf(", fully associative\n"); else printf(", %d-way associative\n", i); } static void print_AMD_l2_assoc(int i) { switch (i & 0x0f) { case 0: printf(", disabled/not present\n"); break; case 1: printf(", direct mapped\n"); break; case 2: printf(", 2-way associative\n"); break; case 4: printf(", 4-way associative\n"); break; case 6: printf(", 8-way associative\n"); break; case 8: printf(", 16-way associative\n"); break; case 15: printf(", fully associative\n"); break; default: printf(", reserved configuration\n"); break; } } static void print_AMD_info(void) { #ifdef __i386__ uint64_t amd_whcr; #endif u_int regs[4]; if (cpu_exthigh >= 0x80000005) { do_cpuid(0x80000005, regs); printf("L1 2MB data TLB: %d entries", (regs[0] >> 16) & 0xff); print_AMD_assoc(regs[0] >> 24); printf("L1 2MB instruction TLB: %d entries", regs[0] & 0xff); print_AMD_assoc((regs[0] >> 8) & 0xff); printf("L1 4KB data TLB: %d entries", (regs[1] >> 16) & 0xff); print_AMD_assoc(regs[1] >> 24); printf("L1 4KB instruction TLB: %d entries", regs[1] & 0xff); print_AMD_assoc((regs[1] >> 8) & 0xff); printf("L1 data cache: %d kbytes", regs[2] >> 24); printf(", %d bytes/line", regs[2] & 0xff); printf(", %d lines/tag", (regs[2] >> 8) & 0xff); print_AMD_assoc((regs[2] >> 16) & 0xff); printf("L1 instruction cache: %d kbytes", regs[3] >> 24); printf(", %d bytes/line", regs[3] & 0xff); printf(", %d lines/tag", (regs[3] >> 8) & 0xff); print_AMD_assoc((regs[3] >> 16) & 0xff); } if (cpu_exthigh >= 0x80000006) { do_cpuid(0x80000006, regs); if ((regs[0] >> 16) != 0) { printf("L2 2MB data TLB: %d entries", (regs[0] >> 16) & 0xfff); print_AMD_l2_assoc(regs[0] >> 28); printf("L2 2MB instruction TLB: %d entries", regs[0] & 0xfff); print_AMD_l2_assoc((regs[0] >> 28) & 0xf); } else { 
printf("L2 2MB unified TLB: %d entries", regs[0] & 0xfff); print_AMD_l2_assoc((regs[0] >> 28) & 0xf); } if ((regs[1] >> 16) != 0) { printf("L2 4KB data TLB: %d entries", (regs[1] >> 16) & 0xfff); print_AMD_l2_assoc(regs[1] >> 28); printf("L2 4KB instruction TLB: %d entries", (regs[1] >> 16) & 0xfff); print_AMD_l2_assoc((regs[1] >> 28) & 0xf); } else { printf("L2 4KB unified TLB: %d entries", (regs[1] >> 16) & 0xfff); print_AMD_l2_assoc((regs[1] >> 28) & 0xf); } printf("L2 unified cache: %d kbytes", regs[2] >> 16); printf(", %d bytes/line", regs[2] & 0xff); printf(", %d lines/tag", (regs[2] >> 8) & 0x0f); print_AMD_l2_assoc((regs[2] >> 12) & 0x0f); } #ifdef __i386__ if (((cpu_id & 0xf00) == 0x500) && (((cpu_id & 0x0f0) > 0x80) || (((cpu_id & 0x0f0) == 0x80) && (cpu_id & 0x00f) > 0x07))) { /* K6-2(new core [Stepping 8-F]), K6-III or later */ amd_whcr = rdmsr(0xc0000082); if (!(amd_whcr & (0x3ff << 22))) { printf("Write Allocate Disable\n"); } else { printf("Write Allocate Enable Limit: %dM bytes\n", (u_int32_t)((amd_whcr & (0x3ff << 22)) >> 22) * 4); printf("Write Allocate 15-16M bytes: %s\n", (amd_whcr & (1 << 16)) ? "Enable" : "Disable"); } } else if (((cpu_id & 0xf00) == 0x500) && ((cpu_id & 0x0f0) > 0x50)) { /* K6, K6-2(old core) */ amd_whcr = rdmsr(0xc0000082); if (!(amd_whcr & (0x7f << 1))) { printf("Write Allocate Disable\n"); } else { printf("Write Allocate Enable Limit: %dM bytes\n", (u_int32_t)((amd_whcr & (0x7f << 1)) >> 1) * 4); printf("Write Allocate 15-16M bytes: %s\n", (amd_whcr & 0x0001) ? "Enable" : "Disable"); printf("Hardware Write Allocate Control: %s\n", (amd_whcr & 0x0100) ? "Enable" : "Disable"); } } #endif /* * Opteron Rev E shows a bug as in very rare occasions a read memory * barrier is not performed as expected if it is followed by a * non-atomic read-modify-write instruction. * As long as that bug pops up very rarely (intensive machine usage * on other operating systems generally generates one unexplainable * crash any 2 months) and as long as a model specific fix would be * impractical at this stage, print out a warning string if the broken * model and family are identified. 
*/ if (CPUID_TO_FAMILY(cpu_id) == 0xf && CPUID_TO_MODEL(cpu_id) >= 0x20 && CPUID_TO_MODEL(cpu_id) <= 0x3f) printf("WARNING: This architecture revision has known SMP " "hardware bugs which may cause random instability\n"); } static void print_INTEL_info(void) { u_int regs[4]; u_int rounds, regnum; u_int nwaycode, nway; if (cpu_high >= 2) { rounds = 0; do { do_cpuid(0x2, regs); if (rounds == 0 && (rounds = (regs[0] & 0xff)) == 0) break; /* we have a buggy CPU */ for (regnum = 0; regnum <= 3; ++regnum) { if (regs[regnum] & (1<<31)) continue; if (regnum != 0) print_INTEL_TLB(regs[regnum] & 0xff); print_INTEL_TLB((regs[regnum] >> 8) & 0xff); print_INTEL_TLB((regs[regnum] >> 16) & 0xff); print_INTEL_TLB((regs[regnum] >> 24) & 0xff); } } while (--rounds > 0); } if (cpu_exthigh >= 0x80000006) { do_cpuid(0x80000006, regs); nwaycode = (regs[2] >> 12) & 0x0f; if (nwaycode >= 0x02 && nwaycode <= 0x08) nway = 1 << (nwaycode / 2); else nway = 0; printf("L2 cache: %u kbytes, %u-way associative, %u bytes/line\n", (regs[2] >> 16) & 0xffff, nway, regs[2] & 0xff); } } static void print_INTEL_TLB(u_int data) { switch (data) { case 0x0: case 0x40: default: break; case 0x1: printf("Instruction TLB: 4 KB pages, 4-way set associative, 32 entries\n"); break; case 0x2: printf("Instruction TLB: 4 MB pages, fully associative, 2 entries\n"); break; case 0x3: printf("Data TLB: 4 KB pages, 4-way set associative, 64 entries\n"); break; case 0x4: printf("Data TLB: 4 MB Pages, 4-way set associative, 8 entries\n"); break; case 0x6: printf("1st-level instruction cache: 8 KB, 4-way set associative, 32 byte line size\n"); break; case 0x8: printf("1st-level instruction cache: 16 KB, 4-way set associative, 32 byte line size\n"); break; case 0x9: printf("1st-level instruction cache: 32 KB, 4-way set associative, 64 byte line size\n"); break; case 0xa: printf("1st-level data cache: 8 KB, 2-way set associative, 32 byte line size\n"); break; case 0xb: printf("Instruction TLB: 4 MByte pages, 4-way set associative, 4 entries\n"); break; case 0xc: printf("1st-level data cache: 16 KB, 4-way set associative, 32 byte line size\n"); break; case 0xd: printf("1st-level data cache: 16 KBytes, 4-way set associative, 64 byte line size"); break; case 0xe: printf("1st-level data cache: 24 KBytes, 6-way set associative, 64 byte line size\n"); break; case 0x1d: printf("2nd-level cache: 128 KBytes, 2-way set associative, 64 byte line size\n"); break; case 0x21: printf("2nd-level cache: 256 KBytes, 8-way set associative, 64 byte line size\n"); break; case 0x22: printf("3rd-level cache: 512 KB, 4-way set associative, sectored cache, 64 byte line size\n"); break; case 0x23: printf("3rd-level cache: 1 MB, 8-way set associative, sectored cache, 64 byte line size\n"); break; case 0x24: printf("2nd-level cache: 1 MBytes, 16-way set associative, 64 byte line size\n"); break; case 0x25: printf("3rd-level cache: 2 MB, 8-way set associative, sectored cache, 64 byte line size\n"); break; case 0x29: printf("3rd-level cache: 4 MB, 8-way set associative, sectored cache, 64 byte line size\n"); break; case 0x2c: printf("1st-level data cache: 32 KB, 8-way set associative, 64 byte line size\n"); break; case 0x30: printf("1st-level instruction cache: 32 KB, 8-way set associative, 64 byte line size\n"); break; case 0x39: /* De-listed in SDM rev. 54 */ printf("2nd-level cache: 128 KB, 4-way set associative, sectored cache, 64 byte line size\n"); break; case 0x3b: /* De-listed in SDM rev. 
54 */ printf("2nd-level cache: 128 KB, 2-way set associative, sectored cache, 64 byte line size\n"); break; case 0x3c: /* De-listed in SDM rev. 54 */ printf("2nd-level cache: 256 KB, 4-way set associative, sectored cache, 64 byte line size\n"); break; case 0x41: printf("2nd-level cache: 128 KB, 4-way set associative, 32 byte line size\n"); break; case 0x42: printf("2nd-level cache: 256 KB, 4-way set associative, 32 byte line size\n"); break; case 0x43: printf("2nd-level cache: 512 KB, 4-way set associative, 32 byte line size\n"); break; case 0x44: printf("2nd-level cache: 1 MB, 4-way set associative, 32 byte line size\n"); break; case 0x45: printf("2nd-level cache: 2 MB, 4-way set associative, 32 byte line size\n"); break; case 0x46: printf("3rd-level cache: 4 MB, 4-way set associative, 64 byte line size\n"); break; case 0x47: printf("3rd-level cache: 8 MB, 8-way set associative, 64 byte line size\n"); break; case 0x48: printf("2nd-level cache: 3MByte, 12-way set associative, 64 byte line size\n"); break; case 0x49: if (CPUID_TO_FAMILY(cpu_id) == 0xf && CPUID_TO_MODEL(cpu_id) == 0x6) printf("3rd-level cache: 4MB, 16-way set associative, 64-byte line size\n"); else printf("2nd-level cache: 4 MByte, 16-way set associative, 64 byte line size"); break; case 0x4a: printf("3rd-level cache: 6MByte, 12-way set associative, 64 byte line size\n"); break; case 0x4b: printf("3rd-level cache: 8MByte, 16-way set associative, 64 byte line size\n"); break; case 0x4c: printf("3rd-level cache: 12MByte, 12-way set associative, 64 byte line size\n"); break; case 0x4d: printf("3rd-level cache: 16MByte, 16-way set associative, 64 byte line size\n"); break; case 0x4e: printf("2nd-level cache: 6MByte, 24-way set associative, 64 byte line size\n"); break; case 0x4f: printf("Instruction TLB: 4 KByte pages, 32 entries\n"); break; case 0x50: printf("Instruction TLB: 4 KB, 2 MB or 4 MB pages, fully associative, 64 entries\n"); break; case 0x51: printf("Instruction TLB: 4 KB, 2 MB or 4 MB pages, fully associative, 128 entries\n"); break; case 0x52: printf("Instruction TLB: 4 KB, 2 MB or 4 MB pages, fully associative, 256 entries\n"); break; case 0x55: printf("Instruction TLB: 2-MByte or 4-MByte pages, fully associative, 7 entries\n"); break; case 0x56: printf("Data TLB0: 4 MByte pages, 4-way set associative, 16 entries\n"); break; case 0x57: printf("Data TLB0: 4 KByte pages, 4-way associative, 16 entries\n"); break; case 0x59: printf("Data TLB0: 4 KByte pages, fully associative, 16 entries\n"); break; case 0x5a: printf("Data TLB0: 2-MByte or 4 MByte pages, 4-way set associative, 32 entries\n"); break; case 0x5b: printf("Data TLB: 4 KB or 4 MB pages, fully associative, 64 entries\n"); break; case 0x5c: printf("Data TLB: 4 KB or 4 MB pages, fully associative, 128 entries\n"); break; case 0x5d: printf("Data TLB: 4 KB or 4 MB pages, fully associative, 256 entries\n"); break; case 0x60: printf("1st-level data cache: 16 KB, 8-way set associative, sectored cache, 64 byte line size\n"); break; case 0x61: printf("Instruction TLB: 4 KByte pages, fully associative, 48 entries\n"); break; case 0x63: printf("Data TLB: 2 MByte or 4 MByte pages, 4-way set associative, 32 entries and a separate array with 1 GByte pages, 4-way set associative, 4 entries\n"); break; case 0x64: printf("Data TLB: 4 KBytes pages, 4-way set associative, 512 entries\n"); break; case 0x66: printf("1st-level data cache: 8 KB, 4-way set associative, sectored cache, 64 byte line size\n"); break; case 0x67: printf("1st-level data cache: 16 KB, 4-way set 
associative, sectored cache, 64 byte line size\n"); break; case 0x68: printf("1st-level data cache: 32 KB, 4 way set associative, sectored cache, 64 byte line size\n"); break; case 0x6a: printf("uTLB: 4KByte pages, 8-way set associative, 64 entries\n"); break; case 0x6b: printf("DTLB: 4KByte pages, 8-way set associative, 256 entries\n"); break; case 0x6c: printf("DTLB: 2M/4M pages, 8-way set associative, 128 entries\n"); break; case 0x6d: printf("DTLB: 1 GByte pages, fully associative, 16 entries\n"); break; case 0x70: printf("Trace cache: 12K-uops, 8-way set associative\n"); break; case 0x71: printf("Trace cache: 16K-uops, 8-way set associative\n"); break; case 0x72: printf("Trace cache: 32K-uops, 8-way set associative\n"); break; case 0x76: printf("Instruction TLB: 2M/4M pages, fully associative, 8 entries\n"); break; case 0x78: printf("2nd-level cache: 1 MB, 4-way set associative, 64-byte line size\n"); break; case 0x79: printf("2nd-level cache: 128 KB, 8-way set associative, sectored cache, 64 byte line size\n"); break; case 0x7a: printf("2nd-level cache: 256 KB, 8-way set associative, sectored cache, 64 byte line size\n"); break; case 0x7b: printf("2nd-level cache: 512 KB, 8-way set associative, sectored cache, 64 byte line size\n"); break; case 0x7c: printf("2nd-level cache: 1 MB, 8-way set associative, sectored cache, 64 byte line size\n"); break; case 0x7d: printf("2nd-level cache: 2-MB, 8-way set associative, 64-byte line size\n"); break; case 0x7f: printf("2nd-level cache: 512-KB, 2-way set associative, 64-byte line size\n"); break; case 0x80: printf("2nd-level cache: 512 KByte, 8-way set associative, 64-byte line size\n"); break; case 0x82: printf("2nd-level cache: 256 KB, 8-way set associative, 32 byte line size\n"); break; case 0x83: printf("2nd-level cache: 512 KB, 8-way set associative, 32 byte line size\n"); break; case 0x84: printf("2nd-level cache: 1 MB, 8-way set associative, 32 byte line size\n"); break; case 0x85: printf("2nd-level cache: 2 MB, 8-way set associative, 32 byte line size\n"); break; case 0x86: printf("2nd-level cache: 512 KB, 4-way set associative, 64 byte line size\n"); break; case 0x87: printf("2nd-level cache: 1 MB, 8-way set associative, 64 byte line size\n"); break; case 0xa0: printf("DTLB: 4k pages, fully associative, 32 entries\n"); break; case 0xb0: printf("Instruction TLB: 4 KB Pages, 4-way set associative, 128 entries\n"); break; case 0xb1: printf("Instruction TLB: 2M pages, 4-way, 8 entries or 4M pages, 4-way, 4 entries\n"); break; case 0xb2: printf("Instruction TLB: 4KByte pages, 4-way set associative, 64 entries\n"); break; case 0xb3: printf("Data TLB: 4 KB Pages, 4-way set associative, 128 entries\n"); break; case 0xb4: printf("Data TLB1: 4 KByte pages, 4-way associative, 256 entries\n"); break; case 0xb5: printf("Instruction TLB: 4KByte pages, 8-way set associative, 64 entries\n"); break; case 0xb6: printf("Instruction TLB: 4KByte pages, 8-way set associative, 128 entries\n"); break; case 0xba: printf("Data TLB1: 4 KByte pages, 4-way associative, 64 entries\n"); break; case 0xc0: printf("Data TLB: 4 KByte and 4 MByte pages, 4-way associative, 8 entries\n"); break; case 0xc1: printf("Shared 2nd-Level TLB: 4 KByte/2MByte pages, 8-way associative, 1024 entries\n"); break; case 0xc2: printf("DTLB: 4 KByte/2 MByte pages, 4-way associative, 16 entries\n"); break; case 0xc3: printf("Shared 2nd-Level TLB: 4 KByte /2 MByte pages, 6-way associative, 1536 entries. 
Also 1GBbyte pages, 4-way, 16 entries\n"); break; case 0xc4: printf("DTLB: 2M/4M Byte pages, 4-way associative, 32 entries\n"); break; case 0xca: printf("Shared 2nd-Level TLB: 4 KByte pages, 4-way associative, 512 entries\n"); break; case 0xd0: printf("3rd-level cache: 512 KByte, 4-way set associative, 64 byte line size\n"); break; case 0xd1: printf("3rd-level cache: 1 MByte, 4-way set associative, 64 byte line size\n"); break; case 0xd2: printf("3rd-level cache: 2 MByte, 4-way set associative, 64 byte line size\n"); break; case 0xd6: printf("3rd-level cache: 1 MByte, 8-way set associative, 64 byte line size\n"); break; case 0xd7: printf("3rd-level cache: 2 MByte, 8-way set associative, 64 byte line size\n"); break; case 0xd8: printf("3rd-level cache: 4 MByte, 8-way set associative, 64 byte line size\n"); break; case 0xdc: printf("3rd-level cache: 1.5 MByte, 12-way set associative, 64 byte line size\n"); break; case 0xdd: printf("3rd-level cache: 3 MByte, 12-way set associative, 64 byte line size\n"); break; case 0xde: printf("3rd-level cache: 6 MByte, 12-way set associative, 64 byte line size\n"); break; case 0xe2: printf("3rd-level cache: 2 MByte, 16-way set associative, 64 byte line size\n"); break; case 0xe3: printf("3rd-level cache: 4 MByte, 16-way set associative, 64 byte line size\n"); break; case 0xe4: printf("3rd-level cache: 8 MByte, 16-way set associative, 64 byte line size\n"); break; case 0xea: printf("3rd-level cache: 12MByte, 24-way set associative, 64 byte line size\n"); break; case 0xeb: printf("3rd-level cache: 18MByte, 24-way set associative, 64 byte line size\n"); break; case 0xec: printf("3rd-level cache: 24MByte, 24-way set associative, 64 byte line size\n"); break; case 0xf0: printf("64-Byte prefetching\n"); break; case 0xf1: printf("128-Byte prefetching\n"); break; } } static void print_svm_info(void) { u_int features, regs[4]; uint64_t msr; int comma; printf("\n SVM: "); do_cpuid(0x8000000A, regs); features = regs[3]; msr = rdmsr(MSR_VM_CR); if ((msr & VM_CR_SVMDIS) == VM_CR_SVMDIS) printf("(disabled in BIOS) "); if (!bootverbose) { comma = 0; if (features & (1 << 0)) { printf("%sNP", comma ? "," : ""); comma = 1; } if (features & (1 << 3)) { printf("%sNRIP", comma ? "," : ""); comma = 1; } if (features & (1 << 5)) { printf("%sVClean", comma ? "," : ""); comma = 1; } if (features & (1 << 6)) { printf("%sAFlush", comma ? "," : ""); comma = 1; } if (features & (1 << 7)) { printf("%sDAssist", comma ? "," : ""); comma = 1; } printf("%sNAsids=%d", comma ? 
"," : "", regs[1]); return; } printf("Features=0x%b", features, "\020" "\001NP" /* Nested paging */ "\002LbrVirt" /* LBR virtualization */ "\003SVML" /* SVM lock */ "\004NRIPS" /* NRIP save */ "\005TscRateMsr" /* MSR based TSC rate control */ "\006VmcbClean" /* VMCB clean bits */ "\007FlushByAsid" /* Flush by ASID */ "\010DecodeAssist" /* Decode assist */ "\011" "\012" "\013PauseFilter" /* PAUSE intercept filter */ "\014EncryptedMcodePatch" "\015PauseFilterThreshold" /* PAUSE filter threshold */ "\016AVIC" /* virtual interrupt controller */ "\017" "\020V_VMSAVE_VMLOAD" "\021vGIF" "\022" "\023" "\024" "\025" "\026" "\027" "\030" "\031" "\032" "\033" "\034" "\035" "\036" "\037" "\040" ); printf("\nRevision=%d, ASIDs=%d", regs[0] & 0xff, regs[1]); } #ifdef __i386__ static void print_transmeta_info(void) { u_int regs[4], nreg = 0; do_cpuid(0x80860000, regs); nreg = regs[0]; if (nreg >= 0x80860001) { do_cpuid(0x80860001, regs); printf(" Processor revision %u.%u.%u.%u\n", (regs[1] >> 24) & 0xff, (regs[1] >> 16) & 0xff, (regs[1] >> 8) & 0xff, regs[1] & 0xff); } if (nreg >= 0x80860002) { do_cpuid(0x80860002, regs); printf(" Code Morphing Software revision %u.%u.%u-%u-%u\n", (regs[1] >> 24) & 0xff, (regs[1] >> 16) & 0xff, (regs[1] >> 8) & 0xff, regs[1] & 0xff, regs[2]); } if (nreg >= 0x80860006) { char info[65]; do_cpuid(0x80860003, (u_int*) &info[0]); do_cpuid(0x80860004, (u_int*) &info[16]); do_cpuid(0x80860005, (u_int*) &info[32]); do_cpuid(0x80860006, (u_int*) &info[48]); info[64] = 0; printf(" %s\n", info); } } #endif static void print_via_padlock_info(void) { u_int regs[4]; do_cpuid(0xc0000001, regs); printf("\n VIA Padlock Features=0x%b", regs[3], "\020" "\003RNG" /* RNG */ "\007AES" /* ACE */ "\011AES-CTR" /* ACE2 */ "\013SHA1,SHA256" /* PHE */ "\015RSA" /* PMM */ ); } static uint32_t vmx_settable(uint64_t basic, int msr, int true_msr) { uint64_t val; if (basic & (1ULL << 55)) val = rdmsr(true_msr); else val = rdmsr(msr); /* Just report the controls that can be set to 1. */ return (val >> 32); } static void print_vmx_info(void) { uint64_t basic, msr; uint32_t entry, exit, mask, pin, proc, proc2; int comma; printf("\n VT-x: "); msr = rdmsr(MSR_IA32_FEATURE_CONTROL); if (!(msr & IA32_FEATURE_CONTROL_VMX_EN)) printf("(disabled in BIOS) "); basic = rdmsr(MSR_VMX_BASIC); pin = vmx_settable(basic, MSR_VMX_PINBASED_CTLS, MSR_VMX_TRUE_PINBASED_CTLS); proc = vmx_settable(basic, MSR_VMX_PROCBASED_CTLS, MSR_VMX_TRUE_PROCBASED_CTLS); if (proc & PROCBASED_SECONDARY_CONTROLS) proc2 = vmx_settable(basic, MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2); else proc2 = 0; exit = vmx_settable(basic, MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS); entry = vmx_settable(basic, MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS); if (!bootverbose) { comma = 0; if (exit & VM_EXIT_SAVE_PAT && exit & VM_EXIT_LOAD_PAT && entry & VM_ENTRY_LOAD_PAT) { printf("%sPAT", comma ? "," : ""); comma = 1; } if (proc & PROCBASED_HLT_EXITING) { printf("%sHLT", comma ? "," : ""); comma = 1; } if (proc & PROCBASED_MTF) { printf("%sMTF", comma ? "," : ""); comma = 1; } if (proc & PROCBASED_PAUSE_EXITING) { printf("%sPAUSE", comma ? "," : ""); comma = 1; } if (proc2 & PROCBASED2_ENABLE_EPT) { printf("%sEPT", comma ? "," : ""); comma = 1; } if (proc2 & PROCBASED2_UNRESTRICTED_GUEST) { printf("%sUG", comma ? "," : ""); comma = 1; } if (proc2 & PROCBASED2_ENABLE_VPID) { printf("%sVPID", comma ? 
"," : ""); comma = 1; } if (proc & PROCBASED_USE_TPR_SHADOW && proc2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES && proc2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE && proc2 & PROCBASED2_APIC_REGISTER_VIRTUALIZATION && proc2 & PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY) { printf("%sVID", comma ? "," : ""); comma = 1; if (pin & PINBASED_POSTED_INTERRUPT) printf(",PostIntr"); } return; } mask = basic >> 32; printf("Basic Features=0x%b", mask, "\020" "\02132PA" /* 32-bit physical addresses */ "\022SMM" /* SMM dual-monitor */ "\027INS/OUTS" /* VM-exit info for INS and OUTS */ "\030TRUE" /* TRUE_CTLS MSRs */ ); printf("\n Pin-Based Controls=0x%b", pin, "\020" "\001ExtINT" /* External-interrupt exiting */ "\004NMI" /* NMI exiting */ "\006VNMI" /* Virtual NMIs */ "\007PreTmr" /* Activate VMX-preemption timer */ "\010PostIntr" /* Process posted interrupts */ ); printf("\n Primary Processor Controls=0x%b", proc, "\020" "\003INTWIN" /* Interrupt-window exiting */ "\004TSCOff" /* Use TSC offsetting */ "\010HLT" /* HLT exiting */ "\012INVLPG" /* INVLPG exiting */ "\013MWAIT" /* MWAIT exiting */ "\014RDPMC" /* RDPMC exiting */ "\015RDTSC" /* RDTSC exiting */ "\020CR3-LD" /* CR3-load exiting */ "\021CR3-ST" /* CR3-store exiting */ "\024CR8-LD" /* CR8-load exiting */ "\025CR8-ST" /* CR8-store exiting */ "\026TPR" /* Use TPR shadow */ "\027NMIWIN" /* NMI-window exiting */ "\030MOV-DR" /* MOV-DR exiting */ "\031IO" /* Unconditional I/O exiting */ "\032IOmap" /* Use I/O bitmaps */ "\034MTF" /* Monitor trap flag */ "\035MSRmap" /* Use MSR bitmaps */ "\036MONITOR" /* MONITOR exiting */ "\037PAUSE" /* PAUSE exiting */ ); if (proc & PROCBASED_SECONDARY_CONTROLS) printf("\n Secondary Processor Controls=0x%b", proc2, "\020" "\001APIC" /* Virtualize APIC accesses */ "\002EPT" /* Enable EPT */ "\003DT" /* Descriptor-table exiting */ "\004RDTSCP" /* Enable RDTSCP */ "\005x2APIC" /* Virtualize x2APIC mode */ "\006VPID" /* Enable VPID */ "\007WBINVD" /* WBINVD exiting */ "\010UG" /* Unrestricted guest */ "\011APIC-reg" /* APIC-register virtualization */ "\012VID" /* Virtual-interrupt delivery */ "\013PAUSE-loop" /* PAUSE-loop exiting */ "\014RDRAND" /* RDRAND exiting */ "\015INVPCID" /* Enable INVPCID */ "\016VMFUNC" /* Enable VM functions */ "\017VMCS" /* VMCS shadowing */ "\020EPT#VE" /* EPT-violation #VE */ "\021XSAVES" /* Enable XSAVES/XRSTORS */ ); printf("\n Exit Controls=0x%b", mask, "\020" "\003DR" /* Save debug controls */ /* Ignore Host address-space size */ "\015PERF" /* Load MSR_PERF_GLOBAL_CTRL */ "\020AckInt" /* Acknowledge interrupt on exit */ "\023PAT-SV" /* Save MSR_PAT */ "\024PAT-LD" /* Load MSR_PAT */ "\025EFER-SV" /* Save MSR_EFER */ "\026EFER-LD" /* Load MSR_EFER */ "\027PTMR-SV" /* Save VMX-preemption timer value */ ); printf("\n Entry Controls=0x%b", mask, "\020" "\003DR" /* Save debug controls */ /* Ignore IA-32e mode guest */ /* Ignore Entry to SMM */ /* Ignore Deactivate dual-monitor treatment */ "\016PERF" /* Load MSR_PERF_GLOBAL_CTRL */ "\017PAT" /* Load MSR_PAT */ "\020EFER" /* Load MSR_EFER */ ); if (proc & PROCBASED_SECONDARY_CONTROLS && (proc2 & (PROCBASED2_ENABLE_EPT | PROCBASED2_ENABLE_VPID)) != 0) { msr = rdmsr(MSR_VMX_EPT_VPID_CAP); mask = msr; printf("\n EPT Features=0x%b", mask, "\020" "\001XO" /* Execute-only translations */ "\007PW4" /* Page-walk length of 4 */ "\011UC" /* EPT paging-structure mem can be UC */ "\017WB" /* EPT paging-structure mem can be WB */ "\0212M" /* EPT PDE can map a 2-Mbyte page */ "\0221G" /* EPT PDPTE can map a 1-Gbyte page */ "\025INVEPT" /* INVEPT is supported */ 
"\026AD" /* Accessed and dirty flags for EPT */ "\032single" /* INVEPT single-context type */ "\033all" /* INVEPT all-context type */ ); mask = msr >> 32; printf("\n VPID Features=0x%b", mask, "\020" "\001INVVPID" /* INVVPID is supported */ "\011individual" /* INVVPID individual-address type */ "\012single" /* INVVPID single-context type */ "\013all" /* INVVPID all-context type */ /* INVVPID single-context-retaining-globals type */ "\014single-globals" ); } } static void print_hypervisor_info(void) { if (*hv_vendor) printf("Hypervisor: Origin = \"%s\"\n", hv_vendor); } Index: projects/clang700-import/targets/pseudo/userland/lib/Makefile.depend =================================================================== --- projects/clang700-import/targets/pseudo/userland/lib/Makefile.depend (revision 340426) +++ projects/clang700-import/targets/pseudo/userland/lib/Makefile.depend (revision 340427) @@ -1,264 +1,261 @@ # $FreeBSD$ # This file is not autogenerated - take care! .include DIRDEPS = \ lib/${CSU_DIR} \ lib/atf/libatf-c \ lib/atf/libatf-c++ \ lib/geom/cache \ lib/geom/concat \ lib/geom/eli \ lib/geom/journal \ lib/geom/label \ lib/geom/mirror \ lib/geom/mountver \ lib/geom/multipath \ lib/geom/nop \ lib/geom/part \ lib/geom/raid \ lib/geom/raid3 \ lib/geom/sched \ lib/geom/shsec \ lib/geom/stripe \ lib/geom/virstor \ lib/lib80211 \ lib/libalias/libalias \ lib/libalias/modules/cuseeme \ lib/libalias/modules/dummy \ lib/libalias/modules/ftp \ lib/libalias/modules/irc \ lib/libalias/modules/nbt \ lib/libalias/modules/pptp \ lib/libalias/modules/skinny \ lib/libalias/modules/smedia \ lib/libarchive \ lib/libauditd \ lib/libbegemot \ lib/libblocksruntime \ lib/libbluetooth \ lib/libbsdstat \ lib/libbsm \ lib/libbsnmp/libbsnmp \ lib/libbz2 \ lib/libc \ lib/libc_nonshared \ lib/libc++ \ lib/libc++experimental \ lib/libcalendar \ lib/libcam \ lib/libcapsicum \ lib/libcom_err/doc \ lib/libcompat \ lib/libcompiler_rt \ lib/libcrypt \ lib/libcuse \ lib/libcxxrt \ lib/libdevctl \ lib/libdevdctl \ lib/libdevinfo \ lib/libdevstat \ + lib/libdl \ lib/libdwarf \ lib/libedit/edit/readline \ lib/libelf \ lib/libelftc \ lib/libevent \ lib/libexecinfo \ lib/libexpat \ lib/libfetch \ lib/libfigpar \ lib/libgeom \ lib/libgssapi \ lib/libiconv_modules/BIG5 \ lib/libiconv_modules/DECHanyu \ lib/libiconv_modules/EUC \ lib/libiconv_modules/EUCTW \ lib/libiconv_modules/GBK2K \ lib/libiconv_modules/HZ \ lib/libiconv_modules/ISO2022 \ lib/libiconv_modules/JOHAB \ lib/libiconv_modules/MSKanji \ lib/libiconv_modules/UES \ lib/libiconv_modules/UTF1632 \ lib/libiconv_modules/UTF7 \ lib/libiconv_modules/UTF8 \ lib/libiconv_modules/VIQR \ lib/libiconv_modules/ZW \ lib/libiconv_modules/iconv_none \ lib/libiconv_modules/iconv_std \ lib/libiconv_modules/mapper_646 \ lib/libiconv_modules/mapper_none \ lib/libiconv_modules/mapper_parallel \ lib/libiconv_modules/mapper_serial \ lib/libiconv_modules/mapper_std \ lib/libiconv_modules/mapper_zone \ lib/libifconfig \ lib/libipsec \ lib/libipx \ lib/libjail \ lib/libkiconv \ lib/libkvm \ lib/libldns \ lib/liblzma \ lib/libmagic \ lib/libmandoc \ lib/libmd \ lib/libmemstat \ lib/libmilter \ lib/libmp \ lib/libnetgraph \ lib/libngatm \ lib/libnv \ lib/libopie \ lib/libpam/libpam \ lib/libpam/modules/pam_chroot \ lib/libpam/modules/pam_deny \ lib/libpam/modules/pam_echo \ lib/libpam/modules/pam_exec \ lib/libpam/modules/pam_ftpusers \ lib/libpam/modules/pam_group \ lib/libpam/modules/pam_guest \ lib/libpam/modules/pam_krb5 \ lib/libpam/modules/pam_ksu \ lib/libpam/modules/pam_lastlog \ 
lib/libpam/modules/pam_login_access \ lib/libpam/modules/pam_nologin \ lib/libpam/modules/pam_opie \ lib/libpam/modules/pam_opieaccess \ lib/libpam/modules/pam_passwdqc \ lib/libpam/modules/pam_permit \ lib/libpam/modules/pam_radius \ lib/libpam/modules/pam_rhosts \ lib/libpam/modules/pam_rootok \ lib/libpam/modules/pam_securetty \ lib/libpam/modules/pam_self \ lib/libpam/modules/pam_ssh \ lib/libpam/modules/pam_tacplus \ lib/libpam/modules/pam_unix \ lib/libpam/static_libpam \ lib/libpathconv \ lib/libpcap \ lib/libpe \ lib/libpjdlog \ lib/libproc \ lib/libprocstat \ lib/libradius \ lib/librpcsec_gss \ lib/librpcsvc \ lib/librss \ lib/librt \ lib/librtld_db \ lib/libsa \ lib/libsbuf \ lib/libsdp \ lib/libsm \ lib/libsmb \ lib/libsmdb \ lib/libsmutil \ lib/libsqlite3 \ lib/libstdbuf \ lib/libstdthreads \ lib/libsysdecode \ lib/libtacplus \ lib/libtelnet \ lib/libthr \ lib/libthread_db \ lib/libucl \ lib/libufs \ lib/libugidfw \ lib/libulog \ lib/libunbound \ lib/libusb \ lib/libusbhid \ lib/libutil \ lib/libvgl \ lib/libvmmapi \ lib/libwrap \ lib/liby \ lib/libyaml \ lib/libypclnt \ lib/libz \ lib/libzstd \ lib/msun \ lib/ncurses/form \ lib/ncurses/formw \ lib/ncurses/menu \ lib/ncurses/menuw \ lib/ncurses/ncurses \ lib/ncurses/ncursesw \ lib/ncurses/panel \ lib/ncurses/panelw \ .if ${MK_CLANG} != "no" && \ (${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" || \ ${MACHINE_CPUARCH} == "arm" || ${MACHINE_CPUARCH} == "i386") DIRDEPS+= \ lib/libclang_rt/asan-preinit \ lib/libclang_rt/asan \ lib/libclang_rt/asan_dynamic \ lib/libclang_rt/asan_cxx \ lib/libclang_rt/include \ lib/libclang_rt/profile \ lib/libclang_rt/safestack \ lib/libclang_rt/stats \ lib/libclang_rt/stats_client \ lib/libclang_rt/ubsan_standalone \ lib/libclang_rt/ubsan_standalone_cxx \ .endif .if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "i386" || \ ${MACHINE_CPUARCH} == "powerpc" DIRDEPS+= stand/libsa32 -.endif - -.if defined(LINKER_FEATURES) && ${LINKER_FEATURES:Mfilter} -DIRDEPS+= lib/libdl .endif .if ${MK_NAND} != "no" DIRDEPS+= lib/libnandfs .endif .if ${MK_CASPER} != "no" DIRDEPS+= \ lib/libcasper/libcasper \ lib/libcasper/services/cap_dns \ lib/libcasper/services/cap_grp \ lib/libcasper/services/cap_pwd \ lib/libcasper/services/cap_random \ lib/libcasper/services/cap_sysctl \ .endif .if ${MK_DIALOG} != "no" DIRDEPS+= lib/libdpv .endif .if ${MK_GPIO} != "no" DIRDEPS+= lib/libgpio .endif .if ${MK_EFI} != "no" DIRDEPS+= lib/libefivar .endif .if ${MK_LLVM_LIBUNWIND} != "no" DIRDEPS+= \ lib/libgcc_eh \ lib/libgcc_s .endif .if ${MK_PMC} != "no" DIRDEPS+= \ lib/libpmc \ lib/libpmcstat .endif .if ${MK_BLACKLIST_SUPPORT} != "no" DIRDEPS+= lib/libblacklist .endif .include Index: projects/clang700-import/tools/diag/prtblknos/main.c =================================================================== --- projects/clang700-import/tools/diag/prtblknos/main.c (revision 340426) +++ projects/clang700-import/tools/diag/prtblknos/main.c (revision 340427) @@ -1,108 +1,108 @@ /* * Copyright (c) 1998, 2003, 2013, 2018 Marshall Kirk McKusick. * All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY MARSHALL KIRK MCKUSICK ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL MARSHALL KIRK MCKUSICK BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include union dinode { struct ufs1_dinode *dp1; struct ufs2_dinode *dp2; }; void prtblknos(struct uufsd *disk, union dinode *dp); int main(argc, argv) int argc; char *argv[]; { struct uufsd disk; - union dinode *dp; + union dinodep dp; struct fs *fs; struct stat sb; struct statfs sfb; char *xargv[4]; char ibuf[64]; char *fsname, *filename; ino_t inonum; int error; filename = NULL; if (argc == 2) { filename = argv[1]; if (lstat(filename, &sb) != 0) err(1, "stat(%s)", filename); if (statfs(filename, &sfb) != 0) err(1, "statfs(%s)", filename); xargv[0] = argv[0]; xargv[1] = sfb.f_mntfromname; sprintf(ibuf, "%jd", (intmax_t)sb.st_ino); xargv[2] = ibuf; xargv[3] = NULL; argv = xargv; argc = 3; } if (argc < 3) { (void)fprintf(stderr, "%s\n%s\n", "usage: prtblknos filename", " prtblknos filesystem inode ..."); exit(1); } fsname = *++argv; /* get the superblock. */ if ((error = ufs_disk_fillout(&disk, fsname)) < 0) err(1, "Cannot access file system superblock on %s", fsname); fs = (struct fs *)&disk.d_sb; /* remaining arguments are inode numbers. */ while (*++argv) { /* get the inode number. 
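The superblock is read with ufs_disk_fillout() above, and each inode is then fetched through the newer libufs getinode() interface that this revision switches to (replacing getino()). A self-contained sketch of that access pattern; the header list follows libufs(3), "/dev/ada0p2" is only a placeholder device, and inode 2 is the UFS root directory:

#include <sys/param.h>
#include <sys/mount.h>

#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>

#include <err.h>
#include <libufs.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	struct uufsd disk;
	union dinodep dp;

	if (ufs_disk_fillout(&disk, "/dev/ada0p2") < 0)
		errx(1, "ufs_disk_fillout: %s", disk.d_error);
	if (getinode(&disk, &dp, (ino_t)2) < 0)
		errx(1, "getinode: %s", disk.d_error);
	/* disk.d_ufs says whether dp.dp1 (UFS1) or dp.dp2 (UFS2) is valid. */
	if (disk.d_ufs == 2)
		printf("root inode size %ju\n", (uintmax_t)dp.dp2->di_size);
	else
		printf("root inode size %ju\n", (uintmax_t)dp.dp1->di_size);
	ufs_disk_close(&disk);
	return (0);
}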
*/ if ((inonum = atoi(*argv)) <= 0 || inonum >= (ino_t)fs->fs_ipg * fs->fs_ncg) warnx("%s is not a valid inode number", *argv); if (filename == NULL) (void)printf("inode #%jd: ", (intmax_t)inonum); else (void)printf("%s (inode #%jd): ", filename, (intmax_t)inonum); - if ((error = getino(&disk, (void **)&dp, inonum, NULL)) < 0) - warn("Read of inode %jd on %s failed", - (intmax_t)inonum, fsname); + if ((error = getinode(&disk, &dp, inonum)) < 0) + warn("Read of inode %jd on %s failed: %s", + (intmax_t)inonum, fsname, disk.d_error); - prtblknos(&disk, dp); + prtblknos(&disk, (union dinode *)dp.dp1); } exit(0); } Index: projects/clang700-import/usr.bin/head/Makefile =================================================================== --- projects/clang700-import/usr.bin/head/Makefile (revision 340426) +++ projects/clang700-import/usr.bin/head/Makefile (revision 340427) @@ -1,11 +1,17 @@ # @(#)Makefile 8.1 (Berkeley) 6/6/93 # $FreeBSD$ .include PROG= head HAS_TESTS= SUBDIR.${MK_TESTS}+= tests +.if ${MK_CASPER} != "no" && !defined(RESCUE) +LIBADD+= casper +LIBADD+= cap_fileargs +CFLAGS+=-DWITH_CASPER +.endif + .include Index: projects/clang700-import/usr.bin/head/head.c =================================================================== --- projects/clang700-import/usr.bin/head/head.c (revision 340426) +++ projects/clang700-import/usr.bin/head/head.c (revision 340427) @@ -1,192 +1,215 @@ /* * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1980, 1987, 1992, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef lint static const char copyright[] = "@(#) Copyright (c) 1980, 1987, 1992, 1993\n\ The Regents of the University of California. 
All rights reserved.\n"; #endif /* not lint */ #ifndef lint #if 0 static char sccsid[] = "@(#)head.c 8.2 (Berkeley) 5/4/95"; #endif #endif /* not lint */ #include __FBSDID("$FreeBSD$"); +#include #include +#include #include #include +#include #include #include #include #include #include #include +#include +#include + /* * head - give the first few lines of a stream or of each of a set of files * * Bill Joy UCB August 24, 1977 */ static void head(FILE *, int); static void head_bytes(FILE *, off_t); static void obsolete(char *[]); static void usage(void); static const struct option long_opts[] = { {"bytes", required_argument, NULL, 'c'}, {"lines", required_argument, NULL, 'n'}, {NULL, no_argument, NULL, 0} }; int main(int argc, char *argv[]) { - int ch; FILE *fp; - int first, linecnt = -1, eval = 0; - off_t bytecnt = -1; char *ep; + off_t bytecnt; + int ch, first, linecnt, eval; + fileargs_t *fa; + cap_rights_t rights; + linecnt = -1; + eval = 0; + bytecnt = -1; + obsolete(argv); - while ((ch = getopt_long(argc, argv, "+n:c:", long_opts, NULL)) != -1) + while ((ch = getopt_long(argc, argv, "+n:c:", long_opts, NULL)) != -1) { switch(ch) { case 'c': bytecnt = strtoimax(optarg, &ep, 10); if (*ep || bytecnt <= 0) errx(1, "illegal byte count -- %s", optarg); break; case 'n': linecnt = strtol(optarg, &ep, 10); if (*ep || linecnt <= 0) errx(1, "illegal line count -- %s", optarg); break; case '?': default: usage(); + /* NOTREACHED */ } + } argc -= optind; argv += optind; + fa = fileargs_init(argc, argv, O_RDONLY, 0, + cap_rights_init(&rights, CAP_READ, CAP_FSTAT, CAP_FCNTL)); + if (fa == NULL) + errx(1, "unable to init casper"); + + caph_cache_catpages(); + if (caph_limit_stdio() < 0 || caph_enter_casper() < 0) + err(1, "unable to enter capability mode"); + if (linecnt != -1 && bytecnt != -1) errx(1, "can't combine line and byte counts"); - if (linecnt == -1 ) + if (linecnt == -1) linecnt = 10; - if (*argv) { - for (first = 1; *argv; ++argv) { - if ((fp = fopen(*argv, "r")) == NULL) { + if (*argv != NULL) { + for (first = 1; *argv != NULL; ++argv) { + if ((fp = fileargs_fopen(fa, *argv, "r")) == NULL) { warn("%s", *argv); eval = 1; continue; } if (argc > 1) { (void)printf("%s==> %s <==\n", first ? "" : "\n", *argv); first = 0; } if (bytecnt == -1) head(fp, linecnt); else head_bytes(fp, bytecnt); (void)fclose(fp); } } else if (bytecnt == -1) head(stdin, linecnt); else head_bytes(stdin, bytecnt); + fileargs_free(fa); exit(eval); } static void head(FILE *fp, int cnt) { char *cp; size_t error, readlen; - while (cnt && (cp = fgetln(fp, &readlen)) != NULL) { + while (cnt != 0 && (cp = fgetln(fp, &readlen)) != NULL) { error = fwrite(cp, sizeof(char), readlen, stdout); if (error != readlen) err(1, "stdout"); cnt--; } } static void head_bytes(FILE *fp, off_t cnt) { char buf[4096]; size_t readlen; while (cnt) { if ((uintmax_t)cnt < sizeof(buf)) readlen = cnt; else readlen = sizeof(buf); readlen = fread(buf, sizeof(char), readlen, fp); if (readlen == 0) break; if (fwrite(buf, sizeof(char), readlen, stdout) != readlen) err(1, "stdout"); cnt -= readlen; } } static void obsolete(char *argv[]) { char *ap; while ((ap = *++argv)) { /* Return if "--" or not "-[0-9]*". 
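The sandboxing added above follows the usual cap_fileargs(3) pattern: wrap the file-name arguments in a fileargs service before entering capability mode, then open them through that service instead of the global namespace. A stripped-down sketch reusing the same calls head(1) makes above; the include list is an assumption based on cap_fileargs(3) and capsicum_helpers(3):

#include <sys/capsicum.h>

#include <capsicum_helpers.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>

#include <libcasper.h>
#include <casper/cap_fileargs.h>

int
main(int argc, char *argv[])
{
	cap_rights_t rights;
	fileargs_t *fa;
	FILE *fp;
	int ch, i;

	/* Hand the file arguments (everything after argv[0]) to casper. */
	fa = fileargs_init(argc - 1, argv + 1, O_RDONLY, 0,
	    cap_rights_init(&rights, CAP_READ, CAP_FSTAT, CAP_FCNTL));
	if (fa == NULL)
		errx(1, "unable to init casper");

	caph_cache_catpages();
	if (caph_limit_stdio() < 0 || caph_enter_casper() < 0)
		err(1, "unable to enter capability mode");

	/* From here on, files can only be opened via the fileargs service. */
	for (i = 1; i < argc; i++) {
		if ((fp = fileargs_fopen(fa, argv[i], "r")) == NULL) {
			warn("%s", argv[i]);
			continue;
		}
		while ((ch = getc(fp)) != EOF)
			putchar(ch);
		fclose(fp);
	}
	fileargs_free(fa);
	return (0);
}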
*/ if (ap[0] != '-' || ap[1] == '-' || !isdigit(ap[1])) return; if ((ap = malloc(strlen(*argv) + 2)) == NULL) err(1, NULL); ap[0] = '-'; ap[1] = 'n'; (void)strcpy(ap + 2, *argv + 1); *argv = ap; } } static void usage(void) { (void)fprintf(stderr, "usage: head [-n lines | -c bytes] [file ...]\n"); exit(1); } Index: projects/clang700-import/usr.bin/wc/Makefile =================================================================== --- projects/clang700-import/usr.bin/wc/Makefile (revision 340426) +++ projects/clang700-import/usr.bin/wc/Makefile (revision 340427) @@ -1,7 +1,15 @@ # @(#)Makefile 8.1 (Berkeley) 6/6/93 # $FreeBSD$ +.include + PROG= wc LIBADD= xo + +.if ${MK_CASPER} != "no" +LIBADD+= casper +LIBADD+= cap_fileargs +CFLAGS+=-DWITH_CASPER +.endif .include Index: projects/clang700-import/usr.bin/wc/wc.c =================================================================== --- projects/clang700-import/usr.bin/wc/wc.c (revision 340426) +++ projects/clang700-import/usr.bin/wc/wc.c (revision 340427) @@ -1,344 +1,372 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1980, 1987, 1991, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef lint static const char copyright[] = "@(#) Copyright (c) 1980, 1987, 1991, 1993\n\ The Regents of the University of California. 
All rights reserved.\n"; #endif /* not lint */ #if 0 #ifndef lint static char sccsid[] = "@(#)wc.c 8.1 (Berkeley) 6/6/93"; #endif /* not lint */ #endif #include __FBSDID("$FreeBSD$"); +#include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include +#include + +static fileargs_t *fa; static uintmax_t tlinect, twordct, tcharct, tlongline; static int doline, doword, dochar, domulti, dolongline; static volatile sig_atomic_t siginfo; static xo_handle_t *stderr_handle; static void show_cnt(const char *file, uintmax_t linect, uintmax_t wordct, uintmax_t charct, uintmax_t llct); static int cnt(const char *); static void usage(void); static void siginfo_handler(int sig __unused) { siginfo = 1; } static void reset_siginfo(void) { signal(SIGINFO, SIG_DFL); siginfo = 0; } int main(int argc, char *argv[]) { int ch, errors, total; + cap_rights_t rights; (void) setlocale(LC_CTYPE, ""); argc = xo_parse_args(argc, argv); if (argc < 0) return (argc); while ((ch = getopt(argc, argv, "clmwL")) != -1) switch((char)ch) { case 'l': doline = 1; break; case 'w': doword = 1; break; case 'c': dochar = 1; domulti = 0; break; case 'L': dolongline = 1; break; case 'm': domulti = 1; dochar = 0; break; case '?': default: usage(); } argv += optind; argc -= optind; (void)signal(SIGINFO, siginfo_handler); + fa = fileargs_init(argc, argv, O_RDONLY, 0, + cap_rights_init(&rights, CAP_READ, CAP_FSTAT)); + if (fa == NULL) { + xo_warn("Unable to init casper"); + exit(1); + } + + caph_cache_catpages(); + if (caph_limit_stdio() < 0) { + xo_warn("Unable to limit stdio"); + fileargs_free(fa); + exit(1); + } + + if (caph_enter_casper() < 0) { + xo_warn("Unable to enter capability mode"); + fileargs_free(fa); + exit(1); + } + /* Wc's flags are on by default. */ if (doline + doword + dochar + domulti + dolongline == 0) doline = doword = dochar = 1; stderr_handle = xo_create_to_file(stderr, XO_STYLE_TEXT, 0); xo_open_container("wc"); xo_open_list("file"); errors = 0; total = 0; if (!*argv) { xo_open_instance("file"); if (cnt((char *)NULL) != 0) ++errors; xo_close_instance("file"); } else { do { xo_open_instance("file"); if (cnt(*argv) != 0) ++errors; xo_close_instance("file"); ++total; } while(*++argv); } xo_close_list("file"); if (total > 1) { xo_open_container("total"); show_cnt("total", tlinect, twordct, tcharct, tlongline); xo_close_container("total"); } + fileargs_free(fa); xo_close_container("wc"); xo_finish(); exit(errors == 0 ? 
0 : 1); } static void show_cnt(const char *file, uintmax_t linect, uintmax_t wordct, uintmax_t charct, uintmax_t llct) { xo_handle_t *xop; if (!siginfo) xop = NULL; else { xop = stderr_handle; siginfo = 0; } if (doline) xo_emit_h(xop, " {:lines/%7ju/%ju}", linect); if (doword) xo_emit_h(xop, " {:words/%7ju/%ju}", wordct); if (dochar || domulti) xo_emit_h(xop, " {:characters/%7ju/%ju}", charct); if (dolongline) xo_emit_h(xop, " {:long-lines/%7ju/%ju}", llct); if (file != NULL) xo_emit_h(xop, " {:filename/%s}\n", file); else xo_emit_h(xop, "\n"); } static int cnt(const char *file) { struct stat sb; uintmax_t linect, wordct, charct, llct, tmpll; int fd, len, warned; size_t clen; short gotsp; u_char *p; u_char buf[MAXBSIZE]; wchar_t wch; mbstate_t mbs; linect = wordct = charct = llct = tmpll = 0; if (file == NULL) fd = STDIN_FILENO; - else if ((fd = open(file, O_RDONLY, 0)) < 0) { + else if ((fd = fileargs_open(fa, file)) < 0) { xo_warn("%s: open", file); return (1); } if (doword || (domulti && MB_CUR_MAX != 1)) goto word; /* * If all we need is the number of characters and it's a regular file, * just stat it. */ if (doline == 0 && dolongline == 0) { if (fstat(fd, &sb)) { xo_warn("%s: fstat", file); (void)close(fd); return (1); } if (S_ISREG(sb.st_mode)) { reset_siginfo(); charct = sb.st_size; show_cnt(file, linect, wordct, charct, llct); tcharct += charct; (void)close(fd); return (0); } } /* * For files we can't stat, or if we need line counting, slurp the * file. Line counting is split out because it's a lot faster to get * lines than to get words, since the word count requires locale * handling. */ while ((len = read(fd, buf, MAXBSIZE))) { if (len == -1) { xo_warn("%s: read", file); (void)close(fd); return (1); } if (siginfo) show_cnt(file, linect, wordct, charct, llct); charct += len; if (doline || dolongline) { for (p = buf; len--; ++p) if (*p == '\n') { if (tmpll > llct) llct = tmpll; tmpll = 0; ++linect; } else tmpll++; } } reset_siginfo(); if (doline) tlinect += linect; if (dochar) tcharct += charct; if (dolongline && llct > tlongline) tlongline = llct; show_cnt(file, linect, wordct, charct, llct); (void)close(fd); return (0); /* Do it the hard way... */ word: gotsp = 1; warned = 0; memset(&mbs, 0, sizeof(mbs)); while ((len = read(fd, buf, MAXBSIZE)) != 0) { if (len == -1) { xo_warn("%s: read", file != NULL ? file : "stdin"); (void)close(fd); return (1); } p = buf; while (len > 0) { if (siginfo) show_cnt(file, linect, wordct, charct, llct); if (!domulti || MB_CUR_MAX == 1) { clen = 1; wch = (unsigned char)*p; } else if ((clen = mbrtowc(&wch, p, len, &mbs)) == (size_t)-1) { if (!warned) { errno = EILSEQ; xo_warn("%s", file != NULL ? file : "stdin"); warned = 1; } memset(&mbs, 0, sizeof(mbs)); clen = 1; wch = (unsigned char)*p; } else if (clen == (size_t)-2) break; else if (clen == 0) clen = 1; charct++; if (wch != L'\n') tmpll++; len -= clen; p += clen; if (wch == L'\n') { if (tmpll > llct) llct = tmpll; tmpll = 0; ++linect; } if (iswspace(wch)) gotsp = 1; else if (gotsp) { gotsp = 0; ++wordct; } } } reset_siginfo(); if (domulti && MB_CUR_MAX > 1) if (mbrtowc(NULL, NULL, 0, &mbs) == (size_t)-1 && !warned) xo_warn("%s", file != NULL ? 
file : "stdin"); if (doline) tlinect += linect; if (doword) twordct += wordct; if (dochar || domulti) tcharct += charct; if (dolongline && llct > tlongline) tlongline = llct; show_cnt(file, linect, wordct, charct, llct); (void)close(fd); return (0); } static void usage(void) { xo_error("usage: wc [-Lclmw] [file ...]\n"); exit(1); } Index: projects/clang700-import/usr.sbin/cpucontrol/amd.c =================================================================== --- projects/clang700-import/usr.sbin/cpucontrol/amd.c (revision 340426) +++ projects/clang700-import/usr.sbin/cpucontrol/amd.c (revision 340427) @@ -1,183 +1,155 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006, 2008 Stanislav Sedov . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cpucontrol.h" #include "amd.h" int amd_probe(int fd) { char vendor[13]; int error; cpuctl_cpuid_args_t idargs = { .level = 0, }; error = ioctl(fd, CPUCTL_CPUID, &idargs); if (error < 0) { WARN(0, "ioctl()"); return (1); } ((uint32_t *)vendor)[0] = idargs.data[1]; ((uint32_t *)vendor)[1] = idargs.data[3]; ((uint32_t *)vendor)[2] = idargs.data[2]; vendor[12] = '\0'; if (strncmp(vendor, AMD_VENDOR_ID, sizeof(AMD_VENDOR_ID)) != 0) return (1); return (0); } void -amd_update(const char *dev, const char *path) +amd_update(const struct ucode_update_params *params) { - int fd, devfd; + int devfd; unsigned int i; - struct stat st; - uint32_t *fw_image; - amd_fw_header_t *fw_header; + const char *dev, *path; + const uint32_t *fw_image; + const amd_fw_header_t *fw_header; uint32_t sum; uint32_t signature; - uint32_t *fw_data; + const uint32_t *fw_data; size_t fw_size; cpuctl_cpuid_args_t idargs = { .level = 1, /* Request signature. 
*/ }; cpuctl_update_args_t args; int error; + dev = params->dev_path; + path = params->fw_path; + devfd = params->devfd; + fw_image = params->fwimage; + assert(path); assert(dev); - fd = -1; - fw_image = MAP_FAILED; - devfd = open(dev, O_RDWR); - if (devfd < 0) { - WARN(0, "could not open %s for writing", dev); - return; - } error = ioctl(devfd, CPUCTL_CPUID, &idargs); if (error < 0) { WARN(0, "ioctl()"); goto fail; } signature = idargs.data[0]; WARNX(2, "found cpu family %#x model %#x " "stepping %#x extfamily %#x extmodel %#x.", (signature >> 8) & 0x0f, (signature >> 4) & 0x0f, (signature >> 0) & 0x0f, (signature >> 20) & 0xff, (signature >> 16) & 0x0f); /* * Open the firmware file. */ - fd = open(path, O_RDONLY, 0); - if (fd < 0) { - WARN(0, "open(%s)", path); - goto fail; - } - error = fstat(fd, &st); - if (error != 0) { - WARN(0, "fstat(%s)", path); - goto fail; - } - if (st.st_size < 0 || (unsigned)st.st_size < sizeof(*fw_header)) { + if (params->fwsize < sizeof(*fw_header)) { WARNX(2, "file too short: %s", path); goto fail; } - /* - * mmap the whole image. - */ - fw_image = (uint32_t *)mmap(NULL, st.st_size, PROT_READ, - MAP_PRIVATE, fd, 0); - if (fw_image == MAP_FAILED) { - WARN(0, "mmap(%s)", path); - goto fail; - } - fw_header = (amd_fw_header_t *)fw_image; + fw_header = (const amd_fw_header_t *)fw_image; if ((fw_header->magic >> 8) != AMD_MAGIC) { WARNX(2, "%s is not a valid amd firmware: version mismatch", path); goto fail; } - fw_data = (uint32_t *)(fw_header + 1); - fw_size = (st.st_size - sizeof(*fw_header)) / sizeof(uint32_t); + fw_data = (const uint32_t *)(fw_header + 1); + fw_size = (params->fwsize - sizeof(*fw_header)) / sizeof(uint32_t); /* * Check the primary checksum. */ sum = 0; for (i = 0; i < fw_size; i++) sum += fw_data[i]; if (sum != fw_header->checksum) { WARNX(2, "%s: update data checksum invalid", path); goto fail; } if (signature == fw_header->signature) { fprintf(stderr, "%s: updating cpu %s... ", path, dev); - args.data = fw_image; - args.size = st.st_size; + args.data = __DECONST(void *, fw_image); + args.size = params->fwsize; error = ioctl(devfd, CPUCTL_UPDATE, &args); if (error < 0) { fprintf(stderr, "failed.\n"); warn("ioctl()"); goto fail; } fprintf(stderr, "done.\n"); } fail: - if (fd >= 0) - close(fd); - if (devfd >= 0) - close(devfd); - if (fw_image != MAP_FAILED) - if(munmap(fw_image, st.st_size) != 0) - warn("munmap(%s)", path); return; } Index: projects/clang700-import/usr.sbin/cpucontrol/amd10h.c =================================================================== --- projects/clang700-import/usr.sbin/cpucontrol/amd10h.c (revision 340426) +++ projects/clang700-import/usr.sbin/cpucontrol/amd10h.c (revision 340427) @@ -1,317 +1,291 @@ /*- * Copyright (c) 2012 Andriy Gapon . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
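With this change the vendor handlers no longer open or mmap anything themselves; do_update() hands them an already-opened cpuctl(4) descriptor and an already-mapped firmware image through struct ucode_update_params. The definition is not part of these hunks (it lives in cpucontrol.h); the layout below is only inferred from the params->... accesses in amd_update() and amd10h_update():

/*
 * Sketch only: field names are taken from the accessors used by the
 * handlers; consult cpucontrol.h for the real declaration.
 */
struct ucode_update_params {
	int		 devfd;		/* opened cpuctl(4) device */
	const void	*fwimage;	/* mapped firmware image */
	size_t		 fwsize;	/* size of the mapped image */
	const char	*dev_path;	/* device path, for diagnostics */
	const char	*fw_path;	/* firmware path, for diagnostics */
};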
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cpucontrol.h" #include "amd.h" int amd10h_probe(int fd) { char vendor[13]; cpuctl_cpuid_args_t idargs; uint32_t family; uint32_t signature; int error; idargs.level = 0; error = ioctl(fd, CPUCTL_CPUID, &idargs); if (error < 0) { WARN(0, "ioctl()"); return (1); } ((uint32_t *)vendor)[0] = idargs.data[1]; ((uint32_t *)vendor)[1] = idargs.data[3]; ((uint32_t *)vendor)[2] = idargs.data[2]; vendor[12] = '\0'; if (strncmp(vendor, AMD_VENDOR_ID, sizeof(AMD_VENDOR_ID)) != 0) return (1); idargs.level = 1; error = ioctl(fd, CPUCTL_CPUID, &idargs); if (error < 0) { WARN(0, "ioctl()"); return (1); } signature = idargs.data[0]; family = ((signature >> 8) & 0x0f) + ((signature >> 20) & 0xff); if (family < 0x10) return (1); return (0); } /* * NB: the format of microcode update files is not documented by AMD. * It has been reverse engineered from studying Coreboot, illumos and Linux * source code. */ void -amd10h_update(const char *dev, const char *path) +amd10h_update(const struct ucode_update_params *params) { - struct stat st; cpuctl_cpuid_args_t idargs; cpuctl_msr_args_t msrargs; cpuctl_update_args_t args; const amd_10h_fw_header_t *fw_header; const amd_10h_fw_header_t *selected_fw; const equiv_cpu_entry_t *equiv_cpu_table; const section_header_t *section_header; const container_header_t *container_header; const uint8_t *fw_data; - uint8_t *fw_image; + const uint8_t *fw_image; + const char *dev, *path; size_t fw_size; size_t selected_size; uint32_t revision; uint32_t new_rev; uint32_t signature; uint16_t equiv_id; - int fd, devfd; + int devfd; unsigned int i; int error; + dev = params->dev_path; + path = params->fw_path; + devfd = params->devfd; + fw_image = params->fwimage; + fw_size = params->fwsize; + assert(path); assert(dev); - fd = -1; - fw_image = MAP_FAILED; - devfd = open(dev, O_RDWR); - if (devfd < 0) { - WARN(0, "could not open %s for writing", dev); - return; - } idargs.level = 1; error = ioctl(devfd, CPUCTL_CPUID, &idargs); if (error < 0) { WARN(0, "ioctl()"); goto done; } signature = idargs.data[0]; msrargs.msr = MSR_BIOS_SIGN; error = ioctl(devfd, CPUCTL_RDMSR, &msrargs); if (error < 0) { WARN(0, "ioctl(%s)", dev); goto done; } revision = (uint32_t)msrargs.data; WARNX(1, "found cpu family %#x model %#x " "stepping %#x extfamily %#x extmodel %#x.", ((signature >> 8) & 0x0f) + ((signature >> 20) & 0xff), (signature >> 4) & 0x0f, (signature >> 0) & 0x0f, (signature >> 20) & 0xff, (signature >> 16) & 0x0f); WARNX(1, "microcode revision %#x", revision); /* * Open the firmware file. 
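The running patch level is read above through cpuctl(4) rather than with a raw rdmsr. A self-contained sketch of that ioctl sequence; it assumes <machine/specialreg.h> provides MSR_BIOS_SIGN and that /dev/cpuctl0 exists:

#include <sys/ioctl.h>
#include <sys/cpuctl.h>

#include <machine/specialreg.h>	/* for MSR_BIOS_SIGN (assumption) */

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	cpuctl_msr_args_t msrargs;
	int fd;

	if ((fd = open("/dev/cpuctl0", O_RDONLY)) < 0)
		err(1, "open(/dev/cpuctl0)");
	msrargs.msr = MSR_BIOS_SIGN;
	if (ioctl(fd, CPUCTL_RDMSR, &msrargs) < 0)
		err(1, "ioctl(CPUCTL_RDMSR)");
	/* On AMD parts this MSR reports the running microcode patch level. */
	printf("microcode revision %#x\n", (unsigned int)msrargs.data);
	close(fd);
	return (0);
}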
*/ WARNX(1, "checking %s for update.", path); - fd = open(path, O_RDONLY, 0); - if (fd < 0) { - WARN(0, "open(%s)", path); - goto done; - } - error = fstat(fd, &st); - if (error != 0) { - WARN(0, "fstat(%s)", path); - goto done; - } - if (st.st_size < 0 || (size_t)st.st_size < + if (fw_size < (sizeof(*container_header) + sizeof(*section_header))) { WARNX(2, "file too short: %s", path); goto done; } - fw_size = st.st_size; /* * mmap the whole image. */ - fw_image = (uint8_t *)mmap(NULL, st.st_size, PROT_READ, - MAP_PRIVATE, fd, 0); - if (fw_image == MAP_FAILED) { - WARN(0, "mmap(%s)", path); - goto done; - } - fw_data = fw_image; container_header = (const container_header_t *)fw_data; if (container_header->magic != AMD_10H_MAGIC) { WARNX(2, "%s is not a valid amd firmware: bad magic", path); goto done; } fw_data += sizeof(*container_header); fw_size -= sizeof(*container_header); section_header = (const section_header_t *)fw_data; if (section_header->type != AMD_10H_EQUIV_TABLE_TYPE) { WARNX(2, "%s is not a valid amd firmware: " "first section is not CPU equivalence table", path); goto done; } if (section_header->size == 0) { WARNX(2, "%s is not a valid amd firmware: " "first section is empty", path); goto done; } fw_data += sizeof(*section_header); fw_size -= sizeof(*section_header); if (section_header->size > fw_size) { WARNX(2, "%s is not a valid amd firmware: " "file is truncated", path); goto done; } if (section_header->size < sizeof(*equiv_cpu_table)) { WARNX(2, "%s is not a valid amd firmware: " "first section is too short", path); goto done; } equiv_cpu_table = (const equiv_cpu_entry_t *)fw_data; fw_data += section_header->size; fw_size -= section_header->size; equiv_id = 0; for (i = 0; equiv_cpu_table[i].installed_cpu != 0; i++) { if (signature == equiv_cpu_table[i].installed_cpu) { equiv_id = equiv_cpu_table[i].equiv_cpu; WARNX(3, "equiv_id: %x, signature %8x," " equiv_cpu_table[%d] %8x", equiv_id, signature, i, equiv_cpu_table[i].installed_cpu); break; } } if (equiv_id == 0) { WARNX(2, "CPU is not found in the equivalence table"); goto done; } selected_fw = NULL; selected_size = 0; while (fw_size >= sizeof(*section_header)) { section_header = (const section_header_t *)fw_data; fw_data += sizeof(*section_header); fw_size -= sizeof(*section_header); if (section_header->type != AMD_10H_uCODE_TYPE) { WARNX(2, "%s is not a valid amd firmware: " "section has incorret type", path); goto done; } if (section_header->size > fw_size) { WARNX(2, "%s is not a valid amd firmware: " "file is truncated", path); goto done; } if (section_header->size < sizeof(*fw_header)) { WARNX(2, "%s is not a valid amd firmware: " "section is too short", path); goto done; } fw_header = (const amd_10h_fw_header_t *)fw_data; fw_data += section_header->size; fw_size -= section_header->size; if (fw_header->processor_rev_id != equiv_id) { WARNX(1, "firmware processor_rev_id %x, equiv_id %x", fw_header->processor_rev_id, equiv_id); continue; /* different cpu */ } if (fw_header->patch_id <= revision) { WARNX(1, "patch_id %x, revision %x", fw_header->patch_id, revision); continue; /* not newer revision */ } if (fw_header->nb_dev_id != 0 || fw_header->sb_dev_id != 0) { WARNX(2, "Chipset-specific microcode is not supported"); } WARNX(3, "selecting revision: %x", fw_header->patch_id); revision = fw_header->patch_id; selected_fw = fw_header; selected_size = section_header->size; } if (fw_size != 0) { WARNX(2, "%s is not a valid amd firmware: " "file is truncated", path); goto done; } if (selected_fw != NULL) { WARNX(1, 
"selected ucode size is %zu", selected_size); fprintf(stderr, "%s: updating cpu %s to revision %#x... ", path, dev, revision); args.data = __DECONST(void *, selected_fw); args.size = selected_size; error = ioctl(devfd, CPUCTL_UPDATE, &args); if (error < 0) { fprintf(stderr, "failed.\n"); warn("ioctl()"); goto done; } fprintf(stderr, "done.\n"); } msrargs.msr = MSR_BIOS_SIGN; error = ioctl(devfd, CPUCTL_RDMSR, &msrargs); if (error < 0) { WARN(0, "ioctl(%s)", dev); goto done; } new_rev = (uint32_t)msrargs.data; if (new_rev != revision) WARNX(0, "revision after update %#x", new_rev); done: - if (fd >= 0) - close(fd); - if (devfd >= 0) - close(devfd); - if (fw_image != MAP_FAILED) - if (munmap(fw_image, st.st_size) != 0) - warn("munmap(%s)", path); return; } Index: projects/clang700-import/usr.sbin/cpucontrol/cpucontrol.c =================================================================== --- projects/clang700-import/usr.sbin/cpucontrol/cpucontrol.c (revision 340426) +++ projects/clang700-import/usr.sbin/cpucontrol/cpucontrol.c (revision 340427) @@ -1,516 +1,573 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008-2011 Stanislav Sedov . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * This utility provides userland access to the cpuctl(4) pseudo-device * features. 
*/ #include __FBSDID("$FreeBSD$"); #include +#include +#include +#include +#include #include #include #include #include -#include -#include #include -#include #include #include #include +#include #include #include #include #include "cpucontrol.h" #include "amd.h" #include "intel.h" #include "via.h" int verbosity_level = 0; #define DEFAULT_DATADIR "/usr/local/share/cpucontrol" #define FLAG_I 0x01 #define FLAG_M 0x02 #define FLAG_U 0x04 #define FLAG_N 0x08 #define FLAG_E 0x10 #define OP_INVAL 0x00 #define OP_READ 0x01 #define OP_WRITE 0x02 #define OP_OR 0x04 #define OP_AND 0x08 #define HIGH(val) (uint32_t)(((val) >> 32) & 0xffffffff) #define LOW(val) (uint32_t)((val) & 0xffffffff) -/* - * Macros for freeing SLISTs, probably must be in /sys/queue.h - */ -#define SLIST_FREE(head, field, freef) do { \ - typeof(SLIST_FIRST(head)) __elm0; \ - typeof(SLIST_FIRST(head)) __elm; \ - SLIST_FOREACH_SAFE(__elm, (head), field, __elm0) \ - (void)(freef)(__elm); \ -} while(0); - struct datadir { const char *path; SLIST_ENTRY(datadir) next; }; static SLIST_HEAD(, datadir) datadirs = SLIST_HEAD_INITIALIZER(datadirs); static struct ucode_handler { ucode_probe_t *probe; ucode_update_t *update; } handlers[] = { { intel_probe, intel_update }, { amd10h_probe, amd10h_update }, { amd_probe, amd_update }, { via_probe, via_update }, }; #define NHANDLERS (sizeof(handlers) / sizeof(*handlers)) static void usage(void); -static int isdir(const char *path); static int do_cpuid(const char *cmdarg, const char *dev); static int do_cpuid_count(const char *cmdarg, const char *dev); static int do_msr(const char *cmdarg, const char *dev); static int do_update(const char *dev); static void datadir_add(const char *path); static void __dead2 usage(void) { const char *name; name = getprogname(); if (name == NULL) name = "cpuctl"; fprintf(stderr, "Usage: %s [-vh] [-d datadir] [-m msr[=value] | " "-i level | -i level,level_type | -e | -u] device\n", name); exit(EX_USAGE); } static int -isdir(const char *path) -{ - int error; - struct stat st; - - error = stat(path, &st); - if (error < 0) { - WARN(0, "stat(%s)", path); - return (error); - } - return (st.st_mode & S_IFDIR); -} - -static int do_cpuid(const char *cmdarg, const char *dev) { unsigned int level; cpuctl_cpuid_args_t args; int fd, error; char *endptr; assert(cmdarg != NULL); assert(dev != NULL); level = strtoul(cmdarg, &endptr, 16); if (*cmdarg == '\0' || *endptr != '\0') { WARNX(0, "incorrect operand: %s", cmdarg); usage(); /* NOTREACHED */ } /* * Fill ioctl argument structure. */ args.level = level; fd = open(dev, O_RDONLY); if (fd < 0) { WARN(0, "error opening %s for reading", dev); return (1); } error = ioctl(fd, CPUCTL_CPUID, &args); if (error < 0) { WARN(0, "ioctl(%s, CPUCTL_CPUID)", dev); close(fd); return (error); } fprintf(stdout, "cpuid level 0x%x: 0x%.8x 0x%.8x 0x%.8x 0x%.8x\n", level, args.data[0], args.data[1], args.data[2], args.data[3]); close(fd); return (0); } static int do_cpuid_count(const char *cmdarg, const char *dev) { char *cmdarg1, *endptr, *endptr1; unsigned int level, level_type; cpuctl_cpuid_count_args_t args; int fd, error; assert(cmdarg != NULL); assert(dev != NULL); level = strtoul(cmdarg, &endptr, 16); if (*cmdarg == '\0' || *endptr == '\0') { WARNX(0, "incorrect or missing operand: %s", cmdarg); usage(); /* NOTREACHED */ } /* Locate the comma... */ cmdarg1 = strstr(endptr, ","); /* ... 
and skip past it */ cmdarg1 += 1; level_type = strtoul(cmdarg1, &endptr1, 16); if (*cmdarg1 == '\0' || *endptr1 != '\0') { WARNX(0, "incorrect or missing operand: %s", cmdarg); usage(); /* NOTREACHED */ } /* * Fill ioctl argument structure. */ args.level = level; args.level_type = level_type; fd = open(dev, O_RDONLY); if (fd < 0) { WARN(0, "error opening %s for reading", dev); return (1); } error = ioctl(fd, CPUCTL_CPUID_COUNT, &args); if (error < 0) { WARN(0, "ioctl(%s, CPUCTL_CPUID_COUNT)", dev); close(fd); return (error); } fprintf(stdout, "cpuid level 0x%x, level_type 0x%x: 0x%.8x 0x%.8x " "0x%.8x 0x%.8x\n", level, level_type, args.data[0], args.data[1], args.data[2], args.data[3]); close(fd); return (0); } static int do_msr(const char *cmdarg, const char *dev) { unsigned int msr; cpuctl_msr_args_t args; size_t len; uint64_t data = 0; unsigned long command; int do_invert = 0, op; int fd, error; const char *command_name; char *endptr; char *p; assert(cmdarg != NULL); assert(dev != NULL); len = strlen(cmdarg); if (len == 0) { WARNX(0, "MSR register expected"); usage(); /* NOTREACHED */ } /* * Parse command string. */ msr = strtoul(cmdarg, &endptr, 16); switch (*endptr) { case '\0': op = OP_READ; break; case '=': op = OP_WRITE; break; case '&': op = OP_AND; endptr++; break; case '|': op = OP_OR; endptr++; break; default: op = OP_INVAL; } if (op != OP_READ) { /* Complex operation. */ if (*endptr != '=') op = OP_INVAL; else { p = ++endptr; if (*p == '~') { do_invert = 1; p++; } data = strtoull(p, &endptr, 16); if (*p == '\0' || *endptr != '\0') { WARNX(0, "argument required: %s", cmdarg); usage(); /* NOTREACHED */ } } } if (op == OP_INVAL) { WARNX(0, "invalid operator: %s", cmdarg); usage(); /* NOTREACHED */ } /* * Fill ioctl argument structure. */ args.msr = msr; if ((do_invert != 0) ^ (op == OP_AND)) args.data = ~data; else args.data = data; switch (op) { case OP_READ: command = CPUCTL_RDMSR; command_name = "RDMSR"; break; case OP_WRITE: command = CPUCTL_WRMSR; command_name = "WRMSR"; break; case OP_OR: command = CPUCTL_MSRSBIT; command_name = "MSRSBIT"; break; case OP_AND: command = CPUCTL_MSRCBIT; command_name = "MSRCBIT"; break; default: abort(); } fd = open(dev, op == OP_READ ? O_RDONLY : O_WRONLY); if (fd < 0) { WARN(0, "error opening %s for %s", dev, op == OP_READ ? 
"reading" : "writing"); return (1); } error = ioctl(fd, command, &args); if (error < 0) { WARN(0, "ioctl(%s, CPUCTL_%s (%lu))", dev, command_name, command); close(fd); return (1); } if (op == OP_READ) fprintf(stdout, "MSR 0x%x: 0x%.8x 0x%.8x\n", msr, HIGH(args.data), LOW(args.data)); close(fd); return (0); } static int do_eval_cpu_features(const char *dev) { int fd, error; assert(dev != NULL); fd = open(dev, O_RDWR); if (fd < 0) { WARN(0, "error opening %s for writing", dev); return (1); } error = ioctl(fd, CPUCTL_EVAL_CPU_FEATURES, NULL); if (error < 0) WARN(0, "ioctl(%s, CPUCTL_EVAL_CPU_FEATURES)", dev); close(fd); return (error); } static int +try_a_fw_image(const char *dev_path, int devfd, int fwdfd, const char *dpath, + const char *fname, struct ucode_handler *handler) +{ + struct ucode_update_params parm; + struct stat st; + char *fw_path; + void *fw_map; + int fwfd, rc; + + rc = 0; + fw_path = NULL; + fw_map = MAP_FAILED; + fwfd = openat(fwdfd, fname, O_RDONLY); + if (fwfd < 0) { + WARN(0, "openat(%s, %s)", dpath, fname); + goto out; + } + + rc = asprintf(&fw_path, "%s/%s", dpath, fname); + if (rc == -1) { + WARNX(0, "out of memory"); + rc = ENOMEM; + goto out; + } + + rc = fstat(fwfd, &st); + if (rc != 0) { + WARN(0, "fstat(%s)", fw_path); + rc = 0; + goto out; + } + if (st.st_size <= 0) { + WARN(0, "%s: empty", fw_path); + goto out; + } + + fw_map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fwfd, 0); + if (fw_map == MAP_FAILED) { + WARN(0, "mmap(%s)", fw_path); + goto out; + } + + + memset(&parm, 0, sizeof(parm)); + parm.devfd = devfd; + parm.fwimage = fw_map; + parm.fwsize = st.st_size; + parm.dev_path = dev_path; + parm.fw_path = fw_path; + + handler->update(&parm); + +out: + if (fw_map != MAP_FAILED) + munmap(fw_map, st.st_size); + free(fw_path); + if (fwfd >= 0) + close(fwfd); + return (rc); +} + +static int do_update(const char *dev) { - int fd; + int fd, fwdfd; unsigned int i; int error; struct ucode_handler *handler; struct datadir *dir; DIR *dirp; struct dirent *direntry; - char buf[MAXPATHLEN]; fd = open(dev, O_RDONLY); if (fd < 0) { WARN(0, "error opening %s for reading", dev); return (1); } /* - * Find the appropriate handler for device. + * Find the appropriate handler for CPU. */ for (i = 0; i < NHANDLERS; i++) if (handlers[i].probe(fd) == 0) break; if (i < NHANDLERS) handler = &handlers[i]; else { - WARNX(0, "cannot find the appropriate handler for device"); + WARNX(0, "cannot find the appropriate handler for %s", dev); close(fd); return (1); } close(fd); + fd = open(dev, O_RDWR); + if (fd < 0) { + WARN(0, "error opening %s for writing", dev); + return (1); + } + /* * Process every image in specified data directories. 
*/ SLIST_FOREACH(dir, &datadirs, next) { - dirp = opendir(dir->path); - if (dirp == NULL) { - WARNX(1, "skipping directory %s: not accessible", dir->path); + fwdfd = open(dir->path, O_RDONLY); + if (fwdfd < 0) { + WARN(1, "skipping directory %s: not accessible", dir->path); continue; } + dirp = fdopendir(fwdfd); + if (dirp == NULL) { + WARNX(0, "out of memory"); + close(fwdfd); + close(fd); + return (1); + } + while ((direntry = readdir(dirp)) != NULL) { if (direntry->d_namlen == 0) continue; - error = snprintf(buf, sizeof(buf), "%s/%s", dir->path, - direntry->d_name); - if ((unsigned)error >= sizeof(buf)) - WARNX(0, "skipping %s, buffer too short", - direntry->d_name); - if (isdir(buf) != 0) { - WARNX(2, "skipping %s: is a directory", buf); + if (direntry->d_type == DT_DIR) continue; + + error = try_a_fw_image(dev, fd, fwdfd, dir->path, + direntry->d_name, handler); + if (error != 0) { + closedir(dirp); + close(fd); + return (1); } - handler->update(dev, buf); } error = closedir(dirp); if (error != 0) WARN(0, "closedir(%s)", dir->path); } + close(fd); return (0); } /* * Add new data directory to the search list. */ static void datadir_add(const char *path) { struct datadir *newdir; newdir = (struct datadir *)malloc(sizeof(*newdir)); if (newdir == NULL) err(EX_OSERR, "cannot allocate memory"); newdir->path = path; SLIST_INSERT_HEAD(&datadirs, newdir, next); } int main(int argc, char *argv[]) { + struct datadir *elm; int c, flags; const char *cmdarg; const char *dev; int error; flags = 0; error = 0; cmdarg = ""; /* To keep gcc3 happy. */ while ((c = getopt(argc, argv, "d:ehi:m:nuv")) != -1) { switch (c) { case 'd': datadir_add(optarg); break; case 'e': flags |= FLAG_E; break; case 'i': flags |= FLAG_I; cmdarg = optarg; break; case 'm': flags |= FLAG_M; cmdarg = optarg; break; case 'n': flags |= FLAG_N; break; case 'u': flags |= FLAG_U; break; case 'v': verbosity_level++; break; case 'h': /* FALLTHROUGH */ default: usage(); /* NOTREACHED */ } } argc -= optind; argv += optind; if (argc < 1) { usage(); /* NOTREACHED */ } if ((flags & FLAG_N) == 0) datadir_add(DEFAULT_DATADIR); dev = argv[0]; c = flags & (FLAG_E | FLAG_I | FLAG_M | FLAG_U); switch (c) { case FLAG_I: if (strstr(cmdarg, ",") != NULL) error = do_cpuid_count(cmdarg, dev); else error = do_cpuid(cmdarg, dev); break; case FLAG_M: error = do_msr(cmdarg, dev); break; case FLAG_U: error = do_update(dev); break; case FLAG_E: error = do_eval_cpu_features(dev); break; default: usage(); /* Only one command can be selected. */ } - SLIST_FREE(&datadirs, next, free); + while ((elm = SLIST_FIRST(&datadirs)) != NULL) { + SLIST_REMOVE_HEAD(&datadirs, next); + free(elm); + } return (error == 0 ? 0 : 1); } Index: projects/clang700-import/usr.sbin/cpucontrol/cpucontrol.h =================================================================== --- projects/clang700-import/usr.sbin/cpucontrol/cpucontrol.h (revision 340426) +++ projects/clang700-import/usr.sbin/cpucontrol/cpucontrol.h (revision 340427) @@ -1,58 +1,67 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008 Stanislav Sedov . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef CPUCONTROL_H #define CPUCONTROL_H +#include + typedef int ucode_probe_t(int fd); -typedef void ucode_update_t(const char *dev, const char *image); +struct ucode_update_params { + int devfd; /* RDWR handle to cpucontrol device */ + const void *fwimage; /* READ mapping of firmware image */ + size_t fwsize; /* Non-zero size of firmware image */ + const char *dev_path; /* cpucontrol device path, for logging */ + const char *fw_path; /* firmware image path, for logging */ +}; +typedef void ucode_update_t(const struct ucode_update_params *params); extern int verbosity_level; #ifdef DEBUG # define WARNX(level, ...) \ if ((level) <= verbosity_level) { \ fprintf(stderr, "%s:%d ", __FILE__, __LINE__); \ warnx(__VA_ARGS__); \ } # define WARN(level, ...) \ if ((level) <= verbosity_level) { \ fprintf(stderr, "%s:%d ", __FILE__, __LINE__); \ warn(__VA_ARGS__); \ } #else # define WARNX(level, ...) \ if ((level) <= verbosity_level) \ warnx(__VA_ARGS__); # define WARN(level, ...) \ if ((level) <= verbosity_level) \ warn(__VA_ARGS__); #endif #endif /* !CPUCONTROL_H */ Index: projects/clang700-import/usr.sbin/cpucontrol/intel.c =================================================================== --- projects/clang700-import/usr.sbin/cpucontrol/intel.c (revision 340426) +++ projects/clang700-import/usr.sbin/cpucontrol/intel.c (revision 340427) @@ -1,296 +1,270 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006, 2008 Stanislav Sedov . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cpucontrol.h" #include "intel.h" #define DEFAULT_UCODE_SIZE 2000 /* Size of update data if not specified. */ int intel_probe(int fd) { char vendor[13]; int error; cpuctl_cpuid_args_t idargs = { .level = 0, }; error = ioctl(fd, CPUCTL_CPUID, &idargs); if (error < 0) { WARN(0, "ioctl()"); return (1); } ((uint32_t *)vendor)[0] = idargs.data[1]; ((uint32_t *)vendor)[1] = idargs.data[3]; ((uint32_t *)vendor)[2] = idargs.data[2]; vendor[12] = '\0'; if (strncmp(vendor, INTEL_VENDOR_ID, sizeof(INTEL_VENDOR_ID)) != 0) return (1); return (0); } void -intel_update(const char *dev, const char *path) +intel_update(const struct ucode_update_params *params) { - int fd, devfd; - struct stat st; - uint32_t *fw_image; + int devfd; + const char *dev, *path; + const uint32_t *fw_image; int have_ext_table; uint32_t sum; unsigned int i; size_t payload_size; - intel_fw_header_t *fw_header; - intel_cpu_signature_t *ext_table; - intel_ext_header_t *ext_header; + const intel_fw_header_t *fw_header; + const intel_cpu_signature_t *ext_table; + const intel_ext_header_t *ext_header; uint32_t sig, signature, flags; int32_t revision; ssize_t ext_size; size_t ext_table_size; - void *fw_data; + const void *fw_data; size_t data_size, total_size; cpuctl_msr_args_t msrargs = { .msr = MSR_BIOS_SIGN, .data = 0, }; cpuctl_cpuid_args_t idargs = { .level = 1, /* Signature. */ }; cpuctl_update_args_t args; int error; + dev = params->dev_path; + path = params->fw_path; + devfd = params->devfd; + fw_image = params->fwimage; + assert(path); assert(dev); - fd = -1; - fw_image = MAP_FAILED; ext_table = NULL; ext_header = NULL; - devfd = open(dev, O_RDWR); - if (devfd < 0) { - WARN(0, "could not open %s for writing", dev); - return; - } + error = ioctl(devfd, CPUCTL_WRMSR, &msrargs); if (error < 0) { WARN(0, "ioctl(%s)", dev); goto fail; } error = ioctl(devfd, CPUCTL_CPUID, &idargs); if (error < 0) { WARN(0, "ioctl(%s)", dev); goto fail; } signature = idargs.data[0]; msrargs.msr = MSR_IA32_PLATFORM_ID; error = ioctl(devfd, CPUCTL_RDMSR, &msrargs); if (error < 0) { WARN(0, "ioctl(%s)", dev); goto fail; } /* * MSR_IA32_PLATFORM_ID contains flag in BCD in bits 52-50. */ flags = 1 << ((msrargs.data >> 50) & 7); msrargs.msr = MSR_BIOS_SIGN; error = ioctl(devfd, CPUCTL_RDMSR, &msrargs); if (error < 0) { WARN(0, "ioctl(%s)", dev); goto fail; } revision = msrargs.data >> 32; /* Revision in the high dword. */ WARNX(2, "found cpu type %#x family %#x model %#x stepping %#x.", (signature >> 12) & 0x03, (signature >> 8) & 0x0f, (signature >> 4) & 0x0f, (signature >> 0) & 0x0f); /* * Open firmware image. 
*/ - fd = open(path, O_RDONLY, 0); - if (fd < 0) { - WARN(0, "open(%s)", path); - goto fail; - } - error = fstat(fd, &st); - if (error != 0) { - WARN(0, "fstat(%s)", path); - goto fail; - } - if (st.st_size < 0 || (unsigned)st.st_size < sizeof(*fw_header)) { + if (params->fwsize < sizeof(*fw_header)) { WARNX(2, "file too short: %s", path); goto fail; } - /* - * mmap the whole image. - */ - fw_image = (uint32_t *)mmap(NULL, st.st_size, PROT_READ, - MAP_PRIVATE, fd, 0); - if (fw_image == MAP_FAILED) { - WARN(0, "mmap(%s)", path); - goto fail; - } - fw_header = (intel_fw_header_t *)fw_image; + fw_header = (const intel_fw_header_t *)fw_image; if (fw_header->header_version != INTEL_HEADER_VERSION || fw_header->loader_revision != INTEL_LOADER_REVISION) { WARNX(2, "%s is not a valid intel firmware: version mismatch", path); goto fail; } /* * According to spec, if data_size == 0, then size of ucode = 2000. */ if (fw_header->data_size == 0) data_size = DEFAULT_UCODE_SIZE; else data_size = fw_header->data_size; if (fw_header->total_size == 0) total_size = data_size + sizeof(*fw_header); else total_size = fw_header->total_size; - if (total_size > (unsigned)st.st_size || st.st_size < 0) { + if (total_size > params->fwsize) { WARNX(2, "file too short: %s", path); goto fail; } payload_size = data_size + sizeof(*fw_header); /* * Check the primary checksum. */ sum = 0; for (i = 0; i < (payload_size / sizeof(uint32_t)); i++) - sum += *((uint32_t *)fw_image + i); + sum += *((const uint32_t *)fw_image + i); if (sum != 0) { WARNX(2, "%s: update data checksum invalid", path); goto fail; } /* * Check if there is an extended signature table. */ ext_size = total_size - payload_size; have_ext_table = 0; if (ext_size > (signed)sizeof(*ext_header)) { - ext_header = - (intel_ext_header_t *)((char *)fw_image + payload_size); - ext_table = (intel_cpu_signature_t *)(ext_header + 1); + ext_header = (const intel_ext_header_t *) + ((const char *)fw_image + payload_size); + ext_table = (const intel_cpu_signature_t *)(ext_header + 1); /* * Check the extended table size. */ ext_table_size = sizeof(*ext_header) + ext_header->sig_count * sizeof(*ext_table); if (ext_table_size + payload_size > total_size) { WARNX(2, "%s: broken extended signature table", path); goto no_table; } /* * Check the extended table signature. */ sum = 0; for (i = 0; i < (ext_table_size / sizeof(uint32_t)); i++) - sum += *((uint32_t *)ext_header + i); + sum += *((const uint32_t *)ext_header + i); if (sum != 0) { WARNX(2, "%s: extended signature table checksum invalid", path); goto no_table; } have_ext_table = 1; } no_table: fw_data = fw_header + 1; /* Pointer to the update data. */ /* * Check if the given image is ok for this cpu. */ if (signature == fw_header->cpu_signature && (flags & fw_header->cpu_flags) != 0) goto matched; else if (have_ext_table != 0) { for (i = 0; i < ext_header->sig_count; i++) { sig = ext_table[i].cpu_signature; if (signature == sig && (flags & ext_table[i].cpu_flags) != 0) goto matched; } } else goto fail; matched: if (revision >= fw_header->revision) { WARNX(1, "skipping %s of rev %#x: up to date", path, fw_header->revision); goto fail; } fprintf(stderr, "%s: updating cpu %s from rev %#x to rev %#x... 
", path, dev, revision, fw_header->revision); - args.data = fw_data; + args.data = __DECONST(void *, fw_data); args.size = data_size; error = ioctl(devfd, CPUCTL_UPDATE, &args); if (error < 0) { error = errno; fprintf(stderr, "failed.\n"); errno = error; WARN(0, "ioctl()"); goto fail; } fprintf(stderr, "done.\n"); fail: - if (fw_image != MAP_FAILED) - if (munmap(fw_image, st.st_size) != 0) - warn("munmap(%s)", path); - if (devfd >= 0) - close(devfd); - if (fd >= 0) - close(fd); + return; } Index: projects/clang700-import/usr.sbin/cpucontrol/via.c =================================================================== --- projects/clang700-import/usr.sbin/cpucontrol/via.c (revision 340426) +++ projects/clang700-import/usr.sbin/cpucontrol/via.c (revision 340427) @@ -1,226 +1,195 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2011 Fabien Thomas . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cpucontrol.h" #include "via.h" int via_probe(int fd) { char vendor[13]; int error; cpuctl_cpuid_args_t idargs = { .level = 0, }; error = ioctl(fd, CPUCTL_CPUID, &idargs); if (error < 0) { WARN(0, "ioctl()"); return (1); } ((uint32_t *)vendor)[0] = idargs.data[1]; ((uint32_t *)vendor)[1] = idargs.data[3]; ((uint32_t *)vendor)[2] = idargs.data[2]; vendor[12] = '\0'; if (strncmp(vendor, CENTAUR_VENDOR_ID, sizeof(CENTAUR_VENDOR_ID)) != 0) return (1); /* TODO: detect Nano CPU. */ return (0); } void -via_update(const char *dev, const char *path) +via_update(const struct ucode_update_params *params) { - int fd, devfd; - struct stat st; - uint32_t *fw_image; + int devfd; + const char *dev, *path; + const uint32_t *fw_image; uint32_t sum; unsigned int i; size_t payload_size; - via_fw_header_t *fw_header; + const via_fw_header_t *fw_header; uint32_t signature; int32_t revision; - void *fw_data; + const void *fw_data; size_t data_size, total_size; cpuctl_msr_args_t msrargs = { .msr = MSR_IA32_PLATFORM_ID, }; cpuctl_cpuid_args_t idargs = { .level = 1, /* Signature. 
*/ }; cpuctl_update_args_t args; int error; + dev = params->dev_path; + path = params->fw_path; + devfd = params->devfd; + fw_image = params->fwimage; + assert(path); assert(dev); - fd = -1; - devfd = -1; - fw_image = MAP_FAILED; - devfd = open(dev, O_RDWR); - if (devfd < 0) { - WARN(0, "could not open %s for writing", dev); - return; - } error = ioctl(devfd, CPUCTL_CPUID, &idargs); if (error < 0) { WARN(0, "ioctl(%s)", dev); goto fail; } signature = idargs.data[0]; error = ioctl(devfd, CPUCTL_RDMSR, &msrargs); if (error < 0) { WARN(0, "ioctl(%s)", dev); goto fail; } /* * MSR_IA32_PLATFORM_ID contains flag in BCD in bits 52-50. */ msrargs.msr = MSR_BIOS_SIGN; error = ioctl(devfd, CPUCTL_RDMSR, &msrargs); if (error < 0) { WARN(0, "ioctl(%s)", dev); goto fail; } revision = msrargs.data >> 32; /* Revision in the high dword. */ WARNX(2, "found cpu type %#x family %#x model %#x stepping %#x.", (signature >> 12) & 0x03, (signature >> 8) & 0x0f, (signature >> 4) & 0x0f, (signature >> 0) & 0x0f); - /* - * Open firmware image. - */ - fd = open(path, O_RDONLY, 0); - if (fd < 0) { - WARN(0, "open(%s)", path); - goto fail; - } - error = fstat(fd, &st); - if (error != 0) { - WARN(0, "fstat(%s)", path); - goto fail; - } - if (st.st_size < 0 || (unsigned)st.st_size < sizeof(*fw_header)) { + + if (params->fwsize < sizeof(*fw_header)) { WARNX(2, "file too short: %s", path); goto fail; } - /* - * mmap the whole image. - */ - fw_image = (uint32_t *)mmap(NULL, st.st_size, PROT_READ, - MAP_PRIVATE, fd, 0); - if (fw_image == MAP_FAILED) { - WARN(0, "mmap(%s)", path); - goto fail; - } - fw_header = (via_fw_header_t *)fw_image; + fw_header = (const via_fw_header_t *)fw_image; if (fw_header->signature != VIA_HEADER_SIGNATURE || fw_header->loader_revision != VIA_LOADER_REVISION) { WARNX(2, "%s is not a valid via firmware: version mismatch", path); goto fail; } data_size = fw_header->data_size; total_size = fw_header->total_size; - if (total_size > (unsigned)st.st_size || st.st_size < 0) { + if (total_size > params->fwsize) { WARNX(2, "file too short: %s", path); goto fail; } payload_size = data_size + sizeof(*fw_header); /* * Check the primary checksum. */ sum = 0; for (i = 0; i < (payload_size / sizeof(uint32_t)); i++) - sum += *((uint32_t *)fw_image + i); + sum += *((const uint32_t *)fw_image + i); if (sum != 0) { WARNX(2, "%s: update data checksum invalid", path); goto fail; } fw_data = fw_header + 1; /* Pointer to the update data. */ /* * Check if the given image is ok for this cpu. */ if (signature != fw_header->cpu_signature) goto fail; if (fw_header->revision != 0 && revision >= fw_header->revision) { WARNX(1, "skipping %s of rev %#x: up to date", path, fw_header->revision); goto fail; } fprintf(stderr, "%s: updating cpu %s from rev %#x to rev %#x... 
", path, dev, revision, fw_header->revision); - args.data = fw_data; + args.data = __DECONST(void *, fw_data); args.size = data_size; error = ioctl(devfd, CPUCTL_UPDATE, &args); if (error < 0) { error = errno; - fprintf(stderr, "failed.\n"); + fprintf(stderr, "failed.\n"); errno = error; - WARN(0, "ioctl()"); - goto fail; + WARN(0, "ioctl()"); + goto fail; } fprintf(stderr, "done.\n"); fail: - if (fw_image != MAP_FAILED) - if (munmap(fw_image, st.st_size) != 0) - warn("munmap(%s)", path); - if (devfd >= 0) - close(devfd); - if (fd >= 0) - close(fd); return; } Index: projects/clang700-import =================================================================== --- projects/clang700-import (revision 340426) +++ projects/clang700-import (revision 340427) Property changes on: projects/clang700-import ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r340368-340426