Index: projects/runtime-coverage-v2/Makefile =================================================================== --- projects/runtime-coverage-v2/Makefile (revision 346057) +++ projects/runtime-coverage-v2/Makefile (revision 346058) @@ -1,761 +1,761 @@ # # $FreeBSD$ # # The user-driven targets are: # # universe - *Really* build *everything* (buildworld and # all kernels on all architectures). # tinderbox - Same as universe, but presents a list of failed build # targets and exits with an error if there were any. # buildworld - Rebuild *everything*, including glue to help do # upgrades. # installworld - Install everything built by "buildworld". # world - buildworld + installworld, no kernel. # buildkernel - Rebuild the kernel and the kernel-modules. # installkernel - Install the kernel and the kernel-modules. # installkernel.debug # reinstallkernel - Reinstall the kernel and the kernel-modules. # reinstallkernel.debug # kernel - buildkernel + installkernel. # kernel-toolchain - Builds the subset of world necessary to build a kernel # kernel-toolchains - Build kernel-toolchain for all universe targets. # doxygen - Build API documentation of the kernel, needs doxygen. # update - Convenient way to update your source tree(s). # checkworld - Run test suite on installed world. # check-old - List obsolete directories/files/libraries. # check-old-dirs - List obsolete directories. # check-old-files - List obsolete files. # check-old-libs - List obsolete libraries. # delete-old - Delete obsolete directories/files. # delete-old-dirs - Delete obsolete directories. # delete-old-files - Delete obsolete files. # delete-old-libs - Delete obsolete libraries. # targets - Print a list of supported TARGET/TARGET_ARCH pairs # for world and kernel targets. # toolchains - Build a toolchain for all world and kernel targets. # sysent - (Re)build syscall entries from syscalls.master. # xdev - xdev-build + xdev-install for the architecture # specified with TARGET and TARGET_ARCH. # xdev-build - Build cross-development tools. # xdev-install - Install cross-development tools. # xdev-links - Create traditional links in /usr/bin for cc, etc # native-xtools - Create host binaries that produce target objects # for use in qemu user-mode jails. TARGET and # TARGET_ARCH should be defined. # native-xtools-install # - Install the files to the given DESTDIR/NXTP where # NXTP defaults to /nxb-bin. # # "quick" way to test all kernel builds: # _jflag=`sysctl -n hw.ncpu` # _jflag=$(($_jflag * 2)) # [ $_jflag -gt 12 ] && _jflag=12 # make universe -DMAKE_JUST_KERNELS JFLAG=-j${_jflag} # # This makefile is simple by design. The FreeBSD make automatically reads # the /usr/share/mk/sys.mk unless the -m argument is specified on the # command line. By keeping this makefile simple, it doesn't matter too # much how different the installed mk files are from those in the source # tree. This makefile executes a child make process, forcing it to use # the mk files from the source tree which are supposed to DTRT. # # Most of the user-driven targets (as listed above) are implemented in # Makefile.inc1. The exceptions are universe, tinderbox and targets. # # If you want to build your system from source, be sure that /usr/obj has # at least 6 GB of disk space available. A complete 'universe' build of # r340283 (2018-11) required 167 GB of space. ZFS lz4 compression # achieved a 2.18x ratio, reducing actual space to 81 GB. # # For individuals wanting to build from the sources currently on their # system, the simple instructions are: # # 1. 
`cd /usr/src' (or to the directory containing your source tree). # 2. Define `HISTORICAL_MAKE_WORLD' variable (see README). # 3. `make world' # # For individuals wanting to upgrade their sources (even if only a # delta of a few days): # # 1. `cd /usr/src' (or to the directory containing your source tree). # 2. `make buildworld' # 3. `make buildkernel KERNCONF=YOUR_KERNEL_HERE' (default is GENERIC). # 4. `make installkernel KERNCONF=YOUR_KERNEL_HERE' (default is GENERIC). # [steps 3. & 4. can be combined by using the "kernel" target] # 5. `reboot' (in single user mode: boot -s from the loader prompt). # 6. `mergemaster -p' # 7. `make installworld' # 8. `mergemaster' (you may wish to use -i, along with -U or -F). # 9. `make delete-old' # 10. `reboot' # 11. `make delete-old-libs' (in case no 3rd party program uses them anymore) # # See src/UPDATING `COMMON ITEMS' for more complete information. # # If TARGET=machine (e.g. powerpc, sparc64, ...) is specified you can # cross build world for other machine types using the buildworld target, # and once the world is built you can cross build a kernel using the # buildkernel target. # # Define the user-driven targets. These are listed here in alphabetical # order, but that's not important. # # Targets that begin with underscore are internal targets intended for # developer convenience only. They are intentionally not documented and # completely subject to change without notice. # # For more information, see the build(7) manual page. # # This is included so CC is set to ccache for -V, and COMPILER_TYPE/VERSION # can be cached for sub-makes. We can't do this while still running on the # old fmake from FreeBSD 9.x or older, so avoid including it then to avoid # heartburn upgrading from older systems. The need for CC is done with new # make later in the build, and caching COMPILER_TYPE/VERSION is only an # optimization. Also sinclude it to be friendlier to foreign OS hosted builds. .if ${MAKE_VERSION} >= 20140620 && defined(.PARSEDIR) .sinclude .endif # Note: we use this awkward construct to be compatible with FreeBSD's # old make used in 10.0 and 9.2 and earlier. .if defined(MK_DIRDEPS_BUILD) && ${MK_DIRDEPS_BUILD} == "yes" && \ !make(showconfig) && !make(print-dir) # targets/Makefile plays the role of top-level .include "targets/Makefile" .else TGTS= all all-man buildenv buildenvvars buildkernel buildworld \ check check-old check-old-dirs check-old-files check-old-libs \ checkdpadd checkworld clean cleandepend cleandir cleanworld \ cleanuniverse \ delete-old delete-old-dirs delete-old-files delete-old-libs \ depend distribute distributekernel distributekernel.debug \ distributeworld distrib-dirs distribution doxygen \ everything hier hierarchy install installcheck installkernel \ installkernel.debug packagekernel packageworld \ reinstallkernel reinstallkernel.debug \ installworld kernel-toolchain libraries maninstall \ obj objlink showconfig tags toolchain update \ sysent \ _worldtmp _legacy _bootstrap-tools _cleanobj _obj \ _build-tools _build-metadata _cross-tools _includes _libraries \ build32 distribute32 install32 buildsoft distributesoft installsoft \ builddtb xdev xdev-build xdev-install \ xdev-links native-xtools native-xtools-install stageworld stagekernel \ stage-packages \ create-packages-world create-packages-kernel create-packages \ packages installconfig real-packages sign-packages package-pkg \ print-dir test-system-compiler test-system-linker # These targets require a TARGET and TARGET_ARCH be defined. 
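# For example (illustrative only), a cross-development toolchain for arm64
# could be requested with something like:
#   make xdev TARGET=arm64 TARGET_ARCH=aarch64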
XTGTS= native-xtools native-xtools-install xdev xdev-build xdev-install \ xdev-links # XXX: r156740: This can't work since bsd.subdir.mk is not included ever. # It will only work for SUBDIR_TARGETS in make.conf. TGTS+= ${SUBDIR_TARGETS} BITGTS= files includes BITGTS:=${BITGTS} ${BITGTS:S/^/build/} ${BITGTS:S/^/install/} TGTS+= ${BITGTS} # Only some targets are allowed to use meta mode. Others get it # disabled. In some cases, such as 'install', meta mode can be dangerous # as a cookie may be used to prevent redundant installations (such as # for WORLDTMP staging). For DESTDIR=/ we always want to install though. # For other cases, such as delete-old-libs, meta mode may break # the interactive tty prompt. The safest route is to just whitelist # the ones that benefit from it. META_TGT_WHITELIST+= \ _* build32 buildfiles buildincludes buildkernel buildsoft \ buildworld everything kernel-toolchain kernel-toolchains kernel \ kernels libraries native-xtools showconfig test-system-compiler \ test-system-linker tinderbox toolchain \ toolchains universe universe-toolchain world worlds xdev xdev-build .ORDER: buildworld installworld .ORDER: buildworld distrib-dirs .ORDER: buildworld distribution .ORDER: buildworld distribute .ORDER: buildworld distributeworld .ORDER: buildworld buildkernel .ORDER: distrib-dirs distribute .ORDER: distrib-dirs distributeworld .ORDER: distrib-dirs installworld .ORDER: distribution distribute .ORDER: distributeworld distribute .ORDER: distributeworld distribution .ORDER: installworld distribute .ORDER: installworld distribution .ORDER: installworld installkernel .ORDER: buildkernel installkernel .ORDER: buildkernel installkernel.debug .ORDER: buildkernel reinstallkernel .ORDER: buildkernel reinstallkernel.debug PATH= /sbin:/bin:/usr/sbin:/usr/bin MAKEOBJDIRPREFIX?= /usr/obj _MAKEOBJDIRPREFIX!= /usr/bin/env -i PATH=${PATH} ${MAKE} MK_AUTO_OBJ=no \ ${.MAKEFLAGS:MMAKEOBJDIRPREFIX=*} __MAKE_CONF=${__MAKE_CONF} \ SRCCONF=${SRCCONF} SRC_ENV_CONF= \ -f /dev/null -V MAKEOBJDIRPREFIX dummy .if !empty(_MAKEOBJDIRPREFIX) .error MAKEOBJDIRPREFIX can only be set in environment or src-env.conf(5),\ not as a global (in make.conf(5) or src.conf(5)) or command-line variable. .endif # We often need to use the tree's version of make to build it. # Choices add to complexity though. # We cannot blindly use a make which may not be the one we want # so be explicit - until all choice is removed. WANT_MAKE= bmake .if !empty(.MAKE.MODE:Mmeta) # 20160604 - support missing-meta,missing-filemon and performance improvements WANT_MAKE_VERSION= 20160604 .else # 20160220 - support .dinclude for FAST_DEPEND. WANT_MAKE_VERSION= 20160220 .endif MYMAKE= ${OBJROOT}make.${MACHINE}/${WANT_MAKE} .if defined(.PARSEDIR) HAVE_MAKE= bmake .else HAVE_MAKE= fmake .endif .if defined(ALWAYS_BOOTSTRAP_MAKE) || \ ${HAVE_MAKE} != ${WANT_MAKE} || \ (defined(WANT_MAKE_VERSION) && ${MAKE_VERSION} < ${WANT_MAKE_VERSION}) NEED_MAKE_UPGRADE= t .endif .if exists(${MYMAKE}) SUB_MAKE:= ${MYMAKE} -m ${.CURDIR}/share/mk .elif defined(NEED_MAKE_UPGRADE) # It may not exist yet but we may cause it to. # In the case of fmake, upgrade_checks may cause a newer version to be built. 
SUB_MAKE= `test -x ${MYMAKE} && echo ${MYMAKE} || echo ${MAKE}` \ -m ${.CURDIR}/share/mk .else SUB_MAKE= ${MAKE} -m ${.CURDIR}/share/mk .endif _MAKE= PATH=${PATH} MAKE_CMD="${MAKE}" ${SUB_MAKE} -f Makefile.inc1 \ TARGET=${_TARGET} TARGET_ARCH=${_TARGET_ARCH} ${_MAKEARGS} .if defined(MK_META_MODE) && ${MK_META_MODE} == "yes" # Only allow meta mode for the whitelisted targets. See META_TGT_WHITELIST # above. If overridden as a make argument then don't bother trying to # disable it. .if empty(.MAKEOVERRIDES:MMK_META_MODE) .for _tgt in ${META_TGT_WHITELIST} .if make(${_tgt}) _CAN_USE_META_MODE?= yes .endif .endfor .if !defined(_CAN_USE_META_MODE) _MAKE+= MK_META_MODE=no MK_META_MODE= no .if defined(.PARSEDIR) .unexport META_MODE .endif .endif # !defined(_CAN_USE_META_MODE) .endif # empty(.MAKEOVERRIDES:MMK_META_MODE) .if ${MK_META_MODE} == "yes" .if !exists(/dev/filemon) && !defined(NO_FILEMON) && !make(showconfig) # Require filemon be loaded to provide a working incremental build .error ${.newline}ERROR: The filemon module (/dev/filemon) is not loaded. \ ${.newline}ERROR: WITH_META_MODE is enabled but requires filemon for an incremental build. \ ${.newline}ERROR: 'kldload filemon' or pass -DNO_FILEMON to suppress this error. .endif # !exists(/dev/filemon) && !defined(NO_FILEMON) .endif # ${MK_META_MODE} == yes .endif # defined(MK_META_MODE) && ${MK_META_MODE} == yes # Guess target architecture from target type, and vice versa, based on # historic FreeBSD practice of tending to have TARGET == TARGET_ARCH # expanding to TARGET == TARGET_CPUARCH in recent times, with known # exceptions. .if !defined(TARGET_ARCH) && defined(TARGET) # T->TA mapping is usually TARGET with arm64 the odd man out _TARGET_ARCH= ${TARGET:S/arm64/aarch64/:S/riscv/riscv64/} .elif !defined(TARGET) && defined(TARGET_ARCH) && \ ${TARGET_ARCH} != ${MACHINE_ARCH} # TA->T mapping is accidentally CPUARCH with aarch64 the odd man out _TARGET= ${TARGET_ARCH:${__TO_CPUARCH}:C/aarch64/arm64/} .endif .if defined(TARGET) && !defined(_TARGET) _TARGET=${TARGET} .endif .if defined(TARGET_ARCH) && !defined(_TARGET_ARCH) _TARGET_ARCH=${TARGET_ARCH} .endif # for historical compatibility for xdev targets .if defined(XDEV) _TARGET= ${XDEV} .endif .if defined(XDEV_ARCH) _TARGET_ARCH= ${XDEV_ARCH} .endif # Some targets require a set TARGET/TARGET_ARCH, check before the default # MACHINE and after the compatibility handling. .if !defined(_TARGET) || !defined(_TARGET_ARCH) ${XTGTS}: _assert_target .endif # Otherwise, default to current machine type and architecture. _TARGET?= ${MACHINE} _TARGET_ARCH?= ${MACHINE_ARCH} .if make(native-xtools*) NXB_TARGET:= ${_TARGET} NXB_TARGET_ARCH:= ${_TARGET_ARCH} _TARGET= ${MACHINE} _TARGET_ARCH= ${MACHINE_ARCH} _MAKE+= NXB_TARGET=${NXB_TARGET} \ NXB_TARGET_ARCH=${NXB_TARGET_ARCH} .endif .if make(print-dir) .SILENT: .endif _assert_target: .PHONY .MAKE .for _tgt in ${XTGTS} .if make(${_tgt}) @echo "*** Error: Both TARGET and TARGET_ARCH must be defined for \"${_tgt}\" target" @false .endif .endfor # # Make sure we have an up-to-date make(1). Only world and buildworld # should do this as those are the initial targets used for upgrades. # The user can define ALWAYS_CHECK_MAKE to have this check performed # for all targets. # .if defined(ALWAYS_CHECK_MAKE) || !defined(.PARSEDIR) ${TGTS}: upgrade_checks .else buildworld: upgrade_checks .endif # # Handle the user-driven targets, using the source relative mk files. 
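# (Illustrative note: each of these targets is simply re-run with the in-tree
# mk files, so "make buildworld" here behaves roughly like
#   cd ${.CURDIR} && ${SUB_MAKE} -f Makefile.inc1 TARGET=... TARGET_ARCH=... buildworld
# as assembled in _MAKE above.)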
# tinderbox toolchains kernel-toolchains: .MAKE ${TGTS}: .PHONY .MAKE ${_+_}@cd ${.CURDIR}; ${_MAKE} ${.TARGET} # The historic default "all" target creates files which may cause stale # or (in the cross build case) unlinkable results. Fail with an error # when no target is given. The users can explicitly specify "all" # if they want the historic behavior. .MAIN: _guard _guard: .PHONY @echo @echo "Explicit target required. Likely \"${SUBDIR_OVERRIDE:Dall:Ubuildworld}\" is wanted. See build(7)." @echo @false STARTTIME!= LC_ALL=C date CHECK_TIME!= cmp=`mktemp`; find ${.CURDIR}/sys/sys/param.h -newer "$$cmp" && rm "$$cmp"; echo .if !empty(CHECK_TIME) .error check your date/time: ${STARTTIME} .endif .if defined(HISTORICAL_MAKE_WORLD) || defined(DESTDIR) # # world # # Attempt to rebuild and reinstall everything. This target is not to be # used for upgrading an existing FreeBSD system, because the kernel is # not included. One can argue that this target doesn't build everything # then. # world: upgrade_checks .PHONY @echo "--------------------------------------------------------------" @echo ">>> make world started on ${STARTTIME}" @echo "--------------------------------------------------------------" .if target(pre-world) @echo @echo "--------------------------------------------------------------" @echo ">>> Making 'pre-world' target" @echo "--------------------------------------------------------------" ${_+_}@cd ${.CURDIR}; ${_MAKE} pre-world .endif ${_+_}@cd ${.CURDIR}; ${_MAKE} buildworld ${_+_}@cd ${.CURDIR}; ${_MAKE} installworld MK_META_MODE=no .if target(post-world) @echo @echo "--------------------------------------------------------------" @echo ">>> Making 'post-world' target" @echo "--------------------------------------------------------------" ${_+_}@cd ${.CURDIR}; ${_MAKE} post-world .endif @echo @echo "--------------------------------------------------------------" @echo ">>> make world completed on `LC_ALL=C date`" @echo " (started ${STARTTIME})" @echo "--------------------------------------------------------------" .else world: .PHONY @echo "WARNING: make world will overwrite your existing FreeBSD" @echo "installation without also building and installing a new" @echo "kernel. This can be dangerous. Please read the handbook," @echo "'Rebuilding world', for how to upgrade your system." @echo "Define DESTDIR to where you want to install FreeBSD," @echo "including /, to override this warning and proceed as usual." @echo "" @echo "Bailing out now..." @false .endif # # kernel # # Short hand for `make buildkernel installkernel' # kernel: buildkernel installkernel .PHONY # # Perform a few tests to determine if the installed tools are adequate # for building the world. # upgrade_checks: .PHONY .if defined(NEED_MAKE_UPGRADE) @${_+_}(cd ${.CURDIR} && ${MAKE} ${WANT_MAKE:S,^f,,}) .endif # # Upgrade make(1) to the current version using the installed # headers, libraries and tools. Also, allow the location of # the system bsdmake-like utility to be overridden. 
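# (Illustrative note: when NEED_MAKE_UPGRADE is set, upgrade_checks above runs
# "${MAKE} bmake", and the bmake target below stages the freshly built make
# under ${MYMAKE:H} using the MMAKE/MMAKEENV settings that follow.)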
# MMAKEENV= \ DESTDIR= \ INSTALL="sh ${.CURDIR}/tools/install.sh" MMAKE= ${MMAKEENV} ${MAKE} \ OBJTOP=${MYMAKE:H}/obj \ OBJROOT='$${OBJTOP}/' \ MAKEOBJDIRPREFIX= \ MAN= -DNO_SHARED \ -DNO_CPU_CFLAGS -DNO_WERROR \ -DNO_SUBDIR \ DESTDIR= PROGNAME=${MYMAKE:T} bmake: .PHONY @echo @echo "--------------------------------------------------------------" @echo ">>> Building an up-to-date ${.TARGET}(1)" @echo "--------------------------------------------------------------" ${_+_}@cd ${.CURDIR}/usr.bin/${.TARGET}; \ ${MMAKE} obj; \ ${MMAKE} depend; \ ${MMAKE} all; \ ${MMAKE} install DESTDIR=${MYMAKE:H} BINDIR= regress: .PHONY @echo "'make regress' has been renamed 'make check'" | /usr/bin/fmt @false tinderbox toolchains kernel-toolchains kernels worlds: upgrade_checks tinderbox: .PHONY @cd ${.CURDIR}; ${SUB_MAKE} DOING_TINDERBOX=YES universe toolchains: .PHONY @cd ${.CURDIR}; ${SUB_MAKE} UNIVERSE_TARGET=toolchain universe kernel-toolchains: .PHONY @cd ${.CURDIR}; ${SUB_MAKE} UNIVERSE_TARGET=kernel-toolchain universe kernels: .PHONY @cd ${.CURDIR}; ${SUB_MAKE} UNIVERSE_TARGET=buildkernel universe worlds: .PHONY @cd ${.CURDIR}; ${SUB_MAKE} UNIVERSE_TARGET=buildworld universe # # universe # # Attempt to rebuild *everything* for all supported architectures, # with a reasonable chance of success, regardless of how old your # existing system is. # .if make(universe) || make(universe_kernels) || make(tinderbox) || \ make(targets) || make(universe-toolchain) TARGETS?=amd64 arm arm64 i386 mips powerpc riscv sparc64 _UNIVERSE_TARGETS= ${TARGETS} TARGET_ARCHES_arm?= arm armv6 armv7 TARGET_ARCHES_arm64?= aarch64 TARGET_ARCHES_mips?= mipsel mips mips64el mips64 mipsn32 mipselhf mipshf mips64elhf mips64hf TARGET_ARCHES_powerpc?= powerpc powerpc64 powerpcspe # riscv64sf excluded due to PR 232085 TARGET_ARCHES_riscv?= riscv64 .for target in ${TARGETS} TARGET_ARCHES_${target}?= ${target} .endfor MAKE_PARAMS_riscv?= CROSS_TOOLCHAIN=riscv64-gcc MK_COVERAGE=no # XXX Remove architectures only supported by external toolchain from universe # if required toolchain packages are missing. 
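# (Illustrative example: riscv stays in the universe list only if a cross
# toolchain such as the riscv64-xtoolchain-gcc port or package has installed
# /usr/local/share/toolchains/riscv64-gcc.mk; otherwise it is skipped below.)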
TOOLCHAINS_riscv= riscv64 .for target in riscv .if ${_UNIVERSE_TARGETS:M${target}} .for toolchain in ${TOOLCHAINS_${target}} .if !exists(/usr/local/share/toolchains/${toolchain}-gcc.mk) _UNIVERSE_TARGETS:= ${_UNIVERSE_TARGETS:N${target}} universe: universe_${toolchain}_skip .PHONY universe_epilogue: universe_${toolchain}_skip .PHONY universe_${toolchain}_skip: universe_prologue .PHONY @echo ">> ${target} skipped - install ${toolchain}-xtoolchain-gcc port or package to build" .endif .endfor .endif .endfor .if defined(UNIVERSE_TARGET) MAKE_JUST_WORLDS= YES .else UNIVERSE_TARGET?= buildworld .endif KERNSRCDIR?= ${.CURDIR}/sys targets: .PHONY @echo "Supported TARGET/TARGET_ARCH pairs for world and kernel targets" .for target in ${TARGETS} .for target_arch in ${TARGET_ARCHES_${target}} @echo " ${target}/${target_arch}" .endfor .endfor .if defined(DOING_TINDERBOX) FAILFILE=${.CURDIR}/_.tinderbox.failed MAKEFAIL=tee -a ${FAILFILE} .else MAKEFAIL=cat .endif universe_prologue: upgrade_checks universe: universe_prologue universe_prologue: .PHONY @echo "--------------------------------------------------------------" @echo ">>> make universe started on ${STARTTIME}" @echo "--------------------------------------------------------------" .if defined(DOING_TINDERBOX) @rm -f ${FAILFILE} .endif universe-toolchain: .PHONY universe_prologue @echo "--------------------------------------------------------------" @echo "> Toolchain bootstrap started on `LC_ALL=C date`" @echo "--------------------------------------------------------------" ${_+_}@cd ${.CURDIR}; \ env PATH=${PATH} ${SUB_MAKE} ${JFLAG} kernel-toolchain \ TARGET=${MACHINE} TARGET_ARCH=${MACHINE_ARCH} \ OBJTOP="${HOST_OBJTOP}" \ WITHOUT_SYSTEM_COMPILER=yes \ WITHOUT_SYSTEM_LINKER=yes \ TOOLS_PREFIX_UNDEF= \ kernel-toolchain \ MK_LLVM_TARGET_ALL=yes \ > _.${.TARGET} 2>&1 || \ (echo "${.TARGET} failed," \ "check _.${.TARGET} for details" | \ ${MAKEFAIL}; false) @if [ ! -e "${HOST_OBJTOP}/tmp/usr/bin/cc" ]; then \ echo "Missing host compiler at ${HOST_OBJTOP}/tmp/usr/bin/cc?" >&2; \ false; \ fi @if [ ! -e "${HOST_OBJTOP}/tmp/usr/bin/ld" ]; then \ - echo "Missing host linker at ${HOST_OBJTOP}/tmp/usr/bin/cc?" >&2; \ + echo "Missing host linker at ${HOST_OBJTOP}/tmp/usr/bin/ld?" 
>&2; \ false; \ fi @echo "--------------------------------------------------------------" @echo "> Toolchain bootstrap completed on `LC_ALL=C date`" @echo "--------------------------------------------------------------" .for target in ${_UNIVERSE_TARGETS} universe: universe_${target} universe_epilogue: universe_${target} universe_${target}: universe_${target}_prologue .PHONY universe_${target}_prologue: universe_prologue .PHONY @echo ">> ${target} started on `LC_ALL=C date`" universe_${target}_worlds: .PHONY .if !make(targets) && !make(universe-toolchain) .for target_arch in ${TARGET_ARCHES_${target}} .if !defined(_need_clang_${target}_${target_arch}) _need_clang_${target}_${target_arch} != \ env TARGET=${target} TARGET_ARCH=${target_arch} \ ${SUB_MAKE} -C ${.CURDIR} -f Makefile.inc1 test-system-compiler \ ${MAKE_PARAMS_${target}} -V MK_CLANG_BOOTSTRAP 2>/dev/null || \ echo unknown .export _need_clang_${target}_${target_arch} .endif .if !defined(_need_lld_${target}_${target_arch}) _need_lld_${target}_${target_arch} != \ env TARGET=${target} TARGET_ARCH=${target_arch} \ ${SUB_MAKE} -C ${.CURDIR} -f Makefile.inc1 test-system-linker \ ${MAKE_PARAMS_${target}} -V MK_LLD_BOOTSTRAP 2>/dev/null || \ echo unknown .export _need_lld_${target}_${target_arch} .endif # Setup env for each arch to use the one clang. .if defined(_need_clang_${target}_${target_arch}) && \ ${_need_clang_${target}_${target_arch}} == "yes" # No check on existing XCC or CROSS_BINUTILS_PREFIX, etc, is needed since # we use the test-system-compiler logic to determine if clang needs to be # built. It will be no from that logic if already using an external # toolchain or /usr/bin/cc. # XXX: Passing HOST_OBJTOP into the PATH would allow skipping legacy, # bootstrap-tools, and cross-tools. Need to ensure each tool actually # supports all TARGETS though. # For now we only pass UNIVERSE_TOOLCHAIN_PATH which will be added at the end # of STRICTTMPPATH to ensure that the target-specific binaries come first. 
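# (Illustrative example: the probe above is roughly equivalent to running
#   make -C /usr/src -f Makefile.inc1 TARGET=mips TARGET_ARCH=mips64 \
#       test-system-compiler -V MK_CLANG_BOOTSTRAP
# by hand to see whether a given pair would bootstrap clang for universe.)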
MAKE_PARAMS_${target}+= \ XCC="${HOST_OBJTOP}/tmp/usr/bin/cc" \ XCXX="${HOST_OBJTOP}/tmp/usr/bin/c++" \ XCPP="${HOST_OBJTOP}/tmp/usr/bin/cpp" \ UNIVERSE_TOOLCHAIN_PATH=${HOST_OBJTOP}/tmp/usr/bin .endif .if defined(_need_lld_${target}_${target_arch}) && \ ${_need_lld_${target}_${target_arch}} == "yes" MAKE_PARAMS_${target}+= \ XLD="${HOST_OBJTOP}/tmp/usr/bin/ld" .endif .endfor .endif # !make(targets) .if !defined(MAKE_JUST_KERNELS) universe_${target}_done: universe_${target}_worlds .PHONY .for target_arch in ${TARGET_ARCHES_${target}} universe_${target}_worlds: universe_${target}_${target_arch} .PHONY .if (defined(_need_clang_${target}_${target_arch}) && \ ${_need_clang_${target}_${target_arch}} == "yes") || \ (defined(_need_lld_${target}_${target_arch}) && \ ${_need_lld_${target}_${target_arch}} == "yes") universe_${target}_${target_arch}: universe-toolchain universe_${target}_prologue: universe-toolchain .endif universe_${target}_${target_arch}: universe_${target}_prologue .MAKE .PHONY @echo ">> ${target}.${target_arch} ${UNIVERSE_TARGET} started on `LC_ALL=C date`" @(cd ${.CURDIR} && env __MAKE_CONF=/dev/null \ ${SUB_MAKE} ${JFLAG} ${UNIVERSE_TARGET} \ TARGET=${target} \ TARGET_ARCH=${target_arch} \ ${MAKE_PARAMS_${target}} \ > _.${target}.${target_arch}.${UNIVERSE_TARGET} 2>&1 || \ (echo "${target}.${target_arch} ${UNIVERSE_TARGET} failed," \ "check _.${target}.${target_arch}.${UNIVERSE_TARGET} for details" | \ ${MAKEFAIL})) @echo ">> ${target}.${target_arch} ${UNIVERSE_TARGET} completed on `LC_ALL=C date`" .endfor .endif # !MAKE_JUST_KERNELS .if !defined(MAKE_JUST_WORLDS) universe_${target}_done: universe_${target}_kernels .PHONY universe_${target}_kernels: universe_${target}_worlds .PHONY universe_${target}_kernels: universe_${target}_prologue .MAKE .PHONY @if [ -e "${KERNSRCDIR}/${target}/conf/NOTES" ]; then \ (cd ${KERNSRCDIR}/${target}/conf && env __MAKE_CONF=/dev/null \ ${SUB_MAKE} LINT \ > ${.CURDIR}/_.${target}.makeLINT 2>&1 || \ (echo "${target} 'make LINT' failed," \ "check _.${target}.makeLINT for details"| ${MAKEFAIL})); \ fi @cd ${.CURDIR}; ${SUB_MAKE} ${.MAKEFLAGS} TARGET=${target} \ universe_kernels .endif # !MAKE_JUST_WORLDS # Tell the user the worlds and kernels have completed universe_${target}: universe_${target}_done universe_${target}_done: @echo ">> ${target} completed on `LC_ALL=C date`" .endfor .if make(universe_kernconfs) || make(universe_kernels) .if !defined(TARGET) TARGET!= uname -m .endif universe_kernels_prologue: .PHONY @echo ">> ${TARGET} kernels started on `LC_ALL=C date`" universe_kernels: universe_kernconfs .PHONY @echo ">> ${TARGET} kernels completed on `LC_ALL=C date`" .if defined(MAKE_ALL_KERNELS) _THINNER=cat .elif defined(MAKE_LINT_KERNELS) _THINNER=grep 'LINT' || true .else _THINNER=xargs grep -L "^.NO_UNIVERSE" || true .endif KERNCONFS!= cd ${KERNSRCDIR}/${TARGET}/conf && \ find [[:upper:][:digit:]]*[[:upper:][:digit:]] \ -type f -maxdepth 0 \ ! -name DEFAULTS ! -name NOTES | \ ${_THINNER} universe_kernconfs: universe_kernels_prologue .PHONY .for kernel in ${KERNCONFS} TARGET_ARCH_${kernel}!= cd ${KERNSRCDIR}/${TARGET}/conf && \ config -m ${KERNSRCDIR}/${TARGET}/conf/${kernel} 2> /dev/null | \ grep -v WARNING: | cut -f 2 .if empty(TARGET_ARCH_${kernel}) .error "Target architecture for ${TARGET}/conf/${kernel} unknown. config(8) likely too old." 
.endif universe_kernconfs: universe_kernconf_${TARGET}_${kernel} universe_kernconf_${TARGET}_${kernel}: .MAKE @echo ">> ${TARGET}.${TARGET_ARCH_${kernel}} ${kernel} kernel started on `LC_ALL=C date`" @(cd ${.CURDIR} && env __MAKE_CONF=/dev/null \ ${SUB_MAKE} ${JFLAG} buildkernel \ TARGET=${TARGET} \ TARGET_ARCH=${TARGET_ARCH_${kernel}} \ ${MAKE_PARAMS_${TARGET}} \ KERNCONF=${kernel} \ > _.${TARGET}.${kernel} 2>&1 || \ (echo "${TARGET} ${kernel} kernel failed," \ "check _.${TARGET}.${kernel} for details"| ${MAKEFAIL})) @echo ">> ${TARGET}.${TARGET_ARCH_${kernel}} ${kernel} kernel completed on `LC_ALL=C date`" .endfor .endif # make(universe_kernels) universe: universe_epilogue universe_epilogue: .PHONY @echo "--------------------------------------------------------------" @echo ">>> make universe completed on `LC_ALL=C date`" @echo " (started ${STARTTIME})" @echo "--------------------------------------------------------------" .if defined(DOING_TINDERBOX) @if [ -e ${FAILFILE} ] ; then \ echo "Tinderbox failed:" ;\ cat ${FAILFILE} ;\ exit 1 ;\ fi .endif .endif buildLINT: .PHONY ${MAKE} -C ${.CURDIR}/sys/${_TARGET}/conf LINT .if defined(.PARSEDIR) # This makefile does not run in meta mode .MAKE.MODE= normal # Normally the things we run from here don't either. # Using -DWITH_META_MODE # we can buildworld with meta files created which are useful # for debugging, but without any of the rest of a meta mode build. MK_DIRDEPS_BUILD= no MK_STAGING= no # tell meta.autodep.mk to not even think about updating anything. UPDATE_DEPENDFILE= NO .if !make(showconfig) .export MK_DIRDEPS_BUILD MK_STAGING UPDATE_DEPENDFILE .endif .if make(universe) # we do not want a failure of one branch abort all. MAKE_JOB_ERROR_TOKEN= no .export MAKE_JOB_ERROR_TOKEN .endif .endif # bmake .endif # DIRDEPS_BUILD Index: projects/runtime-coverage-v2/lib/libc/string/strstr.c =================================================================== --- projects/runtime-coverage-v2/lib/libc/string/strstr.c (revision 346057) +++ projects/runtime-coverage-v2/lib/libc/string/strstr.c (revision 346058) @@ -1,190 +1,189 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2005-2014 Rich Felker, et al. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include __FBSDID("$FreeBSD$"); #include #include static char *twobyte_strstr(const unsigned char *h, const unsigned char *n) { uint16_t nw = n[0]<<8 | n[1], hw = h[0]<<8 | h[1]; for (h++; *h && hw != nw; hw = hw<<8 | *++h); return *h ? 
(char *)h-1 : 0; } static char *threebyte_strstr(const unsigned char *h, const unsigned char *n) { uint32_t nw = n[0]<<24 | n[1]<<16 | n[2]<<8; uint32_t hw = h[0]<<24 | h[1]<<16 | h[2]<<8; for (h+=2; *h && hw != nw; hw = (hw|*++h)<<8); return *h ? (char *)h-2 : 0; } static char *fourbyte_strstr(const unsigned char *h, const unsigned char *n) { uint32_t nw = n[0]<<24 | n[1]<<16 | n[2]<<8 | n[3]; uint32_t hw = h[0]<<24 | h[1]<<16 | h[2]<<8 | h[3]; for (h+=3; *h && hw != nw; hw = hw<<8 | *++h); return *h ? (char *)h-3 : 0; } #define MAX(a,b) ((a)>(b)?(a):(b)) #define MIN(a,b) ((a)<(b)?(a):(b)) #define BITOP(a,b,op) \ ((a)[(size_t)(b)/(8*sizeof *(a))] op (size_t)1<<((size_t)(b)%(8*sizeof *(a)))) /* * Two Way string search algorithm, with a bad shift table applied to the last * byte of the window. A bit array marks which entries in the shift table are * initialized to avoid fully initializing a 1kb/2kb table. * * Reference: CROCHEMORE M., PERRIN D., 1991, Two-way string-matching, * Journal of the ACM 38(3):651-675 */ static char *twoway_strstr(const unsigned char *h, const unsigned char *n) { const unsigned char *z; size_t l, ip, jp, k, p, ms, p0, mem, mem0; size_t byteset[32 / sizeof(size_t)] = { 0 }; size_t shift[256]; /* Computing length of needle and fill shift table */ for (l=0; n[l] && h[l]; l++) BITOP(byteset, n[l], |=), shift[n[l]] = l+1; if (n[l]) return 0; /* hit the end of h */ /* Compute maximal suffix */ ip = -1; jp = 0; k = p = 1; while (jp+k n[jp+k]) { jp += k; k = 1; p = jp - ip; } else { ip = jp++; k = p = 1; } } ms = ip; p0 = p; /* And with the opposite comparison */ ip = -1; jp = 0; k = p = 1; while (jp+k ms+1) ms = ip; else p = p0; /* Periodic needle? */ if (memcmp(n, n+p, ms+1)) { mem0 = 0; p = MAX(ms, l-ms-1) + 1; } else mem0 = l-p; mem = 0; /* Initialize incremental end-of-haystack pointer */ z = h; /* Search loop */ for (;;) { /* Update incremental end-of-haystack pointer */ if (z-h < l) { /* Fast estimate for MIN(l,63) */ size_t grow = l | 63; const unsigned char *z2 = memchr(z, 0, grow); if (z2) { z = z2; if (z-h < l) return 0; } else z += grow; } /* Check last byte first; advance by shift on mismatch */ if (BITOP(byteset, h[l-1], &)) { k = l-shift[h[l-1]]; - //printf("adv by %zu (on %c) at [%s] (%zu;l=%zu)\n", k, h[l-1], h, shift[h[l-1]], l); if (k) { - if (mem0 && mem && k < p) k = l-p; + if (k < mem) k = mem; h += k; mem = 0; continue; } } else { h += l; mem = 0; continue; } /* Compare right half */ for (k=MAX(ms+1,mem); n[k] && n[k] == h[k]; k++); if (n[k]) { h += k-ms; mem = 0; continue; } /* Compare left half */ for (k=ms+1; k>mem && n[k-1] == h[k-1]; k--); if (k <= mem) return (char *)h; h += p; mem = mem0; } } char *strstr(const char *h, const char *n) { /* Return immediately on empty needle */ if (!n[0]) return (char *)h; /* Use faster algorithms for short needles */ h = strchr(h, *n); if (!h || !n[1]) return (char *)h; if (!h[1]) return 0; if (!n[2]) return twobyte_strstr((void *)h, (void *)n); if (!h[2]) return 0; if (!n[3]) return threebyte_strstr((void *)h, (void *)n); if (!h[3]) return 0; if (!n[4]) return fourbyte_strstr((void *)h, (void *)n); return twoway_strstr((void *)h, (void *)n); } Index: projects/runtime-coverage-v2/lib/libc/sys/procctl.2 =================================================================== --- projects/runtime-coverage-v2/lib/libc/sys/procctl.2 (revision 346057) +++ projects/runtime-coverage-v2/lib/libc/sys/procctl.2 (revision 346058) @@ -1,598 +1,600 @@ .\" Copyright (c) 2013 Hudson River Trading LLC .\" Written by: John H. 
Baldwin .\" All rights reserved. .\" .\" Copyright (c) 2014 The FreeBSD Foundation .\" Portions of this documentation were written by Konstantin Belousov .\" under sponsorship from the FreeBSD Foundation. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" $FreeBSD$ .\" -.Dd February 23, 2019 +.Dd April 9, 2019 .Dt PROCCTL 2 .Os .Sh NAME .Nm procctl .Nd control processes .Sh LIBRARY .Lb libc .Sh SYNOPSIS .In sys/procctl.h .Ft int .Fn procctl "idtype_t idtype" "id_t id" "int cmd" "void *arg" .Sh DESCRIPTION The .Fn procctl system call provides for control over processes. The .Fa idtype and .Fa id arguments specify the set of processes to control. If multiple processes match the identifier, .Nm will make a .Dq best effort to control as many of the selected processes as possible. An error is only returned if no selected processes successfully complete the request. The following identifier types are supported: .Bl -tag -width P_PGID .It Dv P_PID Control the process with the process ID .Fa id . .It Dv P_PGID Control processes belonging to the process group with the ID .Fa id . .El .Pp The control request to perform is specified by the .Fa cmd argument. The following commands are supported: .Bl -tag -width PROC_TRAPCAP_STATUS .It Dv PROC_ASLR_CTL Controls the Address Space Layout Randomization (ASLR) in the program images created by .Xr execve 2 in the specified process or its descendants that did not changed the control nor modified it by other means. The .Va arg parameter must point to the integer variable holding one of the following values: .Bl -tag -width PROC_ASLR_FORCE_DISABLE .It Dv PROC_ASLR_FORCE_ENABLE Request that ASLR is enabled after execution, even if it is disabled system-wide. The image flag and set-uid might prevent ASLR enablement still. .It Dv PROC_ASLR_FORCE_DISABLE Request that ASLR is disabled after execution. Same notes as for .Dv PROC_ASKR_FORCE_ENABLE apply. .It Dv PROC_ASLR_NOFORCE Use system-wide configured policy for ASLR. .El .It Dv PROC_ASLR_STATUS Returns the current status of ASLR enablement for the target process. 
The .Va arg parameter must point to the integer variable, where one of the following values is written: .Bl -tag -width PROC_ASLR_FORCE_DISABLE .It Dv PROC_ASLR_FORCE_ENABLE .It Dv PROC_ASLR_FORCE_DISABLE .It Dv PROC_ASLR_NOFORCE .El .Pp If the currently executed image in the process itself has ASLR enabled, the .Dv PROC_ASLR_ACTIVE flag is or-ed with the value listed above. .It Dv PROC_SPROTECT Set process protection state. This is used to mark a process as protected from being killed if the system exhausts the available memory and swap. The .Fa arg parameter must point to an integer containing an operation and zero or more optional flags. The following operations are supported: .Bl -tag -width PPROT_CLEAR .It Dv PPROT_SET Mark the selected processes as protected. .It Dv PPROT_CLEAR Clear the protected state of selected processes. .El .Pp The following optional flags are supported: .Bl -tag -width PPROT_DESCEND .It Dv PPROT_DESCEND Apply the requested operation to all child processes of each selected process in addition to each selected process. .It Dv PPROT_INHERIT When used with .Dv PPROT_SET , mark all future child processes of each selected process as protected. Future child processes will also mark all of their future child processes. .El .It Dv PROC_REAP_ACQUIRE Acquires the reaper status for the current process. Reaper status means that children orphaned by the reaper's descendants that were forked after the acquisition of reaper status are reparented to the reaper process. After system initialization, .Xr init 8 is the default reaper. .It Dv PROC_REAP_RELEASE Release the reaper state for the current process. The reaper of the current process becomes the new reaper of the current process's descendants. .It Dv PROC_REAP_STATUS Provides information about the reaper of the specified process, or the process itself when it is a reaper. The .Fa data argument must point to a .Vt procctl_reaper_status structure which is filled in by the syscall on successful return. .Bd -literal struct procctl_reaper_status { u_int rs_flags; u_int rs_children; u_int rs_descendants; pid_t rs_reaper; pid_t rs_pid; }; .Ed The .Fa rs_flags may have the following flags returned: .Bl -tag -width REAPER_STATUS_REALINIT .It Dv REAPER_STATUS_OWNED The specified process has acquired reaper status and has not released it. When the flag is returned, the specified process .Fa id , pid, identifies the reaper, otherwise the .Fa rs_reaper field of the structure is set to the pid of the reaper for the specified process id. .It Dv REAPER_STATUS_REALINIT The specified process is the root of the reaper tree, i.e., .Xr init 8 . .El .Pp The .Fa rs_children field returns the number of children of the reaper among the descendants. It is possible to have a child whose reaper is not the specified process, since the reaper for any existing children is not reset on the .Dv PROC_REAP_ACQUIRE operation. The .Fa rs_descendants field returns the total number of descendants of the reaper(s), not counting descendants of the reaper in the subtree. The .Fa rs_reaper field returns the reaper pid. The .Fa rs_pid returns the pid of one reaper child if there are any descendants. .It Dv PROC_REAP_GETPIDS Queries the list of descendants of the reaper of the specified process. The request takes a pointer to a .Vt procctl_reaper_pids structure in the .Fa data parameter. 
.Bd -literal struct procctl_reaper_pids { u_int rp_count; struct procctl_reaper_pidinfo *rp_pids; }; .Ed When called, the .Fa rp_pids field must point to an array of .Vt procctl_reaper_pidinfo structures, to be filled in on return, and the .Fa rp_count field must specify the size of the array, into which no more than .Fa rp_count elements will be filled in by the kernel. .Pp The .Vt "struct procctl_reaper_pidinfo" structure provides some information about one of the reaper's descendants. Note that for a descendant that is not a child, it may be incorrectly identified because of a race in which the original child process exited and the exited process's pid was reused for an unrelated process. .Bd -literal struct procctl_reaper_pidinfo { pid_t pi_pid; pid_t pi_subtree; u_int pi_flags; }; .Ed The .Fa pi_pid field is the process id of the descendant. The .Fa pi_subtree field provides the pid of the child of the reaper, which is the (grand-)parent of the process. The .Fa pi_flags field returns the following flags, further describing the descendant: .Bl -tag -width REAPER_PIDINFO_REAPER .It Dv REAPER_PIDINFO_VALID Set to indicate that the .Vt procctl_reaper_pidinfo structure was filled in by the kernel. Zero-filling the .Fa rp_pids array and testing the .Dv REAPER_PIDINFO_VALID flag allows the caller to detect the end of the returned array. .It Dv REAPER_PIDINFO_CHILD The .Fa pi_pid field identifies the direct child of the reaper. .It Dv REAPER_PIDINFO_REAPER The reported process is itself a reaper. The descendants of the subordinate reaper are not reported. .El .It Dv PROC_REAP_KILL Request to deliver a signal to some subset of the descendants of the reaper. The .Fa data parameter must point to a .Vt procctl_reaper_kill structure, which is used both for parameters and status return. .Bd -literal struct procctl_reaper_kill { int rk_sig; u_int rk_flags; pid_t rk_subtree; u_int rk_killed; pid_t rk_fpid; }; .Ed The .Fa rk_sig field specifies the signal to be delivered. Zero is not a valid signal number, unlike for .Xr kill 2 . The .Fa rk_flags field further directs the operation. It is or-ed from the following flags: .Bl -tag -width REAPER_KILL_CHILDREN .It Dv REAPER_KILL_CHILDREN Deliver the specified signal only to direct children of the reaper. .It Dv REAPER_KILL_SUBTREE Deliver the specified signal only to descendants that were forked by the direct child with pid specified in the .Fa rk_subtree field. .El If neither the .Dv REAPER_KILL_CHILDREN nor the .Dv REAPER_KILL_SUBTREE flags are specified, all current descendants of the reaper are signalled. .Pp If a signal was delivered to any process, the return value from the request is zero. In this case, the .Fa rk_killed field identifies the number of processes signalled. The .Fa rk_fpid field is set to the pid of the first process for which signal delivery failed, e.g., due to permission problems. If no such process exists, the .Fa rk_fpid field is set to -1. .It Dv PROC_TRACE_CTL Enable or disable tracing of the specified process(es), according to the value of the integer argument. Tracing includes attachment to the process using the .Xr ptrace 2 and .Xr ktrace 2 , debugging sysctls, .Xr hwpmc 4 , .Xr dtrace 1 , and core dumping. Possible values for the .Fa data argument are: .Bl -tag -width PROC_TRACE_CTL_DISABLE_EXEC .It Dv PROC_TRACE_CTL_ENABLE Enable tracing, after it was disabled by .Dv PROC_TRACE_CTL_DISABLE . Only allowed for self. .It Dv PROC_TRACE_CTL_DISABLE Disable tracing for the specified process. 
Tracing is re-enabled when the process changes the executing program with the .Xr execve 2 syscall. A child inherits the trace settings from the parent on .Xr fork 2 . .It Dv PROC_TRACE_CTL_DISABLE_EXEC Same as .Dv PROC_TRACE_CTL_DISABLE , but the setting persists for the process even after .Xr execve 2 . .El .It Dv PROC_TRACE_STATUS Returns the current tracing status for the specified process in the integer variable pointed to by .Fa data . If tracing is disabled, .Fa data is set to -1. If tracing is enabled, but no debugger is attached by the .Xr ptrace 2 syscall, .Fa data is set to 0. If a debugger is attached, .Fa data is set to the pid of the debugger process. .It Dv PROC_TRAPCAP_CTL Controls the capability mode sandbox actions for the specified sandboxed processes, on a return from any syscall which gives either a .Er ENOTCAPABLE or .Er ECAPMODE error. If the control is enabled, such errors from the syscalls cause delivery of the synchronous .Dv SIGTRAP signal to the thread immediately before returning from the syscalls. .Pp Possible values for the .Fa data argument are: .Bl -tag -width PROC_TRAPCAP_CTL_DISABLE .It Dv PROC_TRAPCAP_CTL_ENABLE Enable the .Dv SIGTRAP signal delivery on capability mode access violations. The enabled mode is inherited by the children of the process, and is kept after .Xr fexecve 2 calls. .It Dv PROC_TRAPCAP_CTL_DISABLE Disable the signal delivery on capability mode access violations. Note that the global sysctl .Dv kern.trap_enotcap might still cause the signal to be delivered. See .Xr capsicum 4 . .El .Pp On signal delivery, the .Va si_errno member of the .Fa siginfo signal handler parameter is set to the syscall error value, and the .Va si_code member is set to .Dv TRAP_CAP . .Pp See .Xr capsicum 4 for more information about the capability mode. .It Dv PROC_TRAPCAP_STATUS Return the current status of signalling capability mode access violations for the specified process. The integer value pointed to by the .Fa data argument is set to the .Dv PROC_TRAPCAP_CTL_ENABLE value if the process control enables signal delivery, and to .Dv PROC_TRAPCAP_CTL_DISABLE otherwise. .Pp See the note about sysctl .Dv kern.trap_enotcap above, which gives independent global control of signal delivery. .It Dv PROC_PDEATHSIG_CTL Request the delivery of a signal when the parent of the calling process exits. .Fa idtype must be .Dv P_PID and .Fa id must be either the caller's pid or zero, with no difference in effect. The value is cleared for child processes and when executing set-user-ID or set-group-ID binaries. .Fa arg must point to a value of type .Vt int indicating the signal that should be delivered to the caller. Use zero to cancel a previously requested signal delivery. .It Dv PROC_PDEATHSIG_STATUS Query the current signal number that will be delivered when the parent of the calling process exits. .Fa idtype must be .Dv P_PID and .Fa id must be either the caller's pid or zero, with no difference in effect. .Fa arg must point to a memory location that can hold a value of type .Vt int . If signal delivery has not been requested, it will contain zero on return. .El .Sh NOTES Disabling tracing on a process should not be considered a security feature, as it is bypassable both by the kernel and privileged processes, and via other system mechanisms. As such, it should not be utilized to reliably protect cryptographic keying material or other confidential data. .Sh RETURN VALUES If an error occurs, a value of -1 is returned and .Va errno is set to indicate the error.
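.Sh EXAMPLES
The following is a minimal sketch, shown here for illustration only, of
requesting that
.Dv SIGTERM
be delivered to the calling process when its parent exits, using
.Dv PROC_PDEATHSIG_CTL :
.Bd -literal -offset indent
#include <sys/procctl.h>
#include <sys/wait.h>

#include <err.h>
#include <signal.h>

int
main(void)
{
	int sig = SIGTERM;

	/* id may be zero or the caller's pid; both have the same effect. */
	if (procctl(P_PID, 0, PROC_PDEATHSIG_CTL, &sig) == -1)
		err(1, "procctl(PROC_PDEATHSIG_CTL)");
	/* ... */
	return (0);
}
.Ed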
.Sh ERRORS The .Fn procctl system call will fail if: .Bl -tag -width Er .It Bq Er EFAULT The .Fa arg parameter points outside the process's allocated address space. .It Bq Er EINVAL The .Fa cmd argument specifies an unsupported command. .Pp The .Fa idtype argument specifies an unsupported identifier type. .It Bq Er EPERM The calling process does not have permission to perform the requested operation on any of the selected processes. .It Bq Er ESRCH No processes matched the requested .Fa idtype and .Fa id . .It Bq Er EINVAL An invalid operation or flag was passed in .Fa arg for a .Dv PROC_SPROTECT command. .It Bq Er EPERM The .Fa idtype argument is not equal to .Dv P_PID , or .Fa id is not equal to the pid of the calling process, for .Dv PROC_REAP_ACQUIRE or .Dv PROC_REAP_RELEASE requests. .It Bq Er EINVAL Invalid or undefined flags were passed to a .Dv PROC_REAP_KILL request. .It Bq Er EINVAL An invalid or zero signal number was requested for a .Dv PROC_REAP_KILL request. .It Bq Er EINVAL The .Dv PROC_REAP_RELEASE request was issued by the .Xr init 8 process. .It Bq Er EBUSY The .Dv PROC_REAP_ACQUIRE request was issued by a process that had already acquired reaper status and has not yet released it. .It Bq Er EBUSY The .Dv PROC_TRACE_CTL request was issued for a process already being traced. .It Bq Er EPERM The .Dv PROC_TRACE_CTL request to re-enable tracing of the process .Po Dv PROC_TRACE_CTL_ENABLE Pc , or to disable persistence of .Dv PROC_TRACE_CTL_DISABLE on .Xr execve 2 was issued for a non-current process. .It Bq Er EINVAL The value of the integer .Fa data parameter for the .Dv PROC_TRACE_CTL or .Dv PROC_TRAPCAP_CTL request is invalid. .It Bq Er EINVAL The .Dv PROC_PDEATHSIG_CTL or .Dv PROC_PDEATHSIG_STATUS request referenced an unsupported .Fa id , .Fa idtype or invalid signal number. .El .Sh SEE ALSO .Xr dtrace 1 , +.Xr proccontrol 1 , +.Xr protect 1 , .Xr cap_enter 2 , .Xr kill 2 , .Xr ktrace 2 , .Xr ptrace 2 , .Xr wait 2 , .Xr capsicum 4 , .Xr hwpmc 4 , .Xr init 8 .Sh HISTORY The .Fn procctl function appeared in .Fx 10.0 . .Pp The reaper facility is based on a similar feature of Linux and DragonflyBSD, and first appeared in .Fx 10.2 . .Pp The .Dv PROC_PDEATHSIG_CTL facility is based on the prctl(PR_SET_PDEATHSIG, ...) feature of Linux, and first appeared in .Fx 11.2 . .Pp The ASLR support was added to the system for checklist compliance in .Fx 13.0 . Index: projects/runtime-coverage-v2/share/man/man7/hostname.7 =================================================================== --- projects/runtime-coverage-v2/share/man/man7/hostname.7 (revision 346057) +++ projects/runtime-coverage-v2/share/man/man7/hostname.7 (revision 346058) @@ -1,88 +1,88 @@ .\" Copyright (c) 1987, 1990, 1993 .\" The Regents of the University of California. All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 3. Neither the name of the University nor the names of its contributors .\" may be used to endorse or promote products derived from this software .\" without specific prior written permission.
.\" .\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" @(#)hostname.7 8.2 (Berkeley) 12/30/93 .\" $FreeBSD$ .\" .Dd December 25, 2013 .Dt HOSTNAME 7 .Os .Sh NAME .Nm hostname .Nd host name resolution description .Sh DESCRIPTION Hostnames are domains, where a domain is a hierarchical, dot-separated list of subdomains; for example, the machine monet, in the Berkeley subdomain of the EDU subdomain of the Internet would be represented as .Pp .Dl monet.Berkeley.EDU .Pp (with no trailing dot). .Pp Hostnames are often used with network client and server programs, which must generally translate the name to an address for use. (This function is generally performed by the library routine .Xr gethostbyname 3 . ) Hostnames are resolved by the Internet name resolver in the following fashion. .Pp If the name consists of a single component, i.e., contains no dot, and if the environment variable .Dq Ev HOSTALIASES is set to the name of a file, that file is searched for any string matching the input hostname. The file should consist of lines made up of two white-space separated strings, the first of which is the hostname alias, and the second of which is the complete hostname to be substituted for that alias. If a case-insensitive match is found between the hostname to be resolved and the first field of a line in the file, the substituted name is looked up with no further processing. .Pp If the input name ends with a trailing dot, the trailing dot is removed, and the remaining name is looked up with no further processing. .Pp If the input name does not end with a trailing dot, it is looked up by searching through a list of domains until a match is found. The default search list includes first the local domain, then its parent domains with at least 2 name components (longest first). For example, in the domain CS.Berkeley.EDU, the name lithium.CChem will be checked first as lithium.CChem.CS.Berkeley.EDU and then as lithium.CChem.Berkeley.EDU. Lithium.CChem.EDU will not be tried, as there is only one component remaining from the local domain. The search path can be changed from the default by a system-wide configuration file (see .Xr resolver 5 ) . .Sh SEE ALSO .Xr gethostbyname 3 , -.Xr resolver 5 , +.Xr resolver 5 .Sh HISTORY .Nm Hostname appeared in .Bx 4.2 . Index: projects/runtime-coverage-v2/sys/dev/usb/net/if_ure.c =================================================================== --- projects/runtime-coverage-v2/sys/dev/usb/net/if_ure.c (revision 346057) +++ projects/runtime-coverage-v2/sys/dev/usb/net/if_ure.c (revision 346058) @@ -1,1264 +1,1277 @@ /*- * Copyright (c) 2015-2016 Kevin Lo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "usbdevs.h" #define USB_DEBUG_VAR ure_debug #include #include #include #include #ifdef USB_DEBUG static int ure_debug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, ure, CTLFLAG_RW, 0, "USB ure"); SYSCTL_INT(_hw_usb_ure, OID_AUTO, debug, CTLFLAG_RWTUN, &ure_debug, 0, "Debug level"); #endif +#define ETHER_IS_ZERO(addr) \ + (!(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5])) + /* * Various supported device vendors/products. */ static const STRUCT_USB_HOST_ID ure_devs[] = { #define URE_DEV(v,p,i) { USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, i) } URE_DEV(LENOVO, RTL8153, 0), URE_DEV(LENOVO, TBT3LAN, 0), URE_DEV(LENOVO, ONELINK, 0), URE_DEV(LENOVO, USBCLAN, 0), URE_DEV(NVIDIA, RTL8153, 0), URE_DEV(REALTEK, RTL8152, URE_FLAG_8152), URE_DEV(REALTEK, RTL8153, 0), URE_DEV(TPLINK, RTL8153, 0), #undef URE_DEV }; static device_probe_t ure_probe; static device_attach_t ure_attach; static device_detach_t ure_detach; static usb_callback_t ure_bulk_read_callback; static usb_callback_t ure_bulk_write_callback; static miibus_readreg_t ure_miibus_readreg; static miibus_writereg_t ure_miibus_writereg; static miibus_statchg_t ure_miibus_statchg; static uether_fn_t ure_attach_post; static uether_fn_t ure_init; static uether_fn_t ure_stop; static uether_fn_t ure_start; static uether_fn_t ure_tick; static uether_fn_t ure_rxfilter; static int ure_ctl(struct ure_softc *, uint8_t, uint16_t, uint16_t, void *, int); static int ure_read_mem(struct ure_softc *, uint16_t, uint16_t, void *, int); static int ure_write_mem(struct ure_softc *, uint16_t, uint16_t, void *, int); static uint8_t ure_read_1(struct ure_softc *, uint16_t, uint16_t); static uint16_t ure_read_2(struct ure_softc *, uint16_t, uint16_t); static uint32_t ure_read_4(struct ure_softc *, uint16_t, uint16_t); static int ure_write_1(struct ure_softc *, uint16_t, uint16_t, uint32_t); static int ure_write_2(struct ure_softc *, uint16_t, uint16_t, uint32_t); static int ure_write_4(struct ure_softc *, uint16_t, uint16_t, uint32_t); static uint16_t ure_ocp_reg_read(struct ure_softc *, uint16_t); static void ure_ocp_reg_write(struct ure_softc *, uint16_t, uint16_t); static void ure_read_chipver(struct ure_softc *); static int ure_attach_post_sub(struct usb_ether *); static void 
ure_reset(struct ure_softc *); static int ure_ifmedia_upd(struct ifnet *); static void ure_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int ure_ioctl(struct ifnet *, u_long, caddr_t); static void ure_rtl8152_init(struct ure_softc *); static void ure_rtl8153_init(struct ure_softc *); static void ure_disable_teredo(struct ure_softc *); static void ure_init_fifo(struct ure_softc *); static const struct usb_config ure_config[URE_N_TRANSFER] = { [URE_BULK_DT_WR] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = MCLBYTES, .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = ure_bulk_write_callback, .timeout = 10000, /* 10 seconds */ }, [URE_BULK_DT_RD] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = 16384, .flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, .callback = ure_bulk_read_callback, .timeout = 0, /* no timeout */ }, }; static device_method_t ure_methods[] = { /* Device interface. */ DEVMETHOD(device_probe, ure_probe), DEVMETHOD(device_attach, ure_attach), DEVMETHOD(device_detach, ure_detach), /* MII interface. */ DEVMETHOD(miibus_readreg, ure_miibus_readreg), DEVMETHOD(miibus_writereg, ure_miibus_writereg), DEVMETHOD(miibus_statchg, ure_miibus_statchg), DEVMETHOD_END }; static driver_t ure_driver = { .name = "ure", .methods = ure_methods, .size = sizeof(struct ure_softc), }; static devclass_t ure_devclass; DRIVER_MODULE(ure, uhub, ure_driver, ure_devclass, NULL, NULL); DRIVER_MODULE(miibus, ure, miibus_driver, miibus_devclass, NULL, NULL); MODULE_DEPEND(ure, uether, 1, 1, 1); MODULE_DEPEND(ure, usb, 1, 1, 1); MODULE_DEPEND(ure, ether, 1, 1, 1); MODULE_DEPEND(ure, miibus, 1, 1, 1); MODULE_VERSION(ure, 1); USB_PNP_HOST_INFO(ure_devs); static const struct usb_ether_methods ure_ue_methods = { .ue_attach_post = ure_attach_post, .ue_attach_post_sub = ure_attach_post_sub, .ue_start = ure_start, .ue_init = ure_init, .ue_stop = ure_stop, .ue_tick = ure_tick, .ue_setmulti = ure_rxfilter, .ue_setpromisc = ure_rxfilter, .ue_mii_upd = ure_ifmedia_upd, .ue_mii_sts = ure_ifmedia_sts, }; static int ure_ctl(struct ure_softc *sc, uint8_t rw, uint16_t val, uint16_t index, void *buf, int len) { struct usb_device_request req; URE_LOCK_ASSERT(sc, MA_OWNED); if (rw == URE_CTL_WRITE) req.bmRequestType = UT_WRITE_VENDOR_DEVICE; else req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = UR_SET_ADDRESS; USETW(req.wValue, val); USETW(req.wIndex, index); USETW(req.wLength, len); return (uether_do_request(&sc->sc_ue, &req, buf, 1000)); } static int ure_read_mem(struct ure_softc *sc, uint16_t addr, uint16_t index, void *buf, int len) { return (ure_ctl(sc, URE_CTL_READ, addr, index, buf, len)); } static int ure_write_mem(struct ure_softc *sc, uint16_t addr, uint16_t index, void *buf, int len) { return (ure_ctl(sc, URE_CTL_WRITE, addr, index, buf, len)); } static uint8_t ure_read_1(struct ure_softc *sc, uint16_t reg, uint16_t index) { uint32_t val; uint8_t temp[4]; uint8_t shift; shift = (reg & 3) << 3; reg &= ~3; ure_read_mem(sc, reg, index, &temp, 4); val = UGETDW(temp); val >>= shift; return (val & 0xff); } static uint16_t ure_read_2(struct ure_softc *sc, uint16_t reg, uint16_t index) { uint32_t val; uint8_t temp[4]; uint8_t shift; shift = (reg & 2) << 3; reg &= ~3; ure_read_mem(sc, reg, index, &temp, 4); val = UGETDW(temp); val >>= shift; return (val & 0xffff); } static uint32_t ure_read_4(struct ure_softc *sc, uint16_t reg, uint16_t index) { uint8_t temp[4]; ure_read_mem(sc, reg, index, &temp, 4); return (UGETDW(temp)); } static 
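/*
 * All MAC/PLA/USB registers are accessed through 4-byte vendor control
 * transfers.  The 1- and 2-byte helpers work on the containing dword:
 * reads fetch the full dword and shift the addressed bytes down, while
 * writes shift the value into the right byte lane and pass a
 * correspondingly shifted URE_BYTE_EN_* mask in the index word so that
 * only the addressed bytes are modified.
 */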
int ure_write_1(struct ure_softc *sc, uint16_t reg, uint16_t index, uint32_t val) { uint16_t byen; uint8_t temp[4]; uint8_t shift; byen = URE_BYTE_EN_BYTE; shift = reg & 3; val &= 0xff; if (reg & 3) { byen <<= shift; val <<= (shift << 3); reg &= ~3; } USETDW(temp, val); return (ure_write_mem(sc, reg, index | byen, &temp, 4)); } static int ure_write_2(struct ure_softc *sc, uint16_t reg, uint16_t index, uint32_t val) { uint16_t byen; uint8_t temp[4]; uint8_t shift; byen = URE_BYTE_EN_WORD; shift = reg & 2; val &= 0xffff; if (reg & 2) { byen <<= shift; val <<= (shift << 3); reg &= ~3; } USETDW(temp, val); return (ure_write_mem(sc, reg, index | byen, &temp, 4)); } static int ure_write_4(struct ure_softc *sc, uint16_t reg, uint16_t index, uint32_t val) { uint8_t temp[4]; USETDW(temp, val); return (ure_write_mem(sc, reg, index | URE_BYTE_EN_DWORD, &temp, 4)); } static uint16_t ure_ocp_reg_read(struct ure_softc *sc, uint16_t addr) { uint16_t reg; ure_write_2(sc, URE_PLA_OCP_GPHY_BASE, URE_MCU_TYPE_PLA, addr & 0xf000); reg = (addr & 0x0fff) | 0xb000; return (ure_read_2(sc, reg, URE_MCU_TYPE_PLA)); } static void ure_ocp_reg_write(struct ure_softc *sc, uint16_t addr, uint16_t data) { uint16_t reg; ure_write_2(sc, URE_PLA_OCP_GPHY_BASE, URE_MCU_TYPE_PLA, addr & 0xf000); reg = (addr & 0x0fff) | 0xb000; ure_write_2(sc, reg, URE_MCU_TYPE_PLA, data); } static int ure_miibus_readreg(device_t dev, int phy, int reg) { struct ure_softc *sc; uint16_t val; int locked; sc = device_get_softc(dev); locked = mtx_owned(&sc->sc_mtx); if (!locked) URE_LOCK(sc); /* Let the rgephy driver read the URE_GMEDIASTAT register. */ if (reg == URE_GMEDIASTAT) { if (!locked) URE_UNLOCK(sc); return (ure_read_1(sc, URE_GMEDIASTAT, URE_MCU_TYPE_PLA)); } val = ure_ocp_reg_read(sc, URE_OCP_BASE_MII + reg * 2); if (!locked) URE_UNLOCK(sc); return (val); } static int ure_miibus_writereg(device_t dev, int phy, int reg, int val) { struct ure_softc *sc; int locked; sc = device_get_softc(dev); if (sc->sc_phyno != phy) return (0); locked = mtx_owned(&sc->sc_mtx); if (!locked) URE_LOCK(sc); ure_ocp_reg_write(sc, URE_OCP_BASE_MII + reg * 2, val); if (!locked) URE_UNLOCK(sc); return (0); } static void ure_miibus_statchg(device_t dev) { struct ure_softc *sc; struct mii_data *mii; struct ifnet *ifp; int locked; sc = device_get_softc(dev); mii = GET_MII(sc); locked = mtx_owned(&sc->sc_mtx); if (!locked) URE_LOCK(sc); ifp = uether_getifp(&sc->sc_ue); if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) goto done; sc->sc_flags &= ~URE_FLAG_LINK; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: sc->sc_flags |= URE_FLAG_LINK; break; case IFM_1000_T: if ((sc->sc_flags & URE_FLAG_8152) != 0) break; sc->sc_flags |= URE_FLAG_LINK; break; default: break; } } /* Lost link, do nothing. */ if ((sc->sc_flags & URE_FLAG_LINK) == 0) goto done; done: if (!locked) URE_UNLOCK(sc); } /* * Probe for a RTL8152/RTL8153 chip. */ static int ure_probe(device_t dev) { struct usb_attach_arg *uaa; uaa = device_get_ivars(dev); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bConfigIndex != URE_CONFIG_IDX) return (ENXIO); if (uaa->info.bIfaceIndex != URE_IFACE_IDX) return (ENXIO); return (usbd_lookup_id_by_uaa(ure_devs, sizeof(ure_devs), uaa)); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. 
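 * The two bulk transfers are allocated from ure_config and the rest of
 * the interface setup is handed to the usb_ether framework through
 * ure_ue_methods.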
*/ static int ure_attach(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); struct ure_softc *sc = device_get_softc(dev); struct usb_ether *ue = &sc->sc_ue; uint8_t iface_index; int error; sc->sc_flags = USB_GET_DRIVER_INFO(uaa); device_set_usb_desc(dev); mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF); iface_index = URE_IFACE_IDX; error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, ure_config, URE_N_TRANSFER, sc, &sc->sc_mtx); if (error != 0) { device_printf(dev, "allocating USB transfers failed\n"); goto detach; } ue->ue_sc = sc; ue->ue_dev = dev; ue->ue_udev = uaa->device; ue->ue_mtx = &sc->sc_mtx; ue->ue_methods = &ure_ue_methods; error = uether_ifattach(ue); if (error != 0) { device_printf(dev, "could not attach interface\n"); goto detach; } return (0); /* success */ detach: ure_detach(dev); return (ENXIO); /* failure */ } static int ure_detach(device_t dev) { struct ure_softc *sc = device_get_softc(dev); struct usb_ether *ue = &sc->sc_ue; usbd_transfer_unsetup(sc->sc_xfer, URE_N_TRANSFER); uether_ifdetach(ue); mtx_destroy(&sc->sc_mtx); return (0); } static void ure_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error) { struct ure_softc *sc = usbd_xfer_softc(xfer); struct usb_ether *ue = &sc->sc_ue; struct ifnet *ifp = uether_getifp(ue); struct usb_page_cache *pc; struct ure_rxpkt pkt; int actlen, len; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: if (actlen < (int)(sizeof(pkt))) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto tr_setup; } pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_out(pc, 0, &pkt, sizeof(pkt)); len = le32toh(pkt.ure_pktlen) & URE_RXPKT_LEN_MASK; len -= ETHER_CRC_LEN; if (actlen < (int)(len + sizeof(pkt))) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto tr_setup; } uether_rxbuf(ue, pc, sizeof(pkt), len); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); uether_rxflush(ue); return; default: /* Error */ DPRINTF("bulk read error, %s\n", usbd_errstr(error)); if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto tr_setup; } return; } } static void ure_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error) { struct ure_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = uether_getifp(&sc->sc_ue); struct usb_page_cache *pc; struct mbuf *m; struct ure_txpkt txpkt; int len, pos; switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(11, "transfer complete\n"); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: if ((sc->sc_flags & URE_FLAG_LINK) == 0 || (ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0) { /* * don't send anything if there is no link ! */ return; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; pos = 0; len = m->m_pkthdr.len; pc = usbd_xfer_get_frame(xfer, 0); memset(&txpkt, 0, sizeof(txpkt)); txpkt.ure_pktlen = htole32((len & URE_TXPKT_LEN_MASK) | URE_TKPKT_TX_FS | URE_TKPKT_TX_LS); usbd_copy_in(pc, pos, &txpkt, sizeof(txpkt)); pos += sizeof(txpkt); usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len); pos += m->m_pkthdr.len; if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); /* * If there's a BPF listener, bounce a copy * of this frame to him. */ BPF_MTAP(ifp, m); m_freem(m); /* Set frame length. 
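 * The transfer consists of the ure_txpkt descriptor followed by the
 * mbuf data, so pos already covers both.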
*/ usbd_xfer_set_frame_len(xfer, 0, pos); usbd_transfer_submit(xfer); ifp->if_drv_flags |= IFF_DRV_OACTIVE; return; default: /* Error */ DPRINTFN(11, "transfer error, %s\n", usbd_errstr(error)); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto tr_setup; } return; } } static void ure_read_chipver(struct ure_softc *sc) { uint16_t ver; ver = ure_read_2(sc, URE_PLA_TCR1, URE_MCU_TYPE_PLA) & URE_VERSION_MASK; switch (ver) { case 0x4c00: sc->sc_chip |= URE_CHIP_VER_4C00; break; case 0x4c10: sc->sc_chip |= URE_CHIP_VER_4C10; break; case 0x5c00: sc->sc_chip |= URE_CHIP_VER_5C00; break; case 0x5c10: sc->sc_chip |= URE_CHIP_VER_5C10; break; case 0x5c20: sc->sc_chip |= URE_CHIP_VER_5C20; break; case 0x5c30: sc->sc_chip |= URE_CHIP_VER_5C30; break; default: device_printf(sc->sc_ue.ue_dev, "unknown version 0x%04x\n", ver); break; } } static void ure_attach_post(struct usb_ether *ue) { struct ure_softc *sc = uether_getsc(ue); sc->sc_phyno = 0; /* Determine the chip version. */ ure_read_chipver(sc); /* Initialize controller and get station address. */ if (sc->sc_flags & URE_FLAG_8152) ure_rtl8152_init(sc); else ure_rtl8153_init(sc); - if (sc->sc_chip & URE_CHIP_VER_4C00) + if ((sc->sc_chip & URE_CHIP_VER_4C00) || + (sc->sc_chip & URE_CHIP_VER_4C10)) ure_read_mem(sc, URE_PLA_IDR, URE_MCU_TYPE_PLA, ue->ue_eaddr, 8); else ure_read_mem(sc, URE_PLA_BACKUP, URE_MCU_TYPE_PLA, ue->ue_eaddr, 8); + + if (ETHER_IS_ZERO(sc->sc_ue.ue_eaddr)) { + device_printf(sc->sc_ue.ue_dev, "MAC assigned randomly\n"); + arc4rand(sc->sc_ue.ue_eaddr, ETHER_ADDR_LEN, 0); + sc->sc_ue.ue_eaddr[0] &= ~0x01; /* unicast */ + sc->sc_ue.ue_eaddr[0] |= 0x02; /* locally administered */ + } } static int ure_attach_post_sub(struct usb_ether *ue) { struct ure_softc *sc; struct ifnet *ifp; int error; sc = uether_getsc(ue); ifp = ue->ue_ifp; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_start = uether_start; ifp->if_ioctl = ure_ioctl; ifp->if_init = uether_init; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); mtx_lock(&Giant); error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp, uether_ifmedia_upd, ue->ue_methods->ue_mii_sts, BMSR_DEFCAPMASK, sc->sc_phyno, MII_OFFSET_ANY, 0); mtx_unlock(&Giant); return (error); } static void ure_init(struct usb_ether *ue) { struct ure_softc *sc = uether_getsc(ue); struct ifnet *ifp = uether_getifp(ue); URE_LOCK_ASSERT(sc, MA_OWNED); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; /* Cancel pending I/O. */ ure_stop(ue); ure_reset(sc); /* Set MAC address. */ + ure_write_1(sc, URE_PLA_CRWECR, URE_MCU_TYPE_PLA, URE_CRWECR_CONFIG); ure_write_mem(sc, URE_PLA_IDR, URE_MCU_TYPE_PLA | URE_BYTE_EN_SIX_BYTES, IF_LLADDR(ifp), 8); + ure_write_1(sc, URE_PLA_CRWECR, URE_MCU_TYPE_PLA, URE_CRWECR_NORAML); /* Reset the packet filter. */ ure_write_2(sc, URE_PLA_FMC, URE_MCU_TYPE_PLA, ure_read_2(sc, URE_PLA_FMC, URE_MCU_TYPE_PLA) & ~URE_FMC_FCR_MCU_EN); ure_write_2(sc, URE_PLA_FMC, URE_MCU_TYPE_PLA, ure_read_2(sc, URE_PLA_FMC, URE_MCU_TYPE_PLA) | URE_FMC_FCR_MCU_EN); /* Enable transmit and receive. */ ure_write_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA, ure_read_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA) | URE_CR_RE | URE_CR_TE); ure_write_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA, ure_read_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA) & ~URE_RXDY_GATED_EN); /* Configure RX filters. 
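 * ure_rxfilter() rebuilds the RCR receive-mode bits and the MAR0/MAR4
 * multicast hash from the current interface flags and multicast list.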
*/ ure_rxfilter(ue); usbd_xfer_set_stall(sc->sc_xfer[URE_BULK_DT_WR]); /* Indicate we are up and running. */ ifp->if_drv_flags |= IFF_DRV_RUNNING; /* Switch to selected media. */ ure_ifmedia_upd(ifp); } static void ure_tick(struct usb_ether *ue) { struct ure_softc *sc = uether_getsc(ue); struct mii_data *mii = GET_MII(sc); URE_LOCK_ASSERT(sc, MA_OWNED); mii_tick(mii); if ((sc->sc_flags & URE_FLAG_LINK) == 0 && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->sc_flags |= URE_FLAG_LINK; ure_start(ue); } } /* * Program the 64-bit multicast hash filter. */ static void ure_rxfilter(struct usb_ether *ue) { struct ure_softc *sc = uether_getsc(ue); struct ifnet *ifp = uether_getifp(ue); struct ifmultiaddr *ifma; uint32_t h, rxmode; uint32_t hashes[2] = { 0, 0 }; URE_LOCK_ASSERT(sc, MA_OWNED); rxmode = URE_RCR_APM; if (ifp->if_flags & IFF_BROADCAST) rxmode |= URE_RCR_AB; if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) { if (ifp->if_flags & IFF_PROMISC) rxmode |= URE_RCR_AAP; rxmode |= URE_RCR_AM; hashes[0] = hashes[1] = 0xffffffff; goto done; } rxmode |= URE_RCR_AM; if_maddr_rlock(ifp); CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); } if_maddr_runlock(ifp); h = bswap32(hashes[0]); hashes[0] = bswap32(hashes[1]); hashes[1] = h; rxmode |= URE_RCR_AM; done: ure_write_4(sc, URE_PLA_MAR0, URE_MCU_TYPE_PLA, hashes[0]); ure_write_4(sc, URE_PLA_MAR4, URE_MCU_TYPE_PLA, hashes[1]); ure_write_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA, rxmode); } static void ure_start(struct usb_ether *ue) { struct ure_softc *sc = uether_getsc(ue); /* * start the USB transfers, if not already started: */ usbd_transfer_start(sc->sc_xfer[URE_BULK_DT_RD]); usbd_transfer_start(sc->sc_xfer[URE_BULK_DT_WR]); } static void ure_reset(struct ure_softc *sc) { int i; ure_write_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA, URE_CR_RST); for (i = 0; i < URE_TIMEOUT; i++) { if (!(ure_read_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA) & URE_CR_RST)) break; uether_pause(&sc->sc_ue, hz / 100); } if (i == URE_TIMEOUT) device_printf(sc->sc_ue.ue_dev, "reset never completed\n"); } /* * Set media options. */ static int ure_ifmedia_upd(struct ifnet *ifp) { struct ure_softc *sc = ifp->if_softc; struct mii_data *mii = GET_MII(sc); struct mii_softc *miisc; int error; URE_LOCK_ASSERT(sc, MA_OWNED); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); error = mii_mediachg(mii); return (error); } /* * Report current media status. 
*/ static void ure_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct ure_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = GET_MII(sc); URE_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; URE_UNLOCK(sc); } static int ure_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct usb_ether *ue = ifp->if_softc; struct ure_softc *sc; struct ifreq *ifr; int error, mask, reinit; sc = uether_getsc(ue); ifr = (struct ifreq *)data; error = 0; reinit = 0; if (cmd == SIOCSIFCAP) { URE_LOCK(sc); mask = ifr->ifr_reqcap ^ ifp->if_capenable; if (reinit > 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) ifp->if_drv_flags &= ~IFF_DRV_RUNNING; else reinit = 0; URE_UNLOCK(sc); if (reinit > 0) uether_init(ue); } else error = uether_ioctl(ifp, cmd, data); return (error); } static void ure_rtl8152_init(struct ure_softc *sc) { uint32_t pwrctrl; /* Disable ALDPS. */ ure_ocp_reg_write(sc, URE_OCP_ALDPS_CONFIG, URE_ENPDNPS | URE_LINKENA | URE_DIS_SDSAVE); uether_pause(&sc->sc_ue, hz / 50); if (sc->sc_chip & URE_CHIP_VER_4C00) { ure_write_2(sc, URE_PLA_LED_FEATURE, URE_MCU_TYPE_PLA, ure_read_2(sc, URE_PLA_LED_FEATURE, URE_MCU_TYPE_PLA) & ~URE_LED_MODE_MASK); } ure_write_2(sc, URE_USB_UPS_CTRL, URE_MCU_TYPE_USB, ure_read_2(sc, URE_USB_UPS_CTRL, URE_MCU_TYPE_USB) & ~URE_POWER_CUT); ure_write_2(sc, URE_USB_PM_CTRL_STATUS, URE_MCU_TYPE_USB, ure_read_2(sc, URE_USB_PM_CTRL_STATUS, URE_MCU_TYPE_USB) & ~URE_RESUME_INDICATE); ure_write_2(sc, URE_PLA_PHY_PWR, URE_MCU_TYPE_PLA, ure_read_2(sc, URE_PLA_PHY_PWR, URE_MCU_TYPE_PLA) | URE_TX_10M_IDLE_EN | URE_PFM_PWM_SWITCH); pwrctrl = ure_read_4(sc, URE_PLA_MAC_PWR_CTRL, URE_MCU_TYPE_PLA); pwrctrl &= ~URE_MCU_CLK_RATIO_MASK; pwrctrl |= URE_MCU_CLK_RATIO | URE_D3_CLK_GATED_EN; ure_write_4(sc, URE_PLA_MAC_PWR_CTRL, URE_MCU_TYPE_PLA, pwrctrl); ure_write_2(sc, URE_PLA_GPHY_INTR_IMR, URE_MCU_TYPE_PLA, URE_GPHY_STS_MSK | URE_SPEED_DOWN_MSK | URE_SPDWN_RXDV_MSK | URE_SPDWN_LINKCHG_MSK); /* Disable Rx aggregation. */ ure_write_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB, ure_read_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB) | URE_RX_AGG_DISABLE); /* Disable ALDPS. */ ure_ocp_reg_write(sc, URE_OCP_ALDPS_CONFIG, URE_ENPDNPS | URE_LINKENA | URE_DIS_SDSAVE); uether_pause(&sc->sc_ue, hz / 50); ure_init_fifo(sc); ure_write_1(sc, URE_USB_TX_AGG, URE_MCU_TYPE_USB, URE_TX_AGG_MAX_THRESHOLD); ure_write_4(sc, URE_USB_RX_BUF_TH, URE_MCU_TYPE_USB, URE_RX_THR_HIGH); ure_write_4(sc, URE_USB_TX_DMA, URE_MCU_TYPE_USB, URE_TEST_MODE_DISABLE | URE_TX_SIZE_ADJUST1); } static void ure_rtl8153_init(struct ure_softc *sc) { uint16_t val; uint8_t u1u2[8]; int i; /* Disable ALDPS. 
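 * ALDPS (a PHY power-saving feature) is cleared here and once more
 * further down, just before ure_init_fifo().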
*/ ure_ocp_reg_write(sc, URE_OCP_POWER_CFG, ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) & ~URE_EN_ALDPS); uether_pause(&sc->sc_ue, hz / 50); memset(u1u2, 0x00, sizeof(u1u2)); ure_write_mem(sc, URE_USB_TOLERANCE, URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2)); for (i = 0; i < URE_TIMEOUT; i++) { if (ure_read_2(sc, URE_PLA_BOOT_CTRL, URE_MCU_TYPE_PLA) & URE_AUTOLOAD_DONE) break; uether_pause(&sc->sc_ue, hz / 100); } if (i == URE_TIMEOUT) device_printf(sc->sc_ue.ue_dev, "timeout waiting for chip autoload\n"); for (i = 0; i < URE_TIMEOUT; i++) { val = ure_ocp_reg_read(sc, URE_OCP_PHY_STATUS) & URE_PHY_STAT_MASK; if (val == URE_PHY_STAT_LAN_ON || val == URE_PHY_STAT_PWRDN) break; uether_pause(&sc->sc_ue, hz / 100); } if (i == URE_TIMEOUT) device_printf(sc->sc_ue.ue_dev, "timeout waiting for phy to stabilize\n"); ure_write_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB, ure_read_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB) & ~URE_U2P3_ENABLE); if (sc->sc_chip & URE_CHIP_VER_5C10) { val = ure_read_2(sc, URE_USB_SSPHYLINK2, URE_MCU_TYPE_USB); val &= ~URE_PWD_DN_SCALE_MASK; val |= URE_PWD_DN_SCALE(96); ure_write_2(sc, URE_USB_SSPHYLINK2, URE_MCU_TYPE_USB, val); ure_write_1(sc, URE_USB_USB2PHY, URE_MCU_TYPE_USB, ure_read_1(sc, URE_USB_USB2PHY, URE_MCU_TYPE_USB) | URE_USB2PHY_L1 | URE_USB2PHY_SUSPEND); } else if (sc->sc_chip & URE_CHIP_VER_5C20) { ure_write_1(sc, URE_PLA_DMY_REG0, URE_MCU_TYPE_PLA, ure_read_1(sc, URE_PLA_DMY_REG0, URE_MCU_TYPE_PLA) & ~URE_ECM_ALDPS); } if (sc->sc_chip & (URE_CHIP_VER_5C20 | URE_CHIP_VER_5C30)) { val = ure_read_1(sc, URE_USB_CSR_DUMMY1, URE_MCU_TYPE_USB); if (ure_read_2(sc, URE_USB_BURST_SIZE, URE_MCU_TYPE_USB) == 0) val &= ~URE_DYNAMIC_BURST; else val |= URE_DYNAMIC_BURST; ure_write_1(sc, URE_USB_CSR_DUMMY1, URE_MCU_TYPE_USB, val); } ure_write_1(sc, URE_USB_CSR_DUMMY2, URE_MCU_TYPE_USB, ure_read_1(sc, URE_USB_CSR_DUMMY2, URE_MCU_TYPE_USB) | URE_EP4_FULL_FC); ure_write_2(sc, URE_USB_WDT11_CTRL, URE_MCU_TYPE_USB, ure_read_2(sc, URE_USB_WDT11_CTRL, URE_MCU_TYPE_USB) & ~URE_TIMER11_EN); ure_write_2(sc, URE_PLA_LED_FEATURE, URE_MCU_TYPE_PLA, ure_read_2(sc, URE_PLA_LED_FEATURE, URE_MCU_TYPE_PLA) & ~URE_LED_MODE_MASK); if ((sc->sc_chip & URE_CHIP_VER_5C10) && usbd_get_speed(sc->sc_ue.ue_udev) != USB_SPEED_SUPER) val = URE_LPM_TIMER_500MS; else val = URE_LPM_TIMER_500US; ure_write_1(sc, URE_USB_LPM_CTRL, URE_MCU_TYPE_USB, val | URE_FIFO_EMPTY_1FB | URE_ROK_EXIT_LPM); val = ure_read_2(sc, URE_USB_AFE_CTRL2, URE_MCU_TYPE_USB); val &= ~URE_SEN_VAL_MASK; val |= URE_SEN_VAL_NORMAL | URE_SEL_RXIDLE; ure_write_2(sc, URE_USB_AFE_CTRL2, URE_MCU_TYPE_USB, val); ure_write_2(sc, URE_USB_CONNECT_TIMER, URE_MCU_TYPE_USB, 0x0001); ure_write_2(sc, URE_USB_POWER_CUT, URE_MCU_TYPE_USB, ure_read_2(sc, URE_USB_POWER_CUT, URE_MCU_TYPE_USB) & ~(URE_PWR_EN | URE_PHASE2_EN)); ure_write_2(sc, URE_USB_MISC_0, URE_MCU_TYPE_USB, ure_read_2(sc, URE_USB_MISC_0, URE_MCU_TYPE_USB) & ~URE_PCUT_STATUS); memset(u1u2, 0xff, sizeof(u1u2)); ure_write_mem(sc, URE_USB_TOLERANCE, URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2)); ure_write_2(sc, URE_PLA_MAC_PWR_CTRL, URE_MCU_TYPE_PLA, URE_ALDPS_SPDWN_RATIO); ure_write_2(sc, URE_PLA_MAC_PWR_CTRL2, URE_MCU_TYPE_PLA, URE_EEE_SPDWN_RATIO); ure_write_2(sc, URE_PLA_MAC_PWR_CTRL3, URE_MCU_TYPE_PLA, URE_PKT_AVAIL_SPDWN_EN | URE_SUSPEND_SPDWN_EN | URE_U1U2_SPDWN_EN | URE_L1_SPDWN_EN); ure_write_2(sc, URE_PLA_MAC_PWR_CTRL4, URE_MCU_TYPE_PLA, URE_PWRSAVE_SPDWN_EN | URE_RXDV_SPDWN_EN | URE_TX10MIDLE_EN | URE_TP100_SPDWN_EN | URE_TP500_SPDWN_EN | 
URE_TP1000_SPDWN_EN | URE_EEE_SPDWN_EN); val = ure_read_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB); if (!(sc->sc_chip & (URE_CHIP_VER_5C00 | URE_CHIP_VER_5C10))) val |= URE_U2P3_ENABLE; else val &= ~URE_U2P3_ENABLE; ure_write_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB, val); memset(u1u2, 0x00, sizeof(u1u2)); ure_write_mem(sc, URE_USB_TOLERANCE, URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2)); /* Disable ALDPS. */ ure_ocp_reg_write(sc, URE_OCP_POWER_CFG, ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) & ~URE_EN_ALDPS); uether_pause(&sc->sc_ue, hz / 50); ure_init_fifo(sc); /* Disable Rx aggregation. */ ure_write_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB, ure_read_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB) | URE_RX_AGG_DISABLE); val = ure_read_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB); if (!(sc->sc_chip & (URE_CHIP_VER_5C00 | URE_CHIP_VER_5C10))) val |= URE_U2P3_ENABLE; else val &= ~URE_U2P3_ENABLE; ure_write_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB, val); memset(u1u2, 0xff, sizeof(u1u2)); ure_write_mem(sc, URE_USB_TOLERANCE, URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2)); } static void ure_stop(struct usb_ether *ue) { struct ure_softc *sc = uether_getsc(ue); struct ifnet *ifp = uether_getifp(ue); URE_LOCK_ASSERT(sc, MA_OWNED); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->sc_flags &= ~URE_FLAG_LINK; /* * stop all the transfers, if not already stopped: */ usbd_transfer_stop(sc->sc_xfer[URE_BULK_DT_WR]); usbd_transfer_stop(sc->sc_xfer[URE_BULK_DT_RD]); } static void ure_disable_teredo(struct ure_softc *sc) { ure_write_4(sc, URE_PLA_TEREDO_CFG, URE_MCU_TYPE_PLA, ure_read_4(sc, URE_PLA_TEREDO_CFG, URE_MCU_TYPE_PLA) & ~(URE_TEREDO_SEL | URE_TEREDO_RS_EVENT_MASK | URE_OOB_TEREDO_EN)); ure_write_2(sc, URE_PLA_WDT6_CTRL, URE_MCU_TYPE_PLA, URE_WDT6_SET_MODE); ure_write_2(sc, URE_PLA_REALWOW_TIMER, URE_MCU_TYPE_PLA, 0); ure_write_4(sc, URE_PLA_TEREDO_TIMER, URE_MCU_TYPE_PLA, 0); } static void ure_init_fifo(struct ure_softc *sc) { uint32_t rx_fifo1, rx_fifo2; int i; ure_write_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA, ure_read_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA) | URE_RXDY_GATED_EN); ure_disable_teredo(sc); ure_write_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA, ure_read_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA) & ~URE_RCR_ACPT_ALL); if (!(sc->sc_flags & URE_FLAG_8152)) { if (sc->sc_chip & (URE_CHIP_VER_5C00 | URE_CHIP_VER_5C10 | URE_CHIP_VER_5C20)) { ure_ocp_reg_write(sc, URE_OCP_ADC_CFG, URE_CKADSEL_L | URE_ADC_EN | URE_EN_EMI_L); } if (sc->sc_chip & URE_CHIP_VER_5C00) { ure_ocp_reg_write(sc, URE_OCP_EEE_CFG, ure_ocp_reg_read(sc, URE_OCP_EEE_CFG) & ~URE_CTAP_SHORT_EN); } ure_ocp_reg_write(sc, URE_OCP_POWER_CFG, ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) | URE_EEE_CLKDIV_EN); ure_ocp_reg_write(sc, URE_OCP_DOWN_SPEED, ure_ocp_reg_read(sc, URE_OCP_DOWN_SPEED) | URE_EN_10M_BGOFF); ure_ocp_reg_write(sc, URE_OCP_POWER_CFG, ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) | URE_EN_10M_PLLOFF); ure_ocp_reg_write(sc, URE_OCP_SRAM_ADDR, URE_SRAM_IMPEDANCE); ure_ocp_reg_write(sc, URE_OCP_SRAM_DATA, 0x0b13); ure_write_2(sc, URE_PLA_PHY_PWR, URE_MCU_TYPE_PLA, ure_read_2(sc, URE_PLA_PHY_PWR, URE_MCU_TYPE_PLA) | URE_PFM_PWM_SWITCH); /* Enable LPF corner auto tune. */ ure_ocp_reg_write(sc, URE_OCP_SRAM_ADDR, URE_SRAM_LPF_CFG); ure_ocp_reg_write(sc, URE_OCP_SRAM_DATA, 0xf70f); /* Adjust 10M amplitude. 
*/ ure_ocp_reg_write(sc, URE_OCP_SRAM_ADDR, URE_SRAM_10M_AMP1); ure_ocp_reg_write(sc, URE_OCP_SRAM_DATA, 0x00af); ure_ocp_reg_write(sc, URE_OCP_SRAM_ADDR, URE_SRAM_10M_AMP2); ure_ocp_reg_write(sc, URE_OCP_SRAM_DATA, 0x0208); } ure_reset(sc); ure_write_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA, 0); ure_write_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA, ure_read_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA) & ~URE_NOW_IS_OOB); ure_write_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA, ure_read_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA) & ~URE_MCU_BORW_EN); for (i = 0; i < URE_TIMEOUT; i++) { if (ure_read_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA) & URE_LINK_LIST_READY) break; uether_pause(&sc->sc_ue, hz / 100); } if (i == URE_TIMEOUT) device_printf(sc->sc_ue.ue_dev, "timeout waiting for OOB control\n"); ure_write_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA, ure_read_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA) | URE_RE_INIT_LL); for (i = 0; i < URE_TIMEOUT; i++) { if (ure_read_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA) & URE_LINK_LIST_READY) break; uether_pause(&sc->sc_ue, hz / 100); } if (i == URE_TIMEOUT) device_printf(sc->sc_ue.ue_dev, "timeout waiting for OOB control\n"); ure_write_2(sc, URE_PLA_CPCR, URE_MCU_TYPE_PLA, ure_read_2(sc, URE_PLA_CPCR, URE_MCU_TYPE_PLA) & ~URE_CPCR_RX_VLAN); ure_write_2(sc, URE_PLA_TCR0, URE_MCU_TYPE_PLA, ure_read_2(sc, URE_PLA_TCR0, URE_MCU_TYPE_PLA) | URE_TCR0_AUTO_FIFO); /* Configure Rx FIFO threshold. */ ure_write_4(sc, URE_PLA_RXFIFO_CTRL0, URE_MCU_TYPE_PLA, URE_RXFIFO_THR1_NORMAL); if (usbd_get_speed(sc->sc_ue.ue_udev) == USB_SPEED_FULL) { rx_fifo1 = URE_RXFIFO_THR2_FULL; rx_fifo2 = URE_RXFIFO_THR3_FULL; } else { rx_fifo1 = URE_RXFIFO_THR2_HIGH; rx_fifo2 = URE_RXFIFO_THR3_HIGH; } ure_write_4(sc, URE_PLA_RXFIFO_CTRL1, URE_MCU_TYPE_PLA, rx_fifo1); ure_write_4(sc, URE_PLA_RXFIFO_CTRL2, URE_MCU_TYPE_PLA, rx_fifo2); /* Configure Tx FIFO threshold. */ ure_write_4(sc, URE_PLA_TXFIFO_CTRL, URE_MCU_TYPE_PLA, URE_TXFIFO_THR_NORMAL); } Index: projects/runtime-coverage-v2/sys/kern/imgact_elf.c =================================================================== --- projects/runtime-coverage-v2/sys/kern/imgact_elf.c (revision 346057) +++ projects/runtime-coverage-v2/sys/kern/imgact_elf.c (revision 346058) @@ -1,2711 +1,2719 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2017 Dell EMC * Copyright (c) 2000-2001, 2003 David O'Brien * Copyright (c) 1995-1996 Søren Schmidt * Copyright (c) 1996 Peter Wemm * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer * in this position and unchanged. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_capsicum.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define ELF_NOTE_ROUNDSIZE 4 #define OLD_EI_BRAND 8 static int __elfN(check_header)(const Elf_Ehdr *hdr); static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp, const char *interp, int32_t *osrel, uint32_t *fctl0); static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr, u_long *entry); static int __elfN(load_section)(struct image_params *imgp, vm_ooffset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot); static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp); static bool __elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel); static bool kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel); static boolean_t __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *checknote, int32_t *osrel, uint32_t *fctl0); static vm_prot_t __elfN(trans_prot)(Elf_Word); static Elf_Word __elfN(untrans_prot)(vm_prot_t); SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0, ""); #define CORE_BUF_SIZE (16 * 1024) int __elfN(fallback_brand) = -1; SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, fallback_brand, CTLFLAG_RWTUN, &__elfN(fallback_brand), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort"); static int elf_legacy_coredump = 0; SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW, &elf_legacy_coredump, 0, "include all and only RW pages in core dumps"); int __elfN(nxstack) = #if defined(__amd64__) || defined(__powerpc64__) /* both 64 and 32 bit */ || \ (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__) || \ defined(__riscv) 1; #else 0; #endif SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, nxstack, CTLFLAG_RW, &__elfN(nxstack), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable non-executable stack"); #if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__)) int i386_read_exec = 0; SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0, "enable execution from readable segments"); #endif SYSCTL_NODE(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, aslr, CTLFLAG_RW, 0, ""); #define ASLR_NODE_OID __CONCAT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), _aslr) static int __elfN(aslr_enabled) = 0; SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, enable, CTLFLAG_RWTUN, &__elfN(aslr_enabled), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable address map randomization"); static int __elfN(pie_aslr_enabled) = 0; SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, pie_enable, CTLFLAG_RWTUN, &__elfN(pie_aslr_enabled), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable address map 
randomization for PIE binaries"); static int __elfN(aslr_honor_sbrk) = 1; SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, honor_sbrk, CTLFLAG_RW, &__elfN(aslr_honor_sbrk), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": assume sbrk is used"); static Elf_Brandinfo *elf_brand_list[MAX_BRANDS]; #define aligned(a, t) (rounddown2((u_long)(a), sizeof(t)) == (u_long)(a)) static const char FREEBSD_ABI_VENDOR[] = "FreeBSD"; Elf_Brandnote __elfN(freebsd_brandnote) = { .hdr.n_namesz = sizeof(FREEBSD_ABI_VENDOR), .hdr.n_descsz = sizeof(int32_t), .hdr.n_type = NT_FREEBSD_ABI_TAG, .vendor = FREEBSD_ABI_VENDOR, .flags = BN_TRANSLATE_OSREL, .trans_osrel = __elfN(freebsd_trans_osrel) }; static bool __elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel) { uintptr_t p; p = (uintptr_t)(note + 1); p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE); *osrel = *(const int32_t *)(p); return (true); } static const char GNU_ABI_VENDOR[] = "GNU"; static int GNU_KFREEBSD_ABI_DESC = 3; Elf_Brandnote __elfN(kfreebsd_brandnote) = { .hdr.n_namesz = sizeof(GNU_ABI_VENDOR), .hdr.n_descsz = 16, /* XXX at least 16 */ .hdr.n_type = 1, .vendor = GNU_ABI_VENDOR, .flags = BN_TRANSLATE_OSREL, .trans_osrel = kfreebsd_trans_osrel }; static bool kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel) { const Elf32_Word *desc; uintptr_t p; p = (uintptr_t)(note + 1); p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE); desc = (const Elf32_Word *)p; if (desc[0] != GNU_KFREEBSD_ABI_DESC) return (false); /* * Debian GNU/kFreeBSD embed the earliest compatible kernel version * (__FreeBSD_version: Rxx) in the LSB way. */ *osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3]; return (true); } int __elfN(insert_brand_entry)(Elf_Brandinfo *entry) { int i; for (i = 0; i < MAX_BRANDS; i++) { if (elf_brand_list[i] == NULL) { elf_brand_list[i] = entry; break; } } if (i == MAX_BRANDS) { printf("WARNING: %s: could not insert brandinfo entry: %p\n", __func__, entry); return (-1); } return (0); } int __elfN(remove_brand_entry)(Elf_Brandinfo *entry) { int i; for (i = 0; i < MAX_BRANDS; i++) { if (elf_brand_list[i] == entry) { elf_brand_list[i] = NULL; break; } } if (i == MAX_BRANDS) return (-1); return (0); } int __elfN(brand_inuse)(Elf_Brandinfo *entry) { struct proc *p; int rval = FALSE; sx_slock(&allproc_lock); FOREACH_PROC_IN_SYSTEM(p) { if (p->p_sysent == entry->sysvec) { rval = TRUE; break; } } sx_sunlock(&allproc_lock); return (rval); } static Elf_Brandinfo * __elfN(get_brandinfo)(struct image_params *imgp, const char *interp, int32_t *osrel, uint32_t *fctl0) { const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header; Elf_Brandinfo *bi, *bi_m; boolean_t ret; int i, interp_name_len; interp_name_len = interp != NULL ? strlen(interp) + 1 : 0; /* * We support four types of branding -- (1) the ELF EI_OSABI field * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string * branding w/in the ELF header, (3) path of the `interp_path' * field, and (4) the ".note.ABI-tag" ELF section. 
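 * The loops below check them in order of preference rather than as
 * listed: the ABI note first, then the EI_OSABI/compat-3 brand bytes,
 * then any brand whose header_supported hook recognizes the image,
 * then the interpreter path, and finally the fallback_brand sysctl.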
*/ /* Look for an ".note.ABI-tag" ELF section */ bi_m = NULL; for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL) continue; if (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0) continue; if (hdr->e_machine == bi->machine && (bi->flags & (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) { ret = __elfN(check_note)(imgp, bi->brand_note, osrel, fctl0); /* Give brand a chance to veto check_note's guess */ if (ret && bi->header_supported) ret = bi->header_supported(imgp); /* * If note checker claimed the binary, but the * interpreter path in the image does not * match default one for the brand, try to * search for other brands with the same * interpreter. Either there is better brand * with the right interpreter, or, failing * this, we return first brand which accepted * our note and, optionally, header. */ if (ret && bi_m == NULL && interp != NULL && (bi->interp_path == NULL || (strlen(bi->interp_path) + 1 != interp_name_len || strncmp(interp, bi->interp_path, interp_name_len) != 0))) { bi_m = bi; ret = 0; } if (ret) return (bi); } } if (bi_m != NULL) return (bi_m); /* If the executable has a brand, search for it in the brand list. */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 || (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0)) continue; if (hdr->e_machine == bi->machine && (hdr->e_ident[EI_OSABI] == bi->brand || (bi->compat_3_brand != NULL && strcmp((const char *)&hdr->e_ident[OLD_EI_BRAND], bi->compat_3_brand) == 0))) { /* Looks good, but give brand a chance to veto */ if (bi->header_supported == NULL || bi->header_supported(imgp)) { /* * Again, prefer strictly matching * interpreter path. */ if (interp_name_len == 0 && bi->interp_path == NULL) return (bi); if (bi->interp_path != NULL && strlen(bi->interp_path) + 1 == interp_name_len && strncmp(interp, bi->interp_path, interp_name_len) == 0) return (bi); if (bi_m == NULL) bi_m = bi; } } } if (bi_m != NULL) return (bi_m); /* No known brand, see if the header is recognized by any brand */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY || bi->header_supported == NULL) continue; if (hdr->e_machine == bi->machine) { ret = bi->header_supported(imgp); if (ret) return (bi); } } /* Lacking a known brand, search for a recognized interpreter. 
*/ if (interp != NULL) { for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL || (bi->flags & (BI_BRAND_NOTE_MANDATORY | BI_BRAND_ONLY_STATIC)) != 0) continue; if (hdr->e_machine == bi->machine && bi->interp_path != NULL && /* ELF image p_filesz includes terminating zero */ strlen(bi->interp_path) + 1 == interp_name_len && strncmp(interp, bi->interp_path, interp_name_len) == 0 && (bi->header_supported == NULL || bi->header_supported(imgp))) return (bi); } } /* Lacking a recognized interpreter, try the default brand */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 || (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0)) continue; if (hdr->e_machine == bi->machine && __elfN(fallback_brand) == bi->brand && (bi->header_supported == NULL || bi->header_supported(imgp))) return (bi); } return (NULL); } static int __elfN(check_header)(const Elf_Ehdr *hdr) { Elf_Brandinfo *bi; int i; if (!IS_ELF(*hdr) || hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS || hdr->e_ident[EI_DATA] != ELF_TARG_DATA || hdr->e_ident[EI_VERSION] != EV_CURRENT || hdr->e_phentsize != sizeof(Elf_Phdr) || hdr->e_version != ELF_TARG_VER) return (ENOEXEC); /* * Make sure we have at least one brand for this machine. */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi != NULL && bi->machine == hdr->e_machine) break; } if (i == MAX_BRANDS) return (ENOEXEC); return (0); } static int __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot) { struct sf_buf *sf; int error; vm_offset_t off; /* * Create the page if it doesn't exist yet. Ignore errors. */ vm_map_fixed(map, NULL, 0, trunc_page(start), round_page(end) - trunc_page(start), VM_PROT_ALL, VM_PROT_ALL, MAP_CHECK_EXCL); /* * Find the page from the underlying object. */ if (object != NULL) { sf = vm_imgact_map_page(object, offset); if (sf == NULL) return (KERN_FAILURE); off = offset - trunc_page(offset); error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start, end - start); vm_imgact_unmap_page(sf); if (error != 0) return (KERN_FAILURE); } return (KERN_SUCCESS); } static int __elfN(map_insert)(struct image_params *imgp, vm_map_t map, vm_object_t object, vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow) { struct sf_buf *sf; vm_offset_t off; vm_size_t sz; int error, locked, rv; if (start != trunc_page(start)) { rv = __elfN(map_partial)(map, object, offset, start, round_page(start), prot); if (rv != KERN_SUCCESS) return (rv); offset += round_page(start) - start; start = round_page(start); } if (end != round_page(end)) { rv = __elfN(map_partial)(map, object, offset + trunc_page(end) - start, trunc_page(end), end, prot); if (rv != KERN_SUCCESS) return (rv); end = trunc_page(end); } if (start >= end) return (KERN_SUCCESS); if ((offset & PAGE_MASK) != 0) { /* * The mapping is not page aligned. This means that we have * to copy the data. 
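 * A non-page-aligned file offset cannot be mapped straight from the
 * executable's VM object, so an anonymous writable mapping is installed
 * and the contents are copied into it in page-sized chunks via sf_bufs.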
*/ rv = vm_map_fixed(map, NULL, 0, start, end - start, prot | VM_PROT_WRITE, VM_PROT_ALL, MAP_CHECK_EXCL); if (rv != KERN_SUCCESS) return (rv); if (object == NULL) return (KERN_SUCCESS); for (; start < end; start += sz) { sf = vm_imgact_map_page(object, offset); if (sf == NULL) return (KERN_FAILURE); off = offset - trunc_page(offset); sz = end - start; if (sz > PAGE_SIZE - off) sz = PAGE_SIZE - off; error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start, sz); vm_imgact_unmap_page(sf); if (error != 0) return (KERN_FAILURE); offset += sz; } } else { vm_object_reference(object); rv = vm_map_fixed(map, object, offset, start, end - start, prot, VM_PROT_ALL, cow | MAP_CHECK_EXCL); if (rv != KERN_SUCCESS) { locked = VOP_ISLOCKED(imgp->vp); VOP_UNLOCK(imgp->vp, 0); vm_object_deallocate(object); vn_lock(imgp->vp, locked | LK_RETRY); return (rv); } } return (KERN_SUCCESS); } static int __elfN(load_section)(struct image_params *imgp, vm_ooffset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot) { struct sf_buf *sf; size_t map_len; vm_map_t map; vm_object_t object; vm_offset_t off, map_addr; int error, rv, cow; size_t copy_len; vm_ooffset_t file_addr; /* * It's necessary to fail if the filsz + offset taken from the * header is greater than the actual file pager object's size. * If we were to allow this, then the vm_map_find() below would * walk right off the end of the file object and into the ether. * * While I'm here, might as well check for something else that * is invalid: filsz cannot be greater than memsz. */ if ((filsz != 0 && (off_t)filsz + offset > imgp->attr->va_size) || filsz > memsz) { uprintf("elf_load_section: truncated ELF file\n"); return (ENOEXEC); } object = imgp->object; map = &imgp->proc->p_vmspace->vm_map; map_addr = trunc_page((vm_offset_t)vmaddr); file_addr = trunc_page(offset); /* * We have two choices. We can either clear the data in the last page * of an oversized mapping, or we can start the anon mapping a page * early and copy the initialized data into that first page. We * choose the second. */ if (filsz == 0) map_len = 0; else if (memsz > filsz) map_len = trunc_page(offset + filsz) - file_addr; else map_len = round_page(offset + filsz) - file_addr; if (map_len != 0) { /* cow flags: don't dump readonly sections in core */ cow = MAP_COPY_ON_WRITE | MAP_PREFAULT | (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP); rv = __elfN(map_insert)(imgp, map, object, file_addr, /* file offset */ map_addr, /* virtual start */ map_addr + map_len,/* virtual end */ prot, cow); if (rv != KERN_SUCCESS) return (EINVAL); /* we can stop now if we've covered it all */ if (memsz == filsz) return (0); } /* * We have to get the remaining bit of the file into the first part * of the oversized map segment. This is normally because the .data * segment in the file is extended to provide bss. It's a neat idea * to try and save a page, but it's a pain in the behind to implement. */ copy_len = filsz == 0 ? 0 : (offset + filsz) - trunc_page(offset + filsz); map_addr = trunc_page((vm_offset_t)vmaddr + filsz); map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr; /* This had damn well better be true! 
*/ if (map_len != 0) { rv = __elfN(map_insert)(imgp, map, NULL, 0, map_addr, map_addr + map_len, prot, 0); if (rv != KERN_SUCCESS) return (EINVAL); } if (copy_len != 0) { sf = vm_imgact_map_page(object, offset + filsz); if (sf == NULL) return (EIO); /* send the page fragment to user space */ off = trunc_page(offset + filsz) - trunc_page(offset + filsz); error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)map_addr, copy_len); vm_imgact_unmap_page(sf); if (error != 0) return (error); } /* * Remove write access to the page if it was only granted by map_insert * to allow copyout. */ if ((prot & VM_PROT_WRITE) == 0) vm_map_protect(map, trunc_page(map_addr), round_page(map_addr + map_len), prot, FALSE); return (0); } +static int +__elfN(load_sections)(struct image_params *imgp, const Elf_Ehdr *hdr, + const Elf_Phdr *phdr, u_long rbase, u_long *base_addrp) +{ + vm_prot_t prot; + u_long base_addr; + bool first; + int error, i; + + base_addr = 0; + first = true; + + for (i = 0; i < hdr->e_phnum; i++) { + if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0) + continue; + + /* Loadable segment */ + prot = __elfN(trans_prot)(phdr[i].p_flags); + error = __elfN(load_section)(imgp, phdr[i].p_offset, + (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase, + phdr[i].p_memsz, phdr[i].p_filesz, prot); + if (error != 0) + return (error); + + /* + * Establish the base address if this is the first segment. + */ + if (first) { + base_addr = trunc_page(phdr[i].p_vaddr + rbase); + first = false; + } + } + + if (base_addrp != NULL) + *base_addrp = base_addr; + + return (0); +} + /* * Load the file "file" into memory. It may be either a shared object * or an executable. * * The "addr" reference parameter is in/out. On entry, it specifies * the address where a shared object should be loaded. If the file is * an executable, this value is ignored. On exit, "addr" specifies * where the file was actually loaded. * * The "entry" reference parameter is out only. On exit, it specifies * the entry point for the loaded file. */ static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr, u_long *entry) { struct { struct nameidata nd; struct vattr attr; struct image_params image_params; } *tempdata; const Elf_Ehdr *hdr = NULL; const Elf_Phdr *phdr = NULL; struct nameidata *nd; struct vattr *attr; struct image_params *imgp; - vm_prot_t prot; u_long rbase; u_long base_addr = 0; - int error, i, numsegs; + int error; #ifdef CAPABILITY_MODE /* * XXXJA: This check can go away once we are sufficiently confident * that the checks in namei() are correct. */ if (IN_CAPABILITY_MODE(curthread)) return (ECAPMODE); #endif tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK); nd = &tempdata->nd; attr = &tempdata->attr; imgp = &tempdata->image_params; /* * Initialize part of the common data */ imgp->proc = p; imgp->attr = attr; imgp->firstpage = NULL; imgp->image_header = NULL; imgp->object = NULL; imgp->execlabel = NULL; NDINIT(nd, LOOKUP, LOCKLEAF | FOLLOW, UIO_SYSSPACE, file, curthread); if ((error = namei(nd)) != 0) { nd->ni_vp = NULL; goto fail; } NDFREE(nd, NDF_ONLY_PNBUF); imgp->vp = nd->ni_vp; /* * Check permissions, modes, uid, etc on the file, and "open" it. */ error = exec_check_permissions(imgp); if (error) goto fail; error = exec_map_first_page(imgp); if (error) goto fail; /* * Also make certain that the interpreter stays the same, so set * its VV_TEXT flag, too. 
*/ VOP_SET_TEXT(nd->ni_vp); imgp->object = nd->ni_vp->v_object; hdr = (const Elf_Ehdr *)imgp->image_header; if ((error = __elfN(check_header)(hdr)) != 0) goto fail; if (hdr->e_type == ET_DYN) rbase = *addr; else if (hdr->e_type == ET_EXEC) rbase = 0; else { error = ENOEXEC; goto fail; } /* Only support headers that fit within first page for now */ if ((hdr->e_phoff > PAGE_SIZE) || (u_int)hdr->e_phentsize * hdr->e_phnum > PAGE_SIZE - hdr->e_phoff) { error = ENOEXEC; goto fail; } phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); if (!aligned(phdr, Elf_Addr)) { error = ENOEXEC; goto fail; } - for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) { - if (phdr[i].p_type == PT_LOAD && phdr[i].p_memsz != 0) { - /* Loadable segment */ - prot = __elfN(trans_prot)(phdr[i].p_flags); - error = __elfN(load_section)(imgp, phdr[i].p_offset, - (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase, - phdr[i].p_memsz, phdr[i].p_filesz, prot); - if (error != 0) - goto fail; - /* - * Establish the base address if this is the - * first segment. - */ - if (numsegs == 0) - base_addr = trunc_page(phdr[i].p_vaddr + - rbase); - numsegs++; - } - } + error = __elfN(load_sections)(imgp, hdr, phdr, rbase, &base_addr); + if (error != 0) + goto fail; + *addr = base_addr; *entry = (unsigned long)hdr->e_entry + rbase; fail: if (imgp->firstpage) exec_unmap_first_page(imgp); if (nd->ni_vp) vput(nd->ni_vp); free(tempdata, M_TEMP); return (error); } static u_long __CONCAT(rnd_, __elfN(base))(vm_map_t map __unused, u_long minv, u_long maxv, u_int align) { u_long rbase, res; MPASS(vm_map_min(map) <= minv); MPASS(maxv <= vm_map_max(map)); MPASS(minv < maxv); MPASS(minv + align < maxv); arc4rand(&rbase, sizeof(rbase), 0); res = roundup(minv, (u_long)align) + rbase % (maxv - minv); res &= ~((u_long)align - 1); if (res >= maxv) res -= align; KASSERT(res >= minv, ("res %#lx < minv %#lx, maxv %#lx rbase %#lx", res, minv, maxv, rbase)); KASSERT(res < maxv, ("res %#lx > maxv %#lx, minv %#lx rbase %#lx", res, maxv, minv, rbase)); return (res); } static int __elfN(enforce_limits)(struct image_params *imgp, const Elf_Ehdr *hdr, const Elf_Phdr *phdr, u_long et_dyn_addr) { struct vmspace *vmspace; const char *err_str; u_long text_size, data_size, total_size, text_addr, data_addr; u_long seg_size, seg_addr; int i; err_str = NULL; text_size = data_size = total_size = text_addr = data_addr = 0; for (i = 0; i < hdr->e_phnum; i++) { if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0) continue; seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr); seg_size = round_page(phdr[i].p_memsz + phdr[i].p_vaddr + et_dyn_addr - seg_addr); /* * Make the largest executable segment the official * text segment and all others data. * * Note that obreak() assumes that data_addr + data_size == end * of data load area, and the ELF file format expects segments * to be sorted by address. If multiple data segments exist, * the last one will be used. */ if ((phdr[i].p_flags & PF_X) != 0 && text_size < seg_size) { text_size = seg_size; text_addr = seg_addr; } else { data_size = seg_size; data_addr = seg_addr; } total_size += seg_size; } if (data_addr == 0 && data_size == 0) { data_addr = text_addr; data_size = text_size; } /* * Check limits. It should be safe to check the * limits after loading the segments since we do * not actually fault in all the segments pages. 
*/ PROC_LOCK(imgp->proc); if (data_size > lim_cur_proc(imgp->proc, RLIMIT_DATA)) err_str = "Data segment size exceeds process limit"; else if (text_size > maxtsiz) err_str = "Text segment size exceeds system limit"; else if (total_size > lim_cur_proc(imgp->proc, RLIMIT_VMEM)) err_str = "Total segment size exceeds process limit"; else if (racct_set(imgp->proc, RACCT_DATA, data_size) != 0) err_str = "Data segment size exceeds resource limit"; else if (racct_set(imgp->proc, RACCT_VMEM, total_size) != 0) err_str = "Total segment size exceeds resource limit"; PROC_UNLOCK(imgp->proc); if (err_str != NULL) { uprintf("%s\n", err_str); return (ENOMEM); } vmspace = imgp->proc->p_vmspace; vmspace->vm_tsize = text_size >> PAGE_SHIFT; vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr; vmspace->vm_dsize = data_size >> PAGE_SHIFT; vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr; return (0); } static int __elfN(get_interp)(struct image_params *imgp, const Elf_Phdr *phdr, char **interpp, bool *free_interpp) { struct thread *td; char *interp; int error, interp_name_len; KASSERT(phdr->p_type == PT_INTERP, ("%s: p_type %u != PT_INTERP", __func__, phdr->p_type)); KASSERT(VOP_ISLOCKED(imgp->vp), ("%s: vp %p is not locked", __func__, imgp->vp)); td = curthread; /* Path to interpreter */ if (phdr->p_filesz < 2 || phdr->p_filesz > MAXPATHLEN) { uprintf("Invalid PT_INTERP\n"); return (ENOEXEC); } interp_name_len = phdr->p_filesz; if (phdr->p_offset > PAGE_SIZE || interp_name_len > PAGE_SIZE - phdr->p_offset) { VOP_UNLOCK(imgp->vp, 0); interp = malloc(interp_name_len + 1, M_TEMP, M_WAITOK); vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY); error = vn_rdwr(UIO_READ, imgp->vp, interp, interp_name_len, phdr->p_offset, UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred, NOCRED, NULL, td); if (error != 0) { free(interp, M_TEMP); uprintf("i/o error PT_INTERP %d\n", error); return (error); } interp[interp_name_len] = '\0'; *interpp = interp; *free_interpp = true; return (0); } interp = __DECONST(char *, imgp->image_header) + phdr->p_offset; if (interp[interp_name_len - 1] != '\0') { uprintf("Invalid PT_INTERP\n"); return (ENOEXEC); } *interpp = interp; *free_interpp = false; return (0); } static int __elfN(load_interp)(struct image_params *imgp, const Elf_Brandinfo *brand_info, const char *interp, u_long *addr, u_long *entry) { char *path; int error; if (brand_info->emul_path != NULL && brand_info->emul_path[0] != '\0') { path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path, interp); error = __elfN(load_file)(imgp->proc, path, addr, entry); free(path, M_TEMP); if (error == 0) return (0); } if (brand_info->interp_newpath != NULL && (brand_info->interp_path == NULL || strcmp(interp, brand_info->interp_path) == 0)) { error = __elfN(load_file)(imgp->proc, brand_info->interp_newpath, addr, entry); if (error == 0) return (0); } error = __elfN(load_file)(imgp->proc, interp, addr, entry); if (error == 0) return (0); uprintf("ELF interpreter %s not found, error %d\n", interp, error); return (error); } /* * Impossible et_dyn_addr initial value indicating that the real base * must be calculated later with some randomization applied. 
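 * A value of 1 can never collide with a real base address, since load
 * bases are always page aligned.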
*/ #define ET_DYN_ADDR_RAND 1 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) { struct thread *td; const Elf_Ehdr *hdr; const Elf_Phdr *phdr; Elf_Auxargs *elf_auxargs; struct vmspace *vmspace; vm_map_t map; char *interp; Elf_Brandinfo *brand_info; struct sysentvec *sv; - vm_prot_t prot; u_long addr, baddr, et_dyn_addr, entry, proghdr; u_long maxalign, mapsz, maxv, maxv1; uint32_t fctl0; int32_t osrel; bool free_interp; int error, i, n; hdr = (const Elf_Ehdr *)imgp->image_header; /* * Do we have a valid ELF header ? * * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later * if particular brand doesn't support it. */ if (__elfN(check_header)(hdr) != 0 || (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN)) return (-1); /* * From here on down, we return an errno, not -1, as we've * detected an ELF file. */ if ((hdr->e_phoff > PAGE_SIZE) || (u_int)hdr->e_phentsize * hdr->e_phnum > PAGE_SIZE - hdr->e_phoff) { /* Only support headers in first page for now */ uprintf("Program headers not in the first page\n"); return (ENOEXEC); } phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); if (!aligned(phdr, Elf_Addr)) { uprintf("Unaligned program headers\n"); return (ENOEXEC); } n = error = 0; baddr = 0; osrel = 0; fctl0 = 0; entry = proghdr = 0; interp = NULL; free_interp = false; td = curthread; maxalign = PAGE_SIZE; mapsz = 0; for (i = 0; i < hdr->e_phnum; i++) { switch (phdr[i].p_type) { case PT_LOAD: if (n == 0) baddr = phdr[i].p_vaddr; if (phdr[i].p_align > maxalign) maxalign = phdr[i].p_align; mapsz += phdr[i].p_memsz; n++; + + /* + * If this segment contains the program headers, + * remember their virtual address for the AT_PHDR + * aux entry. Static binaries don't usually include + * a PT_PHDR entry. + */ + if (phdr[i].p_offset == 0 && + hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize + <= phdr[i].p_filesz) + proghdr = phdr[i].p_vaddr + hdr->e_phoff; break; case PT_INTERP: /* Path to interpreter */ if (interp != NULL) { uprintf("Multiple PT_INTERP headers\n"); error = ENOEXEC; goto ret; } error = __elfN(get_interp)(imgp, &phdr[i], &interp, &free_interp); if (error != 0) goto ret; break; case PT_GNU_STACK: if (__elfN(nxstack)) imgp->stack_prot = __elfN(trans_prot)(phdr[i].p_flags); imgp->stack_sz = phdr[i].p_memsz; break; + case PT_PHDR: /* Program header table info */ + proghdr = phdr[i].p_vaddr; + break; } } brand_info = __elfN(get_brandinfo)(imgp, interp, &osrel, &fctl0); if (brand_info == NULL) { uprintf("ELF binary type \"%u\" not known.\n", hdr->e_ident[EI_OSABI]); error = ENOEXEC; goto ret; } sv = brand_info->sysvec; et_dyn_addr = 0; if (hdr->e_type == ET_DYN) { if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0) { uprintf("Cannot execute shared object\n"); error = ENOEXEC; goto ret; } /* * Honour the base load address from the dso if it is * non-zero for some reason. */ if (baddr == 0) { if ((sv->sv_flags & SV_ASLR) == 0 || (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0) et_dyn_addr = ET_DYN_LOAD_ADDR; else if ((__elfN(pie_aslr_enabled) && (imgp->proc->p_flag2 & P2_ASLR_DISABLE) == 0) || (imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0) et_dyn_addr = ET_DYN_ADDR_RAND; else et_dyn_addr = ET_DYN_LOAD_ADDR; } } /* * Avoid a possible deadlock if the current address space is destroyed * and that address space maps the locked vnode. In the common case, * the locked vnode's v_usecount is decremented but remains greater * than zero. Consequently, the vnode lock is not needed by vrele(). 
* However, in cases where the vnode lock is external, such as nullfs, * v_usecount may become zero. * * The VV_TEXT flag prevents modifications to the executable while * the vnode is unlocked. */ VOP_UNLOCK(imgp->vp, 0); /* * Decide whether to enable randomization of user mappings. * First, reset user preferences for the setid binaries. * Then, account for the support of the randomization by the * ABI, by user preferences, and make special treatment for * PIE binaries. */ if (imgp->credential_setid) { PROC_LOCK(imgp->proc); imgp->proc->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE); PROC_UNLOCK(imgp->proc); } if ((sv->sv_flags & SV_ASLR) == 0 || (imgp->proc->p_flag2 & P2_ASLR_DISABLE) != 0 || (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0) { KASSERT(et_dyn_addr != ET_DYN_ADDR_RAND, ("et_dyn_addr == RAND and !ASLR")); } else if ((imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0 || (__elfN(aslr_enabled) && hdr->e_type == ET_EXEC) || et_dyn_addr == ET_DYN_ADDR_RAND) { imgp->map_flags |= MAP_ASLR; /* * If user does not care about sbrk, utilize the bss * grow region for mappings as well. We can select * the base for the image anywere and still not suffer * from the fragmentation. */ if (!__elfN(aslr_honor_sbrk) || (imgp->proc->p_flag2 & P2_ASLR_IGNSTART) != 0) imgp->map_flags |= MAP_ASLR_IGNSTART; } error = exec_new_vmspace(imgp, sv); vmspace = imgp->proc->p_vmspace; map = &vmspace->vm_map; imgp->proc->p_sysent = sv; maxv = vm_map_max(map) - lim_max(td, RLIMIT_STACK); if (et_dyn_addr == ET_DYN_ADDR_RAND) { KASSERT((map->flags & MAP_ASLR) != 0, ("ET_DYN_ADDR_RAND but !MAP_ASLR")); et_dyn_addr = __CONCAT(rnd_, __elfN(base))(map, vm_map_min(map) + mapsz + lim_max(td, RLIMIT_DATA), /* reserve half of the address space to interpreter */ maxv / 2, 1UL << flsl(maxalign)); } vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY); if (error != 0) goto ret; - for (i = 0; i < hdr->e_phnum; i++) { - switch (phdr[i].p_type) { - case PT_LOAD: /* Loadable segment */ - if (phdr[i].p_memsz == 0) - break; - prot = __elfN(trans_prot)(phdr[i].p_flags); - error = __elfN(load_section)(imgp, phdr[i].p_offset, - (caddr_t)(uintptr_t)phdr[i].p_vaddr + et_dyn_addr, - phdr[i].p_memsz, phdr[i].p_filesz, prot); - if (error != 0) - goto ret; + error = __elfN(load_sections)(imgp, hdr, phdr, et_dyn_addr, NULL); + if (error != 0) + goto ret; - /* - * If this segment contains the program headers, - * remember their virtual address for the AT_PHDR - * aux entry. Static binaries don't usually include - * a PT_PHDR entry. - */ - if (phdr[i].p_offset == 0 && - hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize - <= phdr[i].p_filesz) - proghdr = phdr[i].p_vaddr + hdr->e_phoff + - et_dyn_addr; - break; - case PT_PHDR: /* Program header table info */ - proghdr = phdr[i].p_vaddr + et_dyn_addr; - break; - default: - break; - } - } - error = __elfN(enforce_limits)(imgp, hdr, phdr, et_dyn_addr); if (error != 0) goto ret; entry = (u_long)hdr->e_entry + et_dyn_addr; /* * We load the dynamic linker where a userland call * to mmap(0, ...) would put it. The rationale behind this * calculation is that it leaves room for the heap to grow to * its maximum allowed size. */ addr = round_page((vm_offset_t)vmspace->vm_daddr + lim_max(td, RLIMIT_DATA)); if ((map->flags & MAP_ASLR) != 0) { maxv1 = maxv / 2 + addr / 2; MPASS(maxv1 >= addr); /* No overflow */ map->anon_loc = __CONCAT(rnd_, __elfN(base))(map, addr, maxv1, MAXPAGESIZES > 1 ? 
pagesizes[1] : pagesizes[0]); } else { map->anon_loc = addr; } imgp->entry_addr = entry; if (interp != NULL) { VOP_UNLOCK(imgp->vp, 0); if ((map->flags & MAP_ASLR) != 0) { /* Assume that interpeter fits into 1/4 of AS */ maxv1 = maxv / 2 + addr / 2; MPASS(maxv1 >= addr); /* No overflow */ addr = __CONCAT(rnd_, __elfN(base))(map, addr, maxv1, PAGE_SIZE); } error = __elfN(load_interp)(imgp, brand_info, interp, &addr, &imgp->entry_addr); vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY); if (error != 0) goto ret; } else addr = et_dyn_addr; /* * Construct auxargs table (used by the fixup routine) */ elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK); elf_auxargs->execfd = -1; - elf_auxargs->phdr = proghdr; + elf_auxargs->phdr = proghdr + et_dyn_addr; elf_auxargs->phent = hdr->e_phentsize; elf_auxargs->phnum = hdr->e_phnum; elf_auxargs->pagesz = PAGE_SIZE; elf_auxargs->base = addr; elf_auxargs->flags = 0; elf_auxargs->entry = entry; elf_auxargs->hdr_eflags = hdr->e_flags; imgp->auxargs = elf_auxargs; imgp->interpreted = 0; imgp->reloc_base = addr; imgp->proc->p_osrel = osrel; imgp->proc->p_fctl0 = fctl0; imgp->proc->p_elf_machine = hdr->e_machine; imgp->proc->p_elf_flags = hdr->e_flags; ret: if (free_interp) free(interp, M_TEMP); return (error); } #define suword __CONCAT(suword, __ELF_WORD_SIZE) int __elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp) { Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs; Elf_Auxinfo *argarray, *pos; Elf_Addr *base, *auxbase; int error; base = (Elf_Addr *)*stack_base; auxbase = base + imgp->args->argc + 1 + imgp->args->envc + 1; argarray = pos = malloc(AT_COUNT * sizeof(*pos), M_TEMP, M_WAITOK | M_ZERO); if (args->execfd != -1) AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd); AUXARGS_ENTRY(pos, AT_PHDR, args->phdr); AUXARGS_ENTRY(pos, AT_PHENT, args->phent); AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum); AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz); AUXARGS_ENTRY(pos, AT_FLAGS, args->flags); AUXARGS_ENTRY(pos, AT_ENTRY, args->entry); AUXARGS_ENTRY(pos, AT_BASE, args->base); AUXARGS_ENTRY(pos, AT_EHDRFLAGS, args->hdr_eflags); if (imgp->execpathp != 0) AUXARGS_ENTRY(pos, AT_EXECPATH, imgp->execpathp); AUXARGS_ENTRY(pos, AT_OSRELDATE, imgp->proc->p_ucred->cr_prison->pr_osreldate); if (imgp->canary != 0) { AUXARGS_ENTRY(pos, AT_CANARY, imgp->canary); AUXARGS_ENTRY(pos, AT_CANARYLEN, imgp->canarylen); } AUXARGS_ENTRY(pos, AT_NCPUS, mp_ncpus); if (imgp->pagesizes != 0) { AUXARGS_ENTRY(pos, AT_PAGESIZES, imgp->pagesizes); AUXARGS_ENTRY(pos, AT_PAGESIZESLEN, imgp->pagesizeslen); } if (imgp->sysent->sv_timekeep_base != 0) { AUXARGS_ENTRY(pos, AT_TIMEKEEP, imgp->sysent->sv_timekeep_base); } AUXARGS_ENTRY(pos, AT_STACKPROT, imgp->sysent->sv_shared_page_obj != NULL && imgp->stack_prot != 0 ? imgp->stack_prot : imgp->sysent->sv_stackprot); if (imgp->sysent->sv_hwcap != NULL) AUXARGS_ENTRY(pos, AT_HWCAP, *imgp->sysent->sv_hwcap); if (imgp->sysent->sv_hwcap2 != NULL) AUXARGS_ENTRY(pos, AT_HWCAP2, *imgp->sysent->sv_hwcap2); AUXARGS_ENTRY(pos, AT_NULL, 0); free(imgp->auxargs, M_TEMP); imgp->auxargs = NULL; KASSERT(pos - argarray <= AT_COUNT, ("Too many auxargs")); error = copyout(argarray, auxbase, sizeof(*argarray) * AT_COUNT); free(argarray, M_TEMP); if (error != 0) return (error); base--; if (suword(base, imgp->args->argc) == -1) return (EFAULT); *stack_base = (register_t *)base; return (0); } /* * Code for generating ELF core dumps. */ typedef void (*segment_callback)(vm_map_entry_t, void *); /* Closure for cb_put_phdr(). 
*/ struct phdr_closure { Elf_Phdr *phdr; /* Program header to fill in */ Elf_Off offset; /* Offset of segment in core file */ }; /* Closure for cb_size_segment(). */ struct sseg_closure { int count; /* Count of writable segments. */ size_t size; /* Total size of all writable segments. */ }; typedef void (*outfunc_t)(void *, struct sbuf *, size_t *); struct note_info { int type; /* Note type. */ outfunc_t outfunc; /* Output function. */ void *outarg; /* Argument for the output function. */ size_t outsize; /* Output size. */ TAILQ_ENTRY(note_info) link; /* Link to the next note info. */ }; TAILQ_HEAD(note_info_list, note_info); /* Coredump output parameters. */ struct coredump_params { off_t offset; struct ucred *active_cred; struct ucred *file_cred; struct thread *td; struct vnode *vp; struct compressor *comp; }; extern int compress_user_cores; extern int compress_user_cores_level; static void cb_put_phdr(vm_map_entry_t, void *); static void cb_size_segment(vm_map_entry_t, void *); static int core_write(struct coredump_params *, const void *, size_t, off_t, enum uio_seg); static void each_dumpable_segment(struct thread *, segment_callback, void *); static int __elfN(corehdr)(struct coredump_params *, int, void *, size_t, struct note_info_list *, size_t); static void __elfN(prepare_notes)(struct thread *, struct note_info_list *, size_t *); static void __elfN(puthdr)(struct thread *, void *, size_t, int, size_t); static void __elfN(putnote)(struct note_info *, struct sbuf *); static size_t register_note(struct note_info_list *, int, outfunc_t, void *); static int sbuf_drain_core_output(void *, const char *, int); static int sbuf_drain_count(void *arg, const char *data, int len); static void __elfN(note_fpregset)(void *, struct sbuf *, size_t *); static void __elfN(note_prpsinfo)(void *, struct sbuf *, size_t *); static void __elfN(note_prstatus)(void *, struct sbuf *, size_t *); static void __elfN(note_threadmd)(void *, struct sbuf *, size_t *); static void __elfN(note_thrmisc)(void *, struct sbuf *, size_t *); static void __elfN(note_ptlwpinfo)(void *, struct sbuf *, size_t *); static void __elfN(note_procstat_auxv)(void *, struct sbuf *, size_t *); static void __elfN(note_procstat_proc)(void *, struct sbuf *, size_t *); static void __elfN(note_procstat_psstrings)(void *, struct sbuf *, size_t *); static void note_procstat_files(void *, struct sbuf *, size_t *); static void note_procstat_groups(void *, struct sbuf *, size_t *); static void note_procstat_osrel(void *, struct sbuf *, size_t *); static void note_procstat_rlimit(void *, struct sbuf *, size_t *); static void note_procstat_umask(void *, struct sbuf *, size_t *); static void note_procstat_vmmap(void *, struct sbuf *, size_t *); /* * Write out a core segment to the compression stream. */ static int compress_chunk(struct coredump_params *p, char *base, char *buf, u_int len) { u_int chunk_len; int error; while (len > 0) { chunk_len = MIN(len, CORE_BUF_SIZE); /* * We can get EFAULT error here. * In that case zero out the current chunk of the segment. 
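		 * As with the uncompressed path in core_output(), a partially
		 * faulted-in segment must not abort the dump; the unreadable
		 * range simply ends up as zeroes in the core file.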
*/ error = copyin(base, buf, chunk_len); if (error != 0) bzero(buf, chunk_len); error = compressor_write(p->comp, buf, chunk_len); if (error != 0) break; base += chunk_len; len -= chunk_len; } return (error); } static int core_compressed_write(void *base, size_t len, off_t offset, void *arg) { return (core_write((struct coredump_params *)arg, base, len, offset, UIO_SYSSPACE)); } static int core_write(struct coredump_params *p, const void *base, size_t len, off_t offset, enum uio_seg seg) { return (vn_rdwr_inchunks(UIO_WRITE, p->vp, __DECONST(void *, base), len, offset, seg, IO_UNIT | IO_DIRECT | IO_RANGELOCKED, p->active_cred, p->file_cred, NULL, p->td)); } static int core_output(void *base, size_t len, off_t offset, struct coredump_params *p, void *tmpbuf) { int error; if (p->comp != NULL) return (compress_chunk(p, base, tmpbuf, len)); /* * EFAULT is a non-fatal error that we can get, for example, * if the segment is backed by a file but extends beyond its * end. */ error = core_write(p, base, len, offset, UIO_USERSPACE); if (error == EFAULT) { log(LOG_WARNING, "Failed to fully fault in a core file segment " "at VA %p with size 0x%zx to be written at offset 0x%jx " "for process %s\n", base, len, offset, curproc->p_comm); /* * Write a "real" zero byte at the end of the target region * in the case this is the last segment. * The intermediate space will be implicitly zero-filled. */ error = core_write(p, zero_region, 1, offset + len - 1, UIO_SYSSPACE); } return (error); } /* * Drain into a core file. */ static int sbuf_drain_core_output(void *arg, const char *data, int len) { struct coredump_params *p; int error, locked; p = (struct coredump_params *)arg; /* * Some kern_proc out routines that print to this sbuf may * call us with the process lock held. Draining with the * non-sleepable lock held is unsafe. The lock is needed for * those routines when dumping a live process. In our case we * can safely release the lock before draining and acquire * again after. */ locked = PROC_LOCKED(p->td->td_proc); if (locked) PROC_UNLOCK(p->td->td_proc); if (p->comp != NULL) error = compressor_write(p->comp, __DECONST(char *, data), len); else error = core_write(p, __DECONST(void *, data), len, p->offset, UIO_SYSSPACE); if (locked) PROC_LOCK(p->td->td_proc); if (error != 0) return (-error); p->offset += len; return (len); } /* * Drain into a counter. */ static int sbuf_drain_count(void *arg, const char *data __unused, int len) { size_t *sizep; sizep = (size_t *)arg; *sizep += len; return (len); } int __elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags) { struct ucred *cred = td->td_ucred; int error = 0; struct sseg_closure seginfo; struct note_info_list notelst; struct coredump_params params; struct note_info *ninfo; void *hdr, *tmpbuf; size_t hdrsize, notesz, coresize; hdr = NULL; tmpbuf = NULL; TAILQ_INIT(¬elst); /* Size the program segments. */ seginfo.count = 0; seginfo.size = 0; each_dumpable_segment(td, cb_size_segment, &seginfo); /* * Collect info about the core file header area. */ hdrsize = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * (1 + seginfo.count); if (seginfo.count + 1 >= PN_XNUM) hdrsize += sizeof(Elf_Shdr); __elfN(prepare_notes)(td, ¬elst, ¬esz); coresize = round_page(hdrsize + notesz) + seginfo.size; /* Set up core dump parameters. 
*/ params.offset = 0; params.active_cred = cred; params.file_cred = NOCRED; params.td = td; params.vp = vp; params.comp = NULL; #ifdef RACCT if (racct_enable) { PROC_LOCK(td->td_proc); error = racct_add(td->td_proc, RACCT_CORE, coresize); PROC_UNLOCK(td->td_proc); if (error != 0) { error = EFAULT; goto done; } } #endif if (coresize >= limit) { error = EFAULT; goto done; } /* Create a compression stream if necessary. */ if (compress_user_cores != 0) { params.comp = compressor_init(core_compressed_write, compress_user_cores, CORE_BUF_SIZE, compress_user_cores_level, ¶ms); if (params.comp == NULL) { error = EFAULT; goto done; } tmpbuf = malloc(CORE_BUF_SIZE, M_TEMP, M_WAITOK | M_ZERO); } /* * Allocate memory for building the header, fill it up, * and write it out following the notes. */ hdr = malloc(hdrsize, M_TEMP, M_WAITOK); error = __elfN(corehdr)(¶ms, seginfo.count, hdr, hdrsize, ¬elst, notesz); /* Write the contents of all of the writable segments. */ if (error == 0) { Elf_Phdr *php; off_t offset; int i; php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1; offset = round_page(hdrsize + notesz); for (i = 0; i < seginfo.count; i++) { error = core_output((caddr_t)(uintptr_t)php->p_vaddr, php->p_filesz, offset, ¶ms, tmpbuf); if (error != 0) break; offset += php->p_filesz; php++; } if (error == 0 && params.comp != NULL) error = compressor_flush(params.comp); } if (error) { log(LOG_WARNING, "Failed to write core file for process %s (error %d)\n", curproc->p_comm, error); } done: free(tmpbuf, M_TEMP); if (params.comp != NULL) compressor_fini(params.comp); while ((ninfo = TAILQ_FIRST(¬elst)) != NULL) { TAILQ_REMOVE(¬elst, ninfo, link); free(ninfo, M_TEMP); } if (hdr != NULL) free(hdr, M_TEMP); return (error); } /* * A callback for each_dumpable_segment() to write out the segment's * program header entry. */ static void cb_put_phdr(vm_map_entry_t entry, void *closure) { struct phdr_closure *phc = (struct phdr_closure *)closure; Elf_Phdr *phdr = phc->phdr; phc->offset = round_page(phc->offset); phdr->p_type = PT_LOAD; phdr->p_offset = phc->offset; phdr->p_vaddr = entry->start; phdr->p_paddr = 0; phdr->p_filesz = phdr->p_memsz = entry->end - entry->start; phdr->p_align = PAGE_SIZE; phdr->p_flags = __elfN(untrans_prot)(entry->protection); phc->offset += phdr->p_filesz; phc->phdr++; } /* * A callback for each_dumpable_segment() to gather information about * the number of segments and their total size. */ static void cb_size_segment(vm_map_entry_t entry, void *closure) { struct sseg_closure *ssc = (struct sseg_closure *)closure; ssc->count++; ssc->size += entry->end - entry->start; } /* * For each writable segment in the process's memory map, call the given * function with a pointer to the map entry and some arbitrary * caller-supplied data. */ static void each_dumpable_segment(struct thread *td, segment_callback func, void *closure) { struct proc *p = td->td_proc; vm_map_t map = &p->p_vmspace->vm_map; vm_map_entry_t entry; vm_object_t backing_object, object; boolean_t ignore_entry; vm_map_lock_read(map); for (entry = map->header.next; entry != &map->header; entry = entry->next) { /* * Don't dump inaccessible mappings, deal with legacy * coredump mode. * * Note that read-only segments related to the elf binary * are marked MAP_ENTRY_NOCOREDUMP now so we no longer * need to arbitrarily ignore such segments. 
*/ if (elf_legacy_coredump) { if ((entry->protection & VM_PROT_RW) != VM_PROT_RW) continue; } else { if ((entry->protection & VM_PROT_ALL) == 0) continue; } /* * Dont include memory segment in the coredump if * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in * madvise(2). Do not dump submaps (i.e. parts of the * kernel map). */ if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP)) continue; if ((object = entry->object.vm_object) == NULL) continue; /* Ignore memory-mapped devices and such things. */ VM_OBJECT_RLOCK(object); while ((backing_object = object->backing_object) != NULL) { VM_OBJECT_RLOCK(backing_object); VM_OBJECT_RUNLOCK(object); object = backing_object; } ignore_entry = object->type != OBJT_DEFAULT && object->type != OBJT_SWAP && object->type != OBJT_VNODE && object->type != OBJT_PHYS; VM_OBJECT_RUNLOCK(object); if (ignore_entry) continue; (*func)(entry, closure); } vm_map_unlock_read(map); } /* * Write the core file header to the file, including padding up to * the page boundary. */ static int __elfN(corehdr)(struct coredump_params *p, int numsegs, void *hdr, size_t hdrsize, struct note_info_list *notelst, size_t notesz) { struct note_info *ninfo; struct sbuf *sb; int error; /* Fill in the header. */ bzero(hdr, hdrsize); __elfN(puthdr)(p->td, hdr, hdrsize, numsegs, notesz); sb = sbuf_new(NULL, NULL, CORE_BUF_SIZE, SBUF_FIXEDLEN); sbuf_set_drain(sb, sbuf_drain_core_output, p); sbuf_start_section(sb, NULL); sbuf_bcat(sb, hdr, hdrsize); TAILQ_FOREACH(ninfo, notelst, link) __elfN(putnote)(ninfo, sb); /* Align up to a page boundary for the program segments. */ sbuf_end_section(sb, -1, PAGE_SIZE, 0); error = sbuf_finish(sb); sbuf_delete(sb); return (error); } static void __elfN(prepare_notes)(struct thread *td, struct note_info_list *list, size_t *sizep) { struct proc *p; struct thread *thr; size_t size; p = td->td_proc; size = 0; size += register_note(list, NT_PRPSINFO, __elfN(note_prpsinfo), p); /* * To have the debugger select the right thread (LWP) as the initial * thread, we dump the state of the thread passed to us in td first. * This is the thread that causes the core dump and thus likely to * be the right thread one wants to have selected in the debugger. */ thr = td; while (thr != NULL) { size += register_note(list, NT_PRSTATUS, __elfN(note_prstatus), thr); size += register_note(list, NT_FPREGSET, __elfN(note_fpregset), thr); size += register_note(list, NT_THRMISC, __elfN(note_thrmisc), thr); size += register_note(list, NT_PTLWPINFO, __elfN(note_ptlwpinfo), thr); size += register_note(list, -1, __elfN(note_threadmd), thr); thr = (thr == td) ? 
TAILQ_FIRST(&p->p_threads) : TAILQ_NEXT(thr, td_plist); if (thr == td) thr = TAILQ_NEXT(thr, td_plist); } size += register_note(list, NT_PROCSTAT_PROC, __elfN(note_procstat_proc), p); size += register_note(list, NT_PROCSTAT_FILES, note_procstat_files, p); size += register_note(list, NT_PROCSTAT_VMMAP, note_procstat_vmmap, p); size += register_note(list, NT_PROCSTAT_GROUPS, note_procstat_groups, p); size += register_note(list, NT_PROCSTAT_UMASK, note_procstat_umask, p); size += register_note(list, NT_PROCSTAT_RLIMIT, note_procstat_rlimit, p); size += register_note(list, NT_PROCSTAT_OSREL, note_procstat_osrel, p); size += register_note(list, NT_PROCSTAT_PSSTRINGS, __elfN(note_procstat_psstrings), p); size += register_note(list, NT_PROCSTAT_AUXV, __elfN(note_procstat_auxv), p); *sizep = size; } static void __elfN(puthdr)(struct thread *td, void *hdr, size_t hdrsize, int numsegs, size_t notesz) { Elf_Ehdr *ehdr; Elf_Phdr *phdr; Elf_Shdr *shdr; struct phdr_closure phc; ehdr = (Elf_Ehdr *)hdr; ehdr->e_ident[EI_MAG0] = ELFMAG0; ehdr->e_ident[EI_MAG1] = ELFMAG1; ehdr->e_ident[EI_MAG2] = ELFMAG2; ehdr->e_ident[EI_MAG3] = ELFMAG3; ehdr->e_ident[EI_CLASS] = ELF_CLASS; ehdr->e_ident[EI_DATA] = ELF_DATA; ehdr->e_ident[EI_VERSION] = EV_CURRENT; ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD; ehdr->e_ident[EI_ABIVERSION] = 0; ehdr->e_ident[EI_PAD] = 0; ehdr->e_type = ET_CORE; ehdr->e_machine = td->td_proc->p_elf_machine; ehdr->e_version = EV_CURRENT; ehdr->e_entry = 0; ehdr->e_phoff = sizeof(Elf_Ehdr); ehdr->e_flags = td->td_proc->p_elf_flags; ehdr->e_ehsize = sizeof(Elf_Ehdr); ehdr->e_phentsize = sizeof(Elf_Phdr); ehdr->e_shentsize = sizeof(Elf_Shdr); ehdr->e_shstrndx = SHN_UNDEF; if (numsegs + 1 < PN_XNUM) { ehdr->e_phnum = numsegs + 1; ehdr->e_shnum = 0; } else { ehdr->e_phnum = PN_XNUM; ehdr->e_shnum = 1; ehdr->e_shoff = ehdr->e_phoff + (numsegs + 1) * ehdr->e_phentsize; KASSERT(ehdr->e_shoff == hdrsize - sizeof(Elf_Shdr), ("e_shoff: %zu, hdrsize - shdr: %zu", (size_t)ehdr->e_shoff, hdrsize - sizeof(Elf_Shdr))); shdr = (Elf_Shdr *)((char *)hdr + ehdr->e_shoff); memset(shdr, 0, sizeof(*shdr)); /* * A special first section is used to hold large segment and * section counts. This was proposed by Sun Microsystems in * Solaris and has been adopted by Linux; the standard ELF * tools are already familiar with the technique. * * See table 7-7 of the Solaris "Linker and Libraries Guide" * (or 12-7 depending on the version of the document) for more * details. */ shdr->sh_type = SHT_NULL; shdr->sh_size = ehdr->e_shnum; shdr->sh_link = ehdr->e_shstrndx; shdr->sh_info = numsegs + 1; } /* * Fill in the program header entries. */ phdr = (Elf_Phdr *)((char *)hdr + ehdr->e_phoff); /* The note segement. */ phdr->p_type = PT_NOTE; phdr->p_offset = hdrsize; phdr->p_vaddr = 0; phdr->p_paddr = 0; phdr->p_filesz = notesz; phdr->p_memsz = 0; phdr->p_flags = PF_R; phdr->p_align = ELF_NOTE_ROUNDSIZE; phdr++; /* All the writable segments from the program. 
*/ phc.phdr = phdr; phc.offset = round_page(hdrsize + notesz); each_dumpable_segment(td, cb_put_phdr, &phc); } static size_t register_note(struct note_info_list *list, int type, outfunc_t out, void *arg) { struct note_info *ninfo; size_t size, notesize; size = 0; out(arg, NULL, &size); ninfo = malloc(sizeof(*ninfo), M_TEMP, M_ZERO | M_WAITOK); ninfo->type = type; ninfo->outfunc = out; ninfo->outarg = arg; ninfo->outsize = size; TAILQ_INSERT_TAIL(list, ninfo, link); if (type == -1) return (size); notesize = sizeof(Elf_Note) + /* note header */ roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) + /* note name */ roundup2(size, ELF_NOTE_ROUNDSIZE); /* note description */ return (notesize); } static size_t append_note_data(const void *src, void *dst, size_t len) { size_t padded_len; padded_len = roundup2(len, ELF_NOTE_ROUNDSIZE); if (dst != NULL) { bcopy(src, dst, len); bzero((char *)dst + len, padded_len - len); } return (padded_len); } size_t __elfN(populate_note)(int type, void *src, void *dst, size_t size, void **descp) { Elf_Note *note; char *buf; size_t notesize; buf = dst; if (buf != NULL) { note = (Elf_Note *)buf; note->n_namesz = sizeof(FREEBSD_ABI_VENDOR); note->n_descsz = size; note->n_type = type; buf += sizeof(*note); buf += append_note_data(FREEBSD_ABI_VENDOR, buf, sizeof(FREEBSD_ABI_VENDOR)); append_note_data(src, buf, size); if (descp != NULL) *descp = buf; } notesize = sizeof(Elf_Note) + /* note header */ roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) + /* note name */ roundup2(size, ELF_NOTE_ROUNDSIZE); /* note description */ return (notesize); } static void __elfN(putnote)(struct note_info *ninfo, struct sbuf *sb) { Elf_Note note; ssize_t old_len, sect_len; size_t new_len, descsz, i; if (ninfo->type == -1) { ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize); return; } note.n_namesz = sizeof(FREEBSD_ABI_VENDOR); note.n_descsz = ninfo->outsize; note.n_type = ninfo->type; sbuf_bcat(sb, ¬e, sizeof(note)); sbuf_start_section(sb, &old_len); sbuf_bcat(sb, FREEBSD_ABI_VENDOR, sizeof(FREEBSD_ABI_VENDOR)); sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0); if (note.n_descsz == 0) return; sbuf_start_section(sb, &old_len); ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize); sect_len = sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0); if (sect_len < 0) return; new_len = (size_t)sect_len; descsz = roundup(note.n_descsz, ELF_NOTE_ROUNDSIZE); if (new_len < descsz) { /* * It is expected that individual note emitters will correctly * predict their expected output size and fill up to that size * themselves, padding in a format-specific way if needed. * However, in case they don't, just do it here with zeros. */ for (i = 0; i < descsz - new_len; i++) sbuf_putc(sb, 0); } else if (new_len > descsz) { /* * We can't always truncate sb -- we may have drained some * of it already. */ KASSERT(new_len == descsz, ("%s: Note type %u changed as we " "read it (%zu > %zu). Since it is longer than " "expected, this coredump's notes are corrupt. THIS " "IS A BUG in the note_procstat routine for type %u.\n", __func__, (unsigned)note.n_type, new_len, descsz, (unsigned)note.n_type)); } } /* * Miscellaneous note out functions. 
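 * Each of these is invoked twice: once from register_note() with a NULL
 * sbuf to report the payload size, and once from __elfN(putnote)() with a
 * live sbuf to emit the payload.  Both passes must report the same *sizep,
 * which the KASSERTs in the individual functions enforce.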
*/ #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 #include #include typedef struct prstatus32 elf_prstatus_t; typedef struct prpsinfo32 elf_prpsinfo_t; typedef struct fpreg32 elf_prfpregset_t; typedef struct fpreg32 elf_fpregset_t; typedef struct reg32 elf_gregset_t; typedef struct thrmisc32 elf_thrmisc_t; #define ELF_KERN_PROC_MASK KERN_PROC_MASK32 typedef struct kinfo_proc32 elf_kinfo_proc_t; typedef uint32_t elf_ps_strings_t; #else typedef prstatus_t elf_prstatus_t; typedef prpsinfo_t elf_prpsinfo_t; typedef prfpregset_t elf_prfpregset_t; typedef prfpregset_t elf_fpregset_t; typedef gregset_t elf_gregset_t; typedef thrmisc_t elf_thrmisc_t; #define ELF_KERN_PROC_MASK 0 typedef struct kinfo_proc elf_kinfo_proc_t; typedef vm_offset_t elf_ps_strings_t; #endif static void __elfN(note_prpsinfo)(void *arg, struct sbuf *sb, size_t *sizep) { struct sbuf sbarg; size_t len; char *cp, *end; struct proc *p; elf_prpsinfo_t *psinfo; int error; p = (struct proc *)arg; if (sb != NULL) { KASSERT(*sizep == sizeof(*psinfo), ("invalid size")); psinfo = malloc(sizeof(*psinfo), M_TEMP, M_ZERO | M_WAITOK); psinfo->pr_version = PRPSINFO_VERSION; psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t); strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname)); PROC_LOCK(p); if (p->p_args != NULL) { len = sizeof(psinfo->pr_psargs) - 1; if (len > p->p_args->ar_length) len = p->p_args->ar_length; memcpy(psinfo->pr_psargs, p->p_args->ar_args, len); PROC_UNLOCK(p); error = 0; } else { _PHOLD(p); PROC_UNLOCK(p); sbuf_new(&sbarg, psinfo->pr_psargs, sizeof(psinfo->pr_psargs), SBUF_FIXEDLEN); error = proc_getargv(curthread, p, &sbarg); PRELE(p); if (sbuf_finish(&sbarg) == 0) len = sbuf_len(&sbarg) - 1; else len = sizeof(psinfo->pr_psargs) - 1; sbuf_delete(&sbarg); } if (error || len == 0) strlcpy(psinfo->pr_psargs, p->p_comm, sizeof(psinfo->pr_psargs)); else { KASSERT(len < sizeof(psinfo->pr_psargs), ("len is too long: %zu vs %zu", len, sizeof(psinfo->pr_psargs))); cp = psinfo->pr_psargs; end = cp + len - 1; for (;;) { cp = memchr(cp, '\0', end - cp); if (cp == NULL) break; *cp = ' '; } } psinfo->pr_pid = p->p_pid; sbuf_bcat(sb, psinfo, sizeof(*psinfo)); free(psinfo, M_TEMP); } *sizep = sizeof(*psinfo); } static void __elfN(note_prstatus)(void *arg, struct sbuf *sb, size_t *sizep) { struct thread *td; elf_prstatus_t *status; td = (struct thread *)arg; if (sb != NULL) { KASSERT(*sizep == sizeof(*status), ("invalid size")); status = malloc(sizeof(*status), M_TEMP, M_ZERO | M_WAITOK); status->pr_version = PRSTATUS_VERSION; status->pr_statussz = sizeof(elf_prstatus_t); status->pr_gregsetsz = sizeof(elf_gregset_t); status->pr_fpregsetsz = sizeof(elf_fpregset_t); status->pr_osreldate = osreldate; status->pr_cursig = td->td_proc->p_sig; status->pr_pid = td->td_tid; #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 fill_regs32(td, &status->pr_reg); #else fill_regs(td, &status->pr_reg); #endif sbuf_bcat(sb, status, sizeof(*status)); free(status, M_TEMP); } *sizep = sizeof(*status); } static void __elfN(note_fpregset)(void *arg, struct sbuf *sb, size_t *sizep) { struct thread *td; elf_prfpregset_t *fpregset; td = (struct thread *)arg; if (sb != NULL) { KASSERT(*sizep == sizeof(*fpregset), ("invalid size")); fpregset = malloc(sizeof(*fpregset), M_TEMP, M_ZERO | M_WAITOK); #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 fill_fpregs32(td, fpregset); #else fill_fpregs(td, fpregset); #endif sbuf_bcat(sb, fpregset, sizeof(*fpregset)); free(fpregset, M_TEMP); } *sizep = sizeof(*fpregset); } static void 
__elfN(note_thrmisc)(void *arg, struct sbuf *sb, size_t *sizep) { struct thread *td; elf_thrmisc_t thrmisc; td = (struct thread *)arg; if (sb != NULL) { KASSERT(*sizep == sizeof(thrmisc), ("invalid size")); bzero(&thrmisc._pad, sizeof(thrmisc._pad)); strcpy(thrmisc.pr_tname, td->td_name); sbuf_bcat(sb, &thrmisc, sizeof(thrmisc)); } *sizep = sizeof(thrmisc); } static void __elfN(note_ptlwpinfo)(void *arg, struct sbuf *sb, size_t *sizep) { struct thread *td; size_t size; int structsize; #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 struct ptrace_lwpinfo32 pl; #else struct ptrace_lwpinfo pl; #endif td = (struct thread *)arg; size = sizeof(structsize) + sizeof(pl); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(pl); sbuf_bcat(sb, &structsize, sizeof(structsize)); bzero(&pl, sizeof(pl)); pl.pl_lwpid = td->td_tid; pl.pl_event = PL_EVENT_NONE; pl.pl_sigmask = td->td_sigmask; pl.pl_siglist = td->td_siglist; if (td->td_si.si_signo != 0) { pl.pl_event = PL_EVENT_SIGNAL; pl.pl_flags |= PL_FLAG_SI; #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 siginfo_to_siginfo32(&td->td_si, &pl.pl_siginfo); #else pl.pl_siginfo = td->td_si; #endif } strcpy(pl.pl_tdname, td->td_name); /* XXX TODO: supply more information in struct ptrace_lwpinfo*/ sbuf_bcat(sb, &pl, sizeof(pl)); } *sizep = size; } /* * Allow for MD specific notes, as well as any MD * specific preparations for writing MI notes. */ static void __elfN(note_threadmd)(void *arg, struct sbuf *sb, size_t *sizep) { struct thread *td; void *buf; size_t size; td = (struct thread *)arg; size = *sizep; if (size != 0 && sb != NULL) buf = malloc(size, M_TEMP, M_ZERO | M_WAITOK); else buf = NULL; size = 0; __elfN(dump_thread)(td, buf, &size); KASSERT(sb == NULL || *sizep == size, ("invalid size")); if (size != 0 && sb != NULL) sbuf_bcat(sb, buf, size); free(buf, M_TEMP); *sizep = size; } #ifdef KINFO_PROC_SIZE CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE); #endif static void __elfN(note_procstat_proc)(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = (struct proc *)arg; size = sizeof(structsize) + p->p_numthreads * sizeof(elf_kinfo_proc_t); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(elf_kinfo_proc_t); sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); kern_proc_out(p, sb, ELF_KERN_PROC_MASK); } *sizep = size; } #ifdef KINFO_FILE_SIZE CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE); #endif static void note_procstat_files(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size, sect_sz, i; ssize_t start_len, sect_len; int structsize, filedesc_flags; if (coredump_pack_fileinfo) filedesc_flags = KERN_FILEDESC_PACK_KINFO; else filedesc_flags = 0; p = (struct proc *)arg; structsize = sizeof(struct kinfo_file); if (sb == NULL) { size = 0; sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN); sbuf_set_drain(sb, sbuf_drain_count, &size); sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); kern_proc_filedesc_out(p, sb, -1, filedesc_flags); sbuf_finish(sb); sbuf_delete(sb); *sizep = size; } else { sbuf_start_section(sb, &start_len); sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); kern_proc_filedesc_out(p, sb, *sizep - sizeof(structsize), filedesc_flags); sect_len = sbuf_end_section(sb, start_len, 0, 0); if (sect_len < 0) return; sect_sz = sect_len; KASSERT(sect_sz <= *sizep, ("kern_proc_filedesc_out did not respect maxlen; " "requested %zu, got %zu", *sizep - sizeof(structsize), 
sect_sz - sizeof(structsize))); for (i = 0; i < *sizep - sect_sz && sb->s_error == 0; i++) sbuf_putc(sb, 0); } } #ifdef KINFO_VMENTRY_SIZE CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE); #endif static void note_procstat_vmmap(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize, vmmap_flags; if (coredump_pack_vmmapinfo) vmmap_flags = KERN_VMMAP_PACK_KINFO; else vmmap_flags = 0; p = (struct proc *)arg; structsize = sizeof(struct kinfo_vmentry); if (sb == NULL) { size = 0; sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN); sbuf_set_drain(sb, sbuf_drain_count, &size); sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); kern_proc_vmmap_out(p, sb, -1, vmmap_flags); sbuf_finish(sb); sbuf_delete(sb); *sizep = size; } else { sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); kern_proc_vmmap_out(p, sb, *sizep - sizeof(structsize), vmmap_flags); } } static void note_procstat_groups(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = (struct proc *)arg; size = sizeof(structsize) + p->p_ucred->cr_ngroups * sizeof(gid_t); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(gid_t); sbuf_bcat(sb, &structsize, sizeof(structsize)); sbuf_bcat(sb, p->p_ucred->cr_groups, p->p_ucred->cr_ngroups * sizeof(gid_t)); } *sizep = size; } static void note_procstat_umask(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = (struct proc *)arg; size = sizeof(structsize) + sizeof(p->p_fd->fd_cmask); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(p->p_fd->fd_cmask); sbuf_bcat(sb, &structsize, sizeof(structsize)); sbuf_bcat(sb, &p->p_fd->fd_cmask, sizeof(p->p_fd->fd_cmask)); } *sizep = size; } static void note_procstat_rlimit(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; struct rlimit rlim[RLIM_NLIMITS]; size_t size; int structsize, i; p = (struct proc *)arg; size = sizeof(structsize) + sizeof(rlim); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(rlim); sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); for (i = 0; i < RLIM_NLIMITS; i++) lim_rlimit_proc(p, i, &rlim[i]); PROC_UNLOCK(p); sbuf_bcat(sb, rlim, sizeof(rlim)); } *sizep = size; } static void note_procstat_osrel(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = (struct proc *)arg; size = sizeof(structsize) + sizeof(p->p_osrel); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(p->p_osrel); sbuf_bcat(sb, &structsize, sizeof(structsize)); sbuf_bcat(sb, &p->p_osrel, sizeof(p->p_osrel)); } *sizep = size; } static void __elfN(note_procstat_psstrings)(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; elf_ps_strings_t ps_strings; size_t size; int structsize; p = (struct proc *)arg; size = sizeof(structsize) + sizeof(ps_strings); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(ps_strings); #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 ps_strings = PTROUT(p->p_sysent->sv_psstrings); #else ps_strings = p->p_sysent->sv_psstrings; #endif sbuf_bcat(sb, &structsize, sizeof(structsize)); sbuf_bcat(sb, &ps_strings, sizeof(ps_strings)); } *sizep = size; } static void __elfN(note_procstat_auxv)(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = (struct proc *)arg; if (sb == NULL) { size = 0; sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN); 
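		/*
		 * Sizing pass: route the sbuf through sbuf_drain_count() so
		 * the auxv note is measured without writing anything to the
		 * core file; the emitting pass below reuses the caller's sbuf.
		 */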
sbuf_set_drain(sb, sbuf_drain_count, &size); sbuf_bcat(sb, &structsize, sizeof(structsize)); PHOLD(p); proc_getauxv(curthread, p, sb); PRELE(p); sbuf_finish(sb); sbuf_delete(sb); *sizep = size; } else { structsize = sizeof(Elf_Auxinfo); sbuf_bcat(sb, &structsize, sizeof(structsize)); PHOLD(p); proc_getauxv(curthread, p, sb); PRELE(p); } } static boolean_t __elfN(parse_notes)(struct image_params *imgp, Elf_Note *checknote, const char *note_vendor, const Elf_Phdr *pnote, boolean_t (*cb)(const Elf_Note *, void *, boolean_t *), void *cb_arg) { const Elf_Note *note, *note0, *note_end; const char *note_name; char *buf; int i, error; boolean_t res; /* We need some limit, might as well use PAGE_SIZE. */ if (pnote == NULL || pnote->p_filesz > PAGE_SIZE) return (FALSE); ASSERT_VOP_LOCKED(imgp->vp, "parse_notes"); if (pnote->p_offset > PAGE_SIZE || pnote->p_filesz > PAGE_SIZE - pnote->p_offset) { VOP_UNLOCK(imgp->vp, 0); buf = malloc(pnote->p_filesz, M_TEMP, M_WAITOK); vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY); error = vn_rdwr(UIO_READ, imgp->vp, buf, pnote->p_filesz, pnote->p_offset, UIO_SYSSPACE, IO_NODELOCKED, curthread->td_ucred, NOCRED, NULL, curthread); if (error != 0) { uprintf("i/o error PT_NOTE\n"); goto retf; } note = note0 = (const Elf_Note *)buf; note_end = (const Elf_Note *)(buf + pnote->p_filesz); } else { note = note0 = (const Elf_Note *)(imgp->image_header + pnote->p_offset); note_end = (const Elf_Note *)(imgp->image_header + pnote->p_offset + pnote->p_filesz); buf = NULL; } for (i = 0; i < 100 && note >= note0 && note < note_end; i++) { if (!aligned(note, Elf32_Addr) || (const char *)note_end - (const char *)note < sizeof(Elf_Note)) { goto retf; } if (note->n_namesz != checknote->n_namesz || note->n_descsz != checknote->n_descsz || note->n_type != checknote->n_type) goto nextnote; note_name = (const char *)(note + 1); if (note_name + checknote->n_namesz >= (const char *)note_end || strncmp(note_vendor, note_name, checknote->n_namesz) != 0) goto nextnote; if (cb(note, cb_arg, &res)) goto ret; nextnote: note = (const Elf_Note *)((const char *)(note + 1) + roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE) + roundup2(note->n_descsz, ELF_NOTE_ROUNDSIZE)); } retf: res = FALSE; ret: free(buf, M_TEMP); return (res); } struct brandnote_cb_arg { Elf_Brandnote *brandnote; int32_t *osrel; }; static boolean_t brandnote_cb(const Elf_Note *note, void *arg0, boolean_t *res) { struct brandnote_cb_arg *arg; arg = arg0; /* * Fetch the osreldate for binary from the ELF OSABI-note if * necessary. */ *res = (arg->brandnote->flags & BN_TRANSLATE_OSREL) != 0 && arg->brandnote->trans_osrel != NULL ? arg->brandnote->trans_osrel(note, arg->osrel) : TRUE; return (TRUE); } static Elf_Note fctl_note = { .n_namesz = sizeof(FREEBSD_ABI_VENDOR), .n_descsz = sizeof(uint32_t), .n_type = NT_FREEBSD_FEATURE_CTL, }; struct fctl_cb_arg { uint32_t *fctl0; }; static boolean_t note_fctl_cb(const Elf_Note *note, void *arg0, boolean_t *res) { struct fctl_cb_arg *arg; const Elf32_Word *desc; uintptr_t p; arg = arg0; p = (uintptr_t)(note + 1); p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE); desc = (const Elf32_Word *)p; *arg->fctl0 = desc[0]; return (TRUE); } /* * Try to find the appropriate ABI-note section for checknote, fetch * the osreldate and feature control flags for binary from the ELF * OSABI-note. Only the first page of the image is searched, the same * as for headers. 
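 * (parse_notes() can fall back to vn_rdwr() for a PT_NOTE segment that
 * lies outside that page, but the note size is still capped at PAGE_SIZE.)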
*/ static boolean_t __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *brandnote, int32_t *osrel, uint32_t *fctl0) { const Elf_Phdr *phdr; const Elf_Ehdr *hdr; struct brandnote_cb_arg b_arg; struct fctl_cb_arg f_arg; int i, j; hdr = (const Elf_Ehdr *)imgp->image_header; phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); b_arg.brandnote = brandnote; b_arg.osrel = osrel; f_arg.fctl0 = fctl0; for (i = 0; i < hdr->e_phnum; i++) { if (phdr[i].p_type == PT_NOTE && __elfN(parse_notes)(imgp, &brandnote->hdr, brandnote->vendor, &phdr[i], brandnote_cb, &b_arg)) { for (j = 0; j < hdr->e_phnum; j++) { if (phdr[j].p_type == PT_NOTE && __elfN(parse_notes)(imgp, &fctl_note, FREEBSD_ABI_VENDOR, &phdr[j], note_fctl_cb, &f_arg)) break; } return (TRUE); } } return (FALSE); } /* * Tell kern_execve.c about it, with a little help from the linker. */ static struct execsw __elfN(execsw) = { .ex_imgact = __CONCAT(exec_, __elfN(imgact)), .ex_name = __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) }; EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw)); static vm_prot_t __elfN(trans_prot)(Elf_Word flags) { vm_prot_t prot; prot = 0; if (flags & PF_X) prot |= VM_PROT_EXECUTE; if (flags & PF_W) prot |= VM_PROT_WRITE; if (flags & PF_R) prot |= VM_PROT_READ; #if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__)) if (i386_read_exec && (flags & PF_R)) prot |= VM_PROT_EXECUTE; #endif return (prot); } static Elf_Word __elfN(untrans_prot)(vm_prot_t prot) { Elf_Word flags; flags = 0; if (prot & VM_PROT_EXECUTE) flags |= PF_X; if (prot & VM_PROT_READ) flags |= PF_R; if (prot & VM_PROT_WRITE) flags |= PF_W; return (flags); } Index: projects/runtime-coverage-v2/tests/sys/geom/class/eli/online_resize_test.sh =================================================================== --- projects/runtime-coverage-v2/tests/sys/geom/class/eli/online_resize_test.sh (revision 346057) +++ projects/runtime-coverage-v2/tests/sys/geom/class/eli/online_resize_test.sh (revision 346058) @@ -1,196 +1,192 @@ #!/bin/sh # $FreeBSD$ . 
$(atf_get_srcdir)/conf.sh atf_test_case online_resize cleanup online_resize_head() { atf_set "descr" "online resize of geli providers" atf_set "require.user" "root" } online_resize_body() { geli_test_setup ( echo "m 512 none 10485248 1 1 20971008 1 1 31456768 1 1" echo "m 4096 none 10481664 1 1 20967424 1 1 31453184 1 1" echo "m 512 HMAC/SHA256 5242368 1 1 10485248 1 1 15728128 1 1" echo "m 4096 HMAC/SHA256 9318400 1 1 18640896 1 1 27959296 1 1" echo "p 512 none 11258999068425728 [0-9] 20971520 22517998136851968 [0-9] 41943040 33776997205278208 [0-9] 62914560" echo "p 4096 none 11258999068422144 [0-9] 2621440 22517998136848384 [0-9] 5242880 33776997205274624 [0-9] 7864320" echo "p 512 HMAC/SHA256 5629499534212608 [0-9] 20971520 11258999068425728 [0-9] 41943040 16888498602638848 [0-9] 62914560" echo "p 4096 HMAC/SHA256 10007999171932160 [0-9] 20971520 20015998343868416 [0-9] 41943040 30023997515800576 [0-9] 62914560" ) | while read prefix sector auth esize10 ka10 kt10 esize20 ka20 kt20 esize30 ka30 kt30; do if [ "${auth}" = "none" ]; then aalgo="" eflags="0x200" dflags="0x0" else aalgo="-a ${auth}" eflags="0x210" dflags="0x10" fi if [ "${prefix}" = "m" ]; then psize10="10485760" psize20="20971520" psize30="31457280" else psize10="11258999068426240" psize20="22517998136852480" psize30="33776997205278720" fi md=$(attach_md -t malloc -s40${prefix}) # Initialise atf_check -s exit:0 -o ignore gpart create -s GPT ${md} atf_check -s exit:0 -o ignore gpart add -t freebsd-ufs -s 10${prefix} ${md} echo secret >tmp.key atf_check geli init ${aalgo} -s ${sector} -Bnone -PKtmp.key ${md}p1 # Autoresize is set by default. atf_check -s exit:0 -o match:"flags: ${eflags}$" geli dump ${md}p1 atf_check geli configure -R ${md}p1 atf_check -s exit:0 -o match:"flags: ${dflags}$" geli dump ${md}p1 atf_check geli configure -r ${md}p1 atf_check -s exit:0 -o match:"flags: ${eflags}$" geli dump ${md}p1 atf_check geli init -R ${aalgo} -s ${sector} -Bnone -PKtmp.key ${md}p1 atf_check -s exit:0 -o match:"flags: ${dflags}$" geli dump ${md}p1 atf_check geli configure -r ${md}p1 atf_check -s exit:0 -o match:"flags: ${eflags}$" geli dump ${md}p1 atf_check geli configure -R ${md}p1 atf_check -s exit:0 -o match:"flags: ${dflags}$" geli dump ${md}p1 atf_check geli init ${aalgo} -s ${sector} -Bnone -PKtmp.key ${md}p1 atf_check geli attach -pk tmp.key ${md}p1 atf_check -s exit:0 -o match:'^Flags: .*AUTORESIZE' geli list ${md}p1.eli atf_check geli configure -R ${md}p1 atf_check -s exit:0 -o match:"flags: ${dflags}$" geli dump ${md}p1 atf_check -o not-match:'^Flags: .*AUTORESIZE' geli list ${md}p1.eli atf_check geli configure -r ${md}p1 atf_check -s exit:0 -o match:"flags: ${eflags}$" geli dump ${md}p1 atf_check -s exit:0 -o match:'^Flags: .*AUTORESIZE' geli list ${md}p1.eli atf_check geli configure -R ${md}p1 atf_check -s exit:0 -o match:"provsize: ${psize10}$" geli dump ${md}p1 atf_check -s exit:0 -o match:resized gpart resize -i 1 -s 20${prefix} ${md} # Autoresize turned off - we lose metadata. atf_check -s exit:1 -o empty -e ignore geli dump ${md}p1 atf_check geli detach ${md}p1.eli # When we recover previous size, the metadata should be there. 
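		# geli keeps its metadata in the provider's last sector, so
		# growing the partition (with autoresize off) hides it and
		# shrinking back to the original size exposes it again.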
atf_check -s exit:0 -o match:resized gpart resize -i 1 -s 10${prefix} ${md} atf_check -s exit:0 -o match:"flags: ${dflags}$" geli dump ${md}p1 atf_check geli configure -r ${md}p1 atf_check geli attach -pk tmp.key ${md}p1 atf_check -s exit:0 -o match:"^[[:space:]]${esize10}[[:space:]]+# mediasize in bytes" diskinfo -v ${md}p1.eli atf_check -s exit:0 -o match:"^KeysAllocated: ${ka10}$" geli list ${md}p1.eli atf_check -s exit:0 -o match:"^KeysTotal: ${kt10}$" geli list ${md}p1.eli atf_check -s exit:0 -o match:resized gpart resize -i 1 -s 20${prefix} ${md} atf_check -s exit:0 -o match:"provsize: ${psize20}$" geli dump ${md}p1 atf_check -s exit:0 -o match:"^[[:space:]]${esize20}[[:space:]]+# mediasize in bytes" diskinfo -v ${md}p1.eli atf_check -s exit:0 -o match:"^KeysAllocated: ${ka20}$" geli list ${md}p1.eli atf_check -s exit:0 -o match:"^KeysTotal: ${kt20}$" geli list ${md}p1.eli atf_check -s exit:0 -o match:"flags: ${eflags}$" geli dump ${md}p1 atf_check -s exit:0 -o match:'^Flags: .*AUTORESIZE' geli list ${md}p1.eli if [ "${prefix}" = "m" ]; then atf_check -s exit:1 -o empty -e match:"^${esize20} bytes transferred " dd if=/dev/random of=/dev/${md}p1.eli bs=1m atf_check -s exit:0 -o empty -e match:"^${esize20} bytes transferred " dd if=/dev/${md}p1.eli of=/dev/null bs=1m fi atf_check -s exit:0 -o match:resized gpart resize -i 1 -s 30${prefix} ${md} atf_check -s exit:0 -o match:"provsize: ${psize30}$" geli dump ${md}p1 atf_check -s exit:0 -o match:"^[[:space:]]${esize30}[[:space:]]+# mediasize in bytes" diskinfo -v ${md}p1.eli atf_check -s exit:0 -o match:"^KeysAllocated: ${ka30}$" geli list ${md}p1.eli atf_check -s exit:0 -o match:"^KeysTotal: ${kt30}$" geli list ${md}p1.eli atf_check -s exit:0 -o match:"flags: ${eflags}$" geli dump ${md}p1 atf_check -s exit:0 -o match:'^Flags: .*AUTORESIZE' geli list ${md}p1.eli if [ "${prefix}" = "m" ]; then atf_check -s exit:1 -o empty -e match:"^${esize30} bytes transferred " dd if=/dev/random of=/dev/${md}p1.eli bs=1m atf_check -s exit:0 -o empty -e match:"^${esize30} bytes transferred " dd if=/dev/${md}p1.eli of=/dev/null bs=1m fi atf_check geli detach ${md}p1.eli # Make sure that the old metadata is removed. atf_check -s exit:0 -o match:resized gpart resize -i 1 -s 20${prefix} ${md} atf_check -s exit:1 -o empty -e ignore geli dump ${md}p1 atf_check -s exit:0 -o match:resized gpart resize -i 1 -s 10${prefix} ${md} atf_check -s exit:1 -o empty -e ignore geli dump ${md}p1 # Test geli with onetime keys. 
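		# Onetime providers use a random key that is never stored on
		# disk, so there is no metadata to dump or resize.  A minimal
		# manual sketch (the md1 device name is illustrative only):
		#   geli onetime -s 4096 md1 && newfs /dev/md1.eli
		#   ... use the volume ...
		#   geli detach md1.eli   # contents are unrecoverable afterwards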
if [ "${auth}" = "none" ]; then osize10="${psize10}" osize20="${psize20}" osize30="${psize30}" else osize10="${esize10}" osize20="${esize20}" osize30="${esize30}" if [ "${sector}" -eq 512 ]; then osize10=$((osize10+sector)) osize20=$((osize20+sector)) osize30=$((osize30+sector)) fi fi atf_check geli onetime ${aalgo} -s ${sector} ${md}p1 atf_check -s exit:0 -o match:"^[[:space:]]${osize10}[[:space:]]+# mediasize in bytes" diskinfo -v ${md}p1.eli atf_check -s exit:0 -o match:'^Flags: .*AUTORESIZE' geli list ${md}p1.eli atf_check -s exit:0 -o match:resized gpart resize -i 1 -s 20${prefix} ${md} atf_check -s exit:0 -o match:"^[[:space:]]${osize20}[[:space:]]+# mediasize in bytes" diskinfo -v ${md}p1.eli atf_check -s exit:0 -o match:resized gpart resize -i 1 -s 30${prefix} ${md} atf_check -s exit:0 -o match:"^[[:space:]]${osize30}[[:space:]]+# mediasize in bytes" diskinfo -v ${md}p1.eli atf_check geli detach ${md}p1.eli atf_check -s exit:0 -o match:resized gpart resize -i 1 -s 10${prefix} ${md} atf_check geli onetime -R ${aalgo} -s ${sector} ${md}p1 atf_check -s exit:0 -o match:"^[[:space:]]${osize10}[[:space:]]+# mediasize in bytes" diskinfo -v ${md}p1.eli atf_check -o not-match:'^Flags: .*AUTORESIZE' geli list ${md}p1.eli atf_check -s exit:0 -o match:resized gpart resize -i 1 -s 20${prefix} ${md} atf_check -s exit:0 -o match:"^[[:space:]]${osize10}[[:space:]]+# mediasize in bytes" diskinfo -v ${md}p1.eli atf_check -s exit:0 -o match:resized gpart resize -i 1 -s 30${prefix} ${md} atf_check -s exit:0 -o match:"^[[:space:]]${osize10}[[:space:]]+# mediasize in bytes" diskinfo -v ${md}p1.eli atf_check geli detach ${md}p1.eli atf_check -s exit:0 -o match:resized gpart resize -i 1 -s 10${prefix} ${md} atf_check geli onetime ${aalgo} -s ${sector} ${md}p1 atf_check -s exit:0 -o match:"^[[:space:]]${osize10}[[:space:]]+# mediasize in bytes" diskinfo -v ${md}p1.eli atf_check -s exit:0 -o match:'^Flags: .*AUTORESIZE' geli list ${md}p1.eli atf_check geli configure -R ${md}p1 atf_check -o not-match:'^Flags: .*AUTORESIZE' geli list ${md}p1.eli atf_check -s exit:0 -o match:resized gpart resize -i 1 -s 20${prefix} ${md} atf_check -s exit:0 -o match:"^[[:space:]]${osize10}[[:space:]]+# mediasize in bytes" diskinfo -v ${md}p1.eli atf_check geli configure -r ${md}p1 atf_check -s exit:0 -o match:'^Flags: .*AUTORESIZE' geli list ${md}p1.eli atf_check -s exit:0 -o match:resized gpart resize -i 1 -s 30${prefix} ${md} atf_check -s exit:0 -o match:"^[[:space:]]${osize30}[[:space:]]+# mediasize in bytes" diskinfo -v ${md}p1.eli - atf_check geli detach ${md}p1.eli - - # Cleanup. 
- atf_check -s exit:0 -o match:deleted gpart delete -i 1 ${md} - atf_check -s exit:0 -o match:destroyed gpart destroy ${md} - atf_check -s exit:0 -o ignore mdconfig -d -u ${md} done } online_resize_cleanup() { if [ -f "$TEST_MDS_FILE" ]; then while read md; do - [ -c /dev/${md}p1.eli ] && geli detach ${md}p1.eli - mdconfig -d -u ${md} 2>/dev/null + atf_check -s ignore -e ignore -o ignore geli detach ${md}p1.eli + atf_check -s ignore -e ignore -o ignore gpart delete -i 1 ${md} + atf_check -s ignore -e ignore -o ignore gpart destroy ${md} done < $TEST_MDS_FILE fi + geli_test_cleanup } atf_init_test_cases() { atf_add_test_case online_resize } Index: projects/runtime-coverage-v2/tools/tools/ioat/ioatcontrol.c =================================================================== --- projects/runtime-coverage-v2/tools/tools/ioat/ioatcontrol.c (revision 346057) +++ projects/runtime-coverage-v2/tools/tools/ioat/ioatcontrol.c (revision 346058) @@ -1,283 +1,285 @@ /*- * Copyright (C) 2012 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include "ioat_test.h" static int prettyprint(struct ioat_test *); static void usage(void) { - printf("Usage: %s [-c period] [-EfmVz] channel-number num-txns [ " + printf("Usage: %s [-c period] [-EefmVxXz] channel-number num-txns [ " "[ [duration]]]\n", getprogname()); printf(" %s -r [-c period] [-vVwz] channel-number address []\n\n", getprogname()); printf(" -c period - Enable interrupt coalescing (us) (default: 0)\n"); printf(" -E - Test contiguous 8k copy.\n"); printf(" -e - Test non-contiguous 8k copy.\n"); printf(" -f - Test block fill.\n"); printf(" -m - Test memcpy instead of DMA.\n"); printf(" -r - Issue DMA to or from a specific address.\n"); printf(" -V - Enable verification\n"); printf(" -v -
is a kernel virtual address\n"); printf(" -w - Write to the specified address\n"); printf(" -x - Test DMA CRC.\n"); printf(" -X - Test DMA CRC copy.\n"); printf(" -z - Zero device stats before test\n"); exit(EX_USAGE); } static void main_raw(struct ioat_test *t, int argc, char **argv) { int fd; /* Raw DMA defaults */ t->testkind = IOAT_TEST_RAW_DMA; t->transactions = 1; t->chain_depth = 1; t->buffer_size = 4 * 1024; t->raw_target = strtoull(argv[1], NULL, 0); if (t->raw_target == 0) { printf("Target shoudln't be NULL\n"); exit(EX_USAGE); } if (argc >= 3) { t->buffer_size = atoi(argv[2]); if (t->buffer_size == 0) { printf("Buffer size must be greater than zero\n"); exit(EX_USAGE); } } fd = open("/dev/ioat_test", O_RDWR); if (fd < 0) { printf("Cannot open /dev/ioat_test\n"); exit(EX_UNAVAILABLE); } (void)ioctl(fd, IOAT_DMATEST, t); close(fd); exit(prettyprint(t)); } int main(int argc, char **argv) { struct ioat_test t; int fd, ch; bool fflag, rflag, Eflag, eflag, mflag, xflag, Xflag; unsigned modeflags; memset(&t, 0, sizeof(t)); fflag = rflag = Eflag = eflag = mflag = xflag = Xflag = false; modeflags = 0; while ((ch = getopt(argc, argv, "c:EefmrvVwxXz")) != -1) { switch (ch) { case 'c': t.coalesce_period = atoi(optarg); break; case 'E': Eflag = true; modeflags++; break; case 'e': eflag = true; modeflags++; break; case 'f': fflag = true; modeflags++; break; case 'm': mflag = true; modeflags++; break; case 'r': rflag = true; modeflags++; break; case 'v': t.raw_is_virtual = true; break; case 'V': t.verify = true; break; case 'w': t.raw_write = true; break; case 'x': xflag = true; + modeflags++; break; case 'X': Xflag = true; + modeflags++; break; case 'z': t.zero_stats = true; break; default: usage(); } } argc -= optind; argv += optind; if (argc < 2) usage(); if (modeflags > 1) { - printf("Invalid: Cannot use >1 mode flag (-E, -f, -m, or -r)\n"); + printf("Invalid: Cannot use >1 mode flag (-E, -e, -f, -m, -r, -x or -X)\n"); usage(); } /* Defaults for optional args */ t.buffer_size = 256 * 1024; t.chain_depth = 2; t.duration = 0; t.testkind = IOAT_TEST_DMA; if (fflag) t.testkind = IOAT_TEST_FILL; else if (Eflag || eflag) { t.testkind = IOAT_TEST_DMA_8K; t.buffer_size = 8 * 1024; } else if (mflag) t.testkind = IOAT_TEST_MEMCPY; else if (xflag) t.testkind = IOAT_TEST_DMA_CRC; - else if (xflag) + else if (Xflag) t.testkind = IOAT_TEST_DMA_CRC_COPY; t.channel_index = atoi(argv[0]); if (t.channel_index > 8) { printf("Channel number must be between 0 and 7.\n"); return (EX_USAGE); } if (rflag) { main_raw(&t, argc, argv); return (EX_OK); } t.transactions = atoi(argv[1]); if (argc >= 3) { t.buffer_size = atoi(argv[2]); if (t.buffer_size == 0) { printf("Buffer size must be greater than zero\n"); return (EX_USAGE); } } if (argc >= 4) { t.chain_depth = atoi(argv[3]); if (t.chain_depth < 1) { printf("Chain length must be greater than zero\n"); return (EX_USAGE); } } if (argc >= 5) { t.duration = atoi(argv[4]); if (t.duration < 1) { printf("Duration must be greater than zero\n"); return (EX_USAGE); } } fd = open("/dev/ioat_test", O_RDWR); if (fd < 0) { printf("Cannot open /dev/ioat_test\n"); return (EX_UNAVAILABLE); } (void)ioctl(fd, IOAT_DMATEST, &t); close(fd); return (prettyprint(&t)); } static int prettyprint(struct ioat_test *t) { char bps[10], bytesh[10]; uintmax_t bytes; if (t->status[IOAT_TEST_NO_DMA_ENGINE] != 0 || t->status[IOAT_TEST_NO_MEMORY] != 0 || t->status[IOAT_TEST_MISCOMPARE] != 0) { printf("Errors:\n"); if (t->status[IOAT_TEST_NO_DMA_ENGINE] != 0) printf("\tNo DMA engine present: %u\n", 
(unsigned)t->status[IOAT_TEST_NO_DMA_ENGINE]); if (t->status[IOAT_TEST_NO_MEMORY] != 0) printf("\tOut of memory: %u\n", (unsigned)t->status[IOAT_TEST_NO_MEMORY]); if (t->status[IOAT_TEST_MISCOMPARE] != 0) printf("\tMiscompares: %u\n", (unsigned)t->status[IOAT_TEST_MISCOMPARE]); } printf("Processed %u txns\n", (unsigned)t->status[IOAT_TEST_OK] / t->chain_depth); bytes = (uintmax_t)t->buffer_size * t->status[IOAT_TEST_OK]; humanize_number(bytesh, sizeof(bytesh), (int64_t)bytes, "B", HN_AUTOSCALE, HN_DECIMAL); if (t->duration) { humanize_number(bps, sizeof(bps), (int64_t)1000 * bytes / t->duration, "B/s", HN_AUTOSCALE, HN_DECIMAL); printf("%ju (%s) copied in %u ms (%s)\n", bytes, bytesh, (unsigned)t->duration, bps); } else printf("%ju (%s) copied\n", bytes, bytesh); return (EX_OK); } Index: projects/runtime-coverage-v2 =================================================================== --- projects/runtime-coverage-v2 (revision 346057) +++ projects/runtime-coverage-v2 (revision 346058) Property changes on: projects/runtime-coverage-v2 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r346044-346057
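The imgact_elf.c hunks above fold the old post-mapping PT_LOAD/PT_PHDR walk
into the initial program-header scan: proghdr is recorded unrelocated while
the headers are parsed, and the ET_DYN load base is added exactly once, when
the auxargs are filled in ("elf_auxargs->phdr = proghdr + et_dyn_addr").
The sketch below is a minimal userland illustration of that selection order,
assuming a native-endian ELF64 input; the program and its name are
illustrative only and not part of this change.

#include <sys/types.h>

#include <elf.h>
#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	Elf64_Ehdr eh;
	Elf64_Phdr *ph;
	uint64_t proghdr;
	int fd, i;

	if (argc != 2)
		errx(1, "usage: %s elf-file", argv[0]);
	if ((fd = open(argv[1], O_RDONLY)) < 0)
		err(1, "%s", argv[1]);
	if (read(fd, &eh, sizeof(eh)) != (ssize_t)sizeof(eh) ||
	    memcmp(eh.e_ident, ELFMAG, SELFMAG) != 0 ||
	    eh.e_ident[EI_CLASS] != ELFCLASS64)
		errx(1, "%s: not a readable ELF64 object", argv[1]);
	if ((ph = calloc(eh.e_phnum, sizeof(*ph))) == NULL)
		err(1, "calloc");
	if (pread(fd, ph, (size_t)eh.e_phnum * sizeof(*ph),
	    (off_t)eh.e_phoff) != (ssize_t)((size_t)eh.e_phnum * sizeof(*ph)))
		errx(1, "%s: short program header table", argv[1]);

	/*
	 * Same selection the kernel now performs in its single scan:
	 * a PT_PHDR entry gives the address directly, otherwise the
	 * PT_LOAD segment that maps the file header (p_offset == 0 and
	 * large enough to cover the header table) supplies it as
	 * p_vaddr + e_phoff.  For ET_DYN objects the kernel later adds
	 * the load base (et_dyn_addr) when building the auxargs.
	 */
	proghdr = 0;
	for (i = 0; i < eh.e_phnum; i++) {
		switch (ph[i].p_type) {
		case PT_LOAD:
			if (ph[i].p_offset == 0 &&
			    eh.e_phoff + eh.e_phnum * eh.e_phentsize <=
			    ph[i].p_filesz)
				proghdr = ph[i].p_vaddr + eh.e_phoff;
			break;
		case PT_PHDR:
			proghdr = ph[i].p_vaddr;
			break;
		}
	}
	printf("unrelocated AT_PHDR value: %#jx\n", (uintmax_t)proghdr);
	free(ph);
	close(fd);
	return (0);
}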